observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \\n\\n' % (\r\n header_html,\r\n)\r\nst.sidebar.markdown(\r\n header_full,\r\n unsafe_allow_html=True,\r\n)\r\n\r\n# Long Form QA with ELI5 and Wikipedia\r\nlowerCAmelCase_\t\t\t\t\t\t:\t\tList[Any]\t\t\t\t\t\t\t\t= '\\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\\na pre-processed fixed snapshot of Wikipedia.\\n'\r\nst.sidebar.markdown(description, unsafe_allow_html=True)\r\n\r\nlowerCAmelCase_\t\t\t\t\t\t:\t\tList[Any]\t\t\t\t\t\t\t\t= [\r\n 'Answer the question',\r\n 'View the retrieved document only',\r\n 'View the most similar ELI5 question and answer',\r\n 'Show me everything, please!',\r\n]\r\nlowerCAmelCase_\t\t\t\t\t\t:\t\tstr\t\t\t\t\t\t\t\t= st.sidebar.checkbox(\"\"\"Demo options\"\"\")\r\nif demo_options:\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tOptional[int]\t\t\t\t\t\t\t\t= st.sidebar.selectbox(\r\n\t\t\t \"\"\"\"\"\",\r\n\t\t\t action_list,\r\n\t\t\t index=3,\r\n\t\t\t)\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tOptional[Any]\t\t\t\t\t\t\t\t= action_list.index(action_st)\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tstr\t\t\t\t\t\t\t\t= st.sidebar.selectbox(\r\n\t\t\t \"\"\"\"\"\",\r\n\t\t\t [\"\"\"Show full text of passages\"\"\", \"\"\"Show passage section titles\"\"\"],\r\n\t\t\t index=0,\r\n\t\t\t)\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tAny\t\t\t\t\t\t\t\t= show_type == 'Show full text of passages'\r\nelse:\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tUnion[str, Any]\t\t\t\t\t\t\t\t= 3\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tUnion[str, Any]\t\t\t\t\t\t\t\t= True\r\n\r\nlowerCAmelCase_\t\t\t\t\t\t:\t\tOptional[Any]\t\t\t\t\t\t\t\t= st.sidebar.checkbox(\"\"\"Retrieval options\"\"\")\r\nif retrieval_options:\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tAny\t\t\t\t\t\t\t\t= '\\n ### Information retriever options\\n\\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\\n '\r\n\t\t\tst.sidebar.markdown(retriever_info)\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tList[str]\t\t\t\t\t\t\t\t= st.sidebar.selectbox(\"\"\"Which Wikipedia format should the model use?\"\"\", [\"\"\"wiki40b\"\"\", \"\"\"none\"\"\"])\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tOptional[Any]\t\t\t\t\t\t\t\t= st.sidebar.selectbox(\"\"\"Which Wikipedia indexer should the model use?\"\"\", [\"\"\"dense\"\"\", \"\"\"sparse\"\"\", \"\"\"mixed\"\"\"])\r\nelse:\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tOptional[Any]\t\t\t\t\t\t\t\t= 'wiki40b'\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tTuple\t\t\t\t\t\t\t\t= 'dense'\r\n\r\nlowerCAmelCase_\t\t\t\t\t\t:\t\tTuple\t\t\t\t\t\t\t\t= 'beam'\r\nlowerCAmelCase_\t\t\t\t\t\t:\t\tint\t\t\t\t\t\t\t\t= 2\r\nlowerCAmelCase_\t\t\t\t\t\t:\t\tTuple\t\t\t\t\t\t\t\t= 64\r\nlowerCAmelCase_\t\t\t\t\t\t:\t\tList[str]\t\t\t\t\t\t\t\t= 256\r\nlowerCAmelCase_\t\t\t\t\t\t:\t\tOptional[int]\t\t\t\t\t\t\t\t= 
None\r\nlowerCAmelCase_\t\t\t\t\t\t:\t\tOptional[int]\t\t\t\t\t\t\t\t= None\r\nlowerCAmelCase_\t\t\t\t\t\t:\t\tstr\t\t\t\t\t\t\t\t= st.sidebar.checkbox(\"\"\"Generation options\"\"\")\r\nif generate_options:\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tDict\t\t\t\t\t\t\t\t= '\\n ### Answer generation options\\n\\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\\n **beam** search, or **sample** from the decoder\\'s output probabilities.\\n '\r\n\t\t\tst.sidebar.markdown(generate_info)\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tTuple\t\t\t\t\t\t\t\t= st.sidebar.selectbox(\"\"\"Would you like to use beam search or sample an answer?\"\"\", [\"\"\"beam\"\"\", \"\"\"sampled\"\"\"])\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tList[str]\t\t\t\t\t\t\t\t= st.sidebar.slider(\r\n\t\t\t \"\"\"Minimum generation length\"\"\", min_value=8, max_value=256, value=64, step=8, format=None, key=None\r\n\t\t\t)\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tOptional[Any]\t\t\t\t\t\t\t\t= st.sidebar.slider(\r\n\t\t\t \"\"\"Maximum generation length\"\"\", min_value=64, max_value=512, value=256, step=16, format=None, key=None\r\n\t\t\t)\r\n\t\t\tif sampled == \"beam\":\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tstr\t\t\t\t\t\t\t\t= st.sidebar.slider(\"\"\"Beam size\"\"\", min_value=1, max_value=8, value=2, step=None, format=None, key=None)\r\n\t\t\telse:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tTuple\t\t\t\t\t\t\t\t= st.sidebar.slider(\r\n\t\t\t\t\t\t \"\"\"Nucleus sampling p\"\"\", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None\r\n\t\t\t\t\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tTuple\t\t\t\t\t\t\t\t= st.sidebar.slider(\r\n\t\t\t\t\t\t \"\"\"Temperature\"\"\", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None\r\n\t\t\t\t\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tDict\t\t\t\t\t\t\t\t= None\r\n\r\n# start main text\r\nlowerCAmelCase_\t\t\t\t\t\t:\t\tTuple\t\t\t\t\t\t\t\t= [\r\n '',\r\n 'How do people make chocolate?',\r\n 'Why do we get a fever when we are sick?',\r\n 'How can different animals perceive different colors?',\r\n 'What is natural language processing?',\r\n 'What\\'s the best way to treat a sunburn?',\r\n 'What exactly are vitamins ?',\r\n 'How does nuclear energy provide electricity?',\r\n 'What\\'s the difference between viruses and bacteria?',\r\n 'Why are flutes classified as woodwinds when most of them are made out of metal ?',\r\n 'Why do people like drinking coffee even though it tastes so bad?',\r\n 'What happens when wine ages? How does it make the wine taste better?',\r\n 'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',\r\n 'How can we set a date to the beginning or end of an artistic period? Doesn\\'t the change happen gradually?',\r\n 'How does New Zealand have so many large bird predators?',\r\n]\r\nlowerCAmelCase_\t\t\t\t\t\t:\t\tList[Any]\t\t\t\t\t\t\t\t= st.selectbox(\r\n \"\"\"What would you like to ask? 
---- select to enter a new query\"\"\",\r\n questions_list,\r\n index=1,\r\n)\r\nif question_s == \"\":\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tstr\t\t\t\t\t\t\t\t= st.text_input(\"\"\"Enter your question here:\"\"\", \"\"\"\"\"\")\r\nelse:\r\n\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tint\t\t\t\t\t\t\t\t= question_s\r\n\r\nif st.button(\"\"\"Show me!\"\"\"):\r\n\t\t\tif action in [0, 1, 3]:\r\n\t\t\t\t\t\tif index_type == \"mixed\":\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tOptional[Any]\t\t\t\t\t\t\t\t= make_support(question, source=wiki_source, method=\"\"\"dense\"\"\", n_results=10)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tUnion[str, Any]\t\t\t\t\t\t\t\t= make_support(question, source=wiki_source, method=\"\"\"sparse\"\"\", n_results=10)\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tList[Any]\t\t\t\t\t\t\t\t= []\r\n\t\t\t\t\t\t\t\t\tfor res_d, res_s in zip(support_list_dense, support_list_sparse):\r\n\t\t\t\t\t\t\t\t\t\t\t\tif tuple(res_d) not in support_list:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsupport_list += [tuple(res_d)]\r\n\t\t\t\t\t\t\t\t\t\t\t\tif tuple(res_s) not in support_list:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsupport_list += [tuple(res_s)]\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tList[Any]\t\t\t\t\t\t\t\t= support_list[:10]\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tstr\t\t\t\t\t\t\t\t= '
<P> ' + ' <P> 
'.join([res[-1] for res in support_list])\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tint\t\t\t\t\t\t\t\t= make_support(question, source=wiki_source, method=index_type, n_results=10)\r\n\t\t\tif action in [0, 3]:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tTuple\t\t\t\t\t\t\t\t= answer_question(\r\n\t\t\t\t\t\t question_doc,\r\n\t\t\t\t\t\t sas_model,\r\n\t\t\t\t\t\t sas_tokenizer,\r\n\t\t\t\t\t\t min_len=min_len,\r\n\t\t\t\t\t\t max_len=int(max_len),\r\n\t\t\t\t\t\t sampling=(sampled == \"\"\"sampled\"\"\"),\r\n\t\t\t\t\t\t n_beams=n_beams,\r\n\t\t\t\t\t\t top_p=top_p,\r\n\t\t\t\t\t\t temp=temp,\r\n\t\t\t\t\t\t)\r\n\t\t\t\t\t\tst.markdown(\"\"\"### The model generated answer is:\"\"\")\r\n\t\t\t\t\t\tst.write(answer)\r\n\t\t\tif action in [0, 1, 3] and wiki_source != \"none\":\r\n\t\t\t\t\t\tst.markdown(\"\"\"--- \\n ### The model is drawing information from the following Wikipedia passages:\"\"\")\r\n\t\t\t\t\t\tfor i, res in enumerate(support_list):\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tOptional[Any]\t\t\t\t\t\t\t\t= 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(\"\"\" \"\"\", \"\"\"_\"\"\"))\r\n\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tint\t\t\t\t\t\t\t\t= res[1].strip()\r\n\t\t\t\t\t\t\t\t\tif sec_titles == \"\":\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tList[str]\t\t\t\t\t\t\t\t= '[{}]({})'.format(res[0], wiki_url)\r\n\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tList[Any]\t\t\t\t\t\t\t\t= sec_titles.split(\"\"\" & \"\"\")\r\n\t\t\t\t\t\t\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tList[str]\t\t\t\t\t\t\t\t= ' & '.join(\r\n\t\t\t\t\t\t\t\t\t\t\t\t [\"\"\"[{}]({}#{})\"\"\".format(sec.strip(), wiki_url, sec.strip().replace(\"\"\" \"\"\", \"\"\"_\"\"\")) for sec in sec_list]\r\n\t\t\t\t\t\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\tst.markdown(\r\n\t\t\t\t\t\t\t\t\t \"\"\"{0:02d} - **Article**: {1:<18}
_Section_: {2}\"\"\".format(i + 1, res[0], sections),\r\n\t\t\t\t\t\t\t\t\t unsafe_allow_html=True,\r\n\t\t\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\tif show_passages:\r\n\t\t\t\t\t\t\t\t\t\t\t\tst.write(\r\n\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"> \"\"\" + res[-1] + \"\"\"\"\"\", unsafe_allow_html=True\r\n\t\t\t\t\t\t\t\t\t\t\t\t)\r\n\t\t\tif action in [2, 3]:\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tint\t\t\t\t\t\t\t\t= find_nearest_training(question)\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tAny\t\t\t\t\t\t\t\t= nn_train_list[0]\r\n\t\t\t\t\t\tst.markdown(\r\n\t\t\t\t\t\t \"\"\"--- \\n ### The most similar question in the ELI5 training set was: \\n\\n {}\"\"\".format(train_exple[\"\"\"title\"\"\"])\r\n\t\t\t\t\t\t)\r\n\t\t\t\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\t\tint\t\t\t\t\t\t\t\t= [\r\n\t\t\t\t\t\t '{}. {}'.format(i + 1, \"\"\" \\n\"\"\".join([line.strip() for line in ans.split(\"\"\"\\n\"\"\") if line.strip() != \"\"\"\"\"\"]))\r\n\t\t\t\t\t\t for i, (ans, sc) in enumerate(zip(train_exple[\"\"\"answers\"\"\"][\"\"\"text\"\"\"], train_exple[\"\"\"answers\"\"\"][\"\"\"score\"\"\"]))\r\n\t\t\t\t\t\t if i == 0 or sc > 2\r\n\t\t\t\t\t\t]\r\n\t\t\t\t\t\tst.markdown(\"\"\"##### Its answers were: \\n\\n {}\"\"\".format(\"\"\"\\n\"\"\".join(answers_st)))\r\n\r\n\r\nlowerCAmelCase_\t\t\t\t\t\t:\t\tList[str]\t\t\t\t\t\t\t\t= '\\n---\\n\\n**Disclaimer**\\n\\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\\n'\r\n\r\n\r\n\r\n\r\n\r\n\r\nst.sidebar.markdown(disclaimer, unsafe_allow_html=True)\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":435,"string":"435"},"style_context":{"kind":"string","value":"\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\nimport os\r\nimport re\r\nfrom shutil import copyfile\r\nfrom typing import Any, Dict, List, Optional, Tuple\r\n\r\nimport sentencepiece as spm\r\n\r\nfrom ...tokenization_utils import AddedToken, PreTrainedTokenizer\r\nfrom ...utils import logging\r\n\r\n\r\nUpperCAmelCase__ : Union[str, Any] \t\t\t\t=\t\t\t\t\tlogging.get_logger(__name__)\r\n\r\nUpperCAmelCase__ : Dict \t\t\t\t=\t\t\t\t\t{'vocab_file': 'spiece.model'}\r\n\r\nUpperCAmelCase__ : Tuple \t\t\t\t=\t\t\t\t\t{\r\n 'vocab_file': {\r\n 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',\r\n 'google/bigbird-roberta-large': (\r\n 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'\r\n ),\r\n 'google/bigbird-base-trivia-itc': (\r\n 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'\r\n ),\r\n }\r\n}\r\n\r\nUpperCAmelCase__ : Optional[int] \t\t\t\t=\t\t\t\t\t{\r\n 'google/bigbird-roberta-base': 4_0_9_6,\r\n 'google/bigbird-roberta-large': 4_0_9_6,\r\n 'google/bigbird-base-trivia-itc': 4_0_9_6,\r\n}\r\n\r\n\r\n\r\nclass lowerCAmelCase_\t\t(a__ ):\r\n\r\n\r\n\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\t\t\t\t__UpperCamelCase :\t\tOptional[int] \t\t\t= VOCAB_FILES_NAMES\r\n\t\t\t\t__UpperCamelCase :\t\tList[Any] \t\t\t= PRETRAINED_VOCAB_FILES_MAP\r\n\t\t\t\t__UpperCamelCase :\t\tint \t\t\t= PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\r\n\t\t\t\t__UpperCamelCase :\t\tList[str] \t\t\t= ['''input_ids''', '''attention_mask''']\r\n\t\t\t\t__UpperCamelCase 
:\t\tList[int] \t\t\t= []\r\n\r\n\r\n\r\n\t\t\t\tdef __init__(self ,\t\t\tSCREAMING_SNAKE_CASE__ ,\t\t\tSCREAMING_SNAKE_CASE__=\"\" ,\t\t\tSCREAMING_SNAKE_CASE__=\"\" ,\t\t\tSCREAMING_SNAKE_CASE__=\"\" ,\t\t\tSCREAMING_SNAKE_CASE__=\"\" ,\t\t\tSCREAMING_SNAKE_CASE__=\"[SEP]\" ,\t\t\tSCREAMING_SNAKE_CASE__=\"[MASK]\" ,\t\t\tSCREAMING_SNAKE_CASE__=\"[CLS]\" ,\t\t\tSCREAMING_SNAKE_CASE__ = None ,\t\t\t**SCREAMING_SNAKE_CASE__ ,\t\t\t) ->\t\t\t\t\t\tNone:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tint\t = AddedToken(SCREAMING_SNAKE_CASE__ ,\t\t\tlstrip=SCREAMING_SNAKE_CASE__ ,\t\t\trstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ ,\t\t\tSCREAMING_SNAKE_CASE__ ) else bos_token\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tDict\t = AddedToken(SCREAMING_SNAKE_CASE__ ,\t\t\tlstrip=SCREAMING_SNAKE_CASE__ ,\t\t\trstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ ,\t\t\tSCREAMING_SNAKE_CASE__ ) else eos_token\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tTuple\t = AddedToken(SCREAMING_SNAKE_CASE__ ,\t\t\tlstrip=SCREAMING_SNAKE_CASE__ ,\t\t\trstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ ,\t\t\tSCREAMING_SNAKE_CASE__ ) else unk_token\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tList[str]\t = AddedToken(SCREAMING_SNAKE_CASE__ ,\t\t\tlstrip=SCREAMING_SNAKE_CASE__ ,\t\t\trstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ ,\t\t\tSCREAMING_SNAKE_CASE__ ) else pad_token\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tOptional[Any]\t = AddedToken(SCREAMING_SNAKE_CASE__ ,\t\t\tlstrip=SCREAMING_SNAKE_CASE__ ,\t\t\trstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ ,\t\t\tSCREAMING_SNAKE_CASE__ ) else cls_token\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tint\t = AddedToken(SCREAMING_SNAKE_CASE__ ,\t\t\tlstrip=SCREAMING_SNAKE_CASE__ ,\t\t\trstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ ,\t\t\tSCREAMING_SNAKE_CASE__ ) else sep_token\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t# Mask token behave like a normal word, i.e. 
include the space before it\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tList[Any]\t = AddedToken(SCREAMING_SNAKE_CASE__ ,\t\t\tlstrip=SCREAMING_SNAKE_CASE__ ,\t\t\trstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ ,\t\t\tSCREAMING_SNAKE_CASE__ ) else mask_token\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tTuple\t = {} if sp_model_kwargs is None else sp_model_kwargs\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tsuper().__init__(\r\n\t\t\t\t\t\t\t\t\t\t\t bos_token=SCREAMING_SNAKE_CASE__ ,\t\t\teos_token=SCREAMING_SNAKE_CASE__ ,\t\t\tunk_token=SCREAMING_SNAKE_CASE__ ,\t\t\tpad_token=SCREAMING_SNAKE_CASE__ ,\t\t\tsep_token=SCREAMING_SNAKE_CASE__ ,\t\t\tmask_token=SCREAMING_SNAKE_CASE__ ,\t\t\tcls_token=SCREAMING_SNAKE_CASE__ ,\t\t\tsp_model_kwargs=self.sp_model_kwargs ,\t\t\t**SCREAMING_SNAKE_CASE__ ,\t\t\t)\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tList[Any]\t = vocab_file\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tTuple\t = spm.SentencePieceProcessor(**self.sp_model_kwargs )\r\n\t\t\t\t\t\t\t\t\t\t\tself.sp_model.Load(SCREAMING_SNAKE_CASE__ )\r\n\r\n\r\n\r\n\t\t\t\t@property\r\n\t\t\t\tdef __magic_name__ (self ) ->\t\t\t\t\t\tTuple:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\treturn self.sp_model.get_piece_size()\r\n\r\n\r\n\r\n\t\t\t\tdef __magic_name__ (self ) ->\t\t\t\t\t\tTuple:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tList[str]\t = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}\r\n\t\t\t\t\t\t\t\t\t\t\tvocab.update(self.added_tokens_encoder )\r\n\t\t\t\t\t\t\t\t\t\t\treturn vocab\r\n\r\n\r\n\r\n\t\t\t\tdef __getstate__(self ) ->\t\t\t\t\t\tUnion[str, Any]:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tOptional[Any]\t = self.__dict__.copy()\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tTuple\t = None\r\n\t\t\t\t\t\t\t\t\t\t\treturn state\r\n\r\n\r\n\r\n\t\t\t\tdef __setstate__(self ,\t\t\tSCREAMING_SNAKE_CASE__ ) ->\t\t\t\t\t\tAny:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tDict\t = d\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t# for backward compatibility\r\n\t\t\t\t\t\t\t\t\t\t\tif not hasattr(self ,\t\t\t\"\"\"sp_model_kwargs\"\"\" ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tOptional[Any]\t = {}\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tstr\t = spm.SentencePieceProcessor(**self.sp_model_kwargs )\r\n\t\t\t\t\t\t\t\t\t\t\tself.sp_model.Load(self.vocab_file )\r\n\r\n\r\n\r\n\t\t\t\tdef __magic_name__ (self ,\t\t\tSCREAMING_SNAKE_CASE__ ) ->\t\t\t\t\t\tList[str]:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\treturn self.sp_model.encode(SCREAMING_SNAKE_CASE__ ,\t\t\tout_type=SCREAMING_SNAKE_CASE__ )\r\n\r\n\r\n\r\n\t\t\t\tdef __magic_name__ (self ,\t\t\tSCREAMING_SNAKE_CASE__ ) ->\t\t\t\t\t\tList[str]:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\treturn self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE__ )\r\n\r\n\r\n\r\n\t\t\t\tdef __magic_name__ (self ,\t\t\tSCREAMING_SNAKE_CASE__ ) ->\t\t\t\t\t\tAny:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"simple 
docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tList[Any]\t = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )\r\n\t\t\t\t\t\t\t\t\t\t\treturn token\r\n\r\n\r\n\r\n\t\t\t\tdef __magic_name__ (self ,\t\t\tSCREAMING_SNAKE_CASE__ ) ->\t\t\t\t\t\tList[Any]:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tTuple\t = []\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tList[str]\t = \"\"\"\"\"\"\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tAny\t = False\r\n\t\t\t\t\t\t\t\t\t\t\tfor token in tokens:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# make sure that special tokens are not decoded using sentencepiece model\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif token in self.all_special_tokens:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif not prev_is_special:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tout_string += \" \"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tout_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tUnion[str, Any]\t = True\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tOptional[Any]\t = []\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcurrent_sub_tokens.append(SCREAMING_SNAKE_CASE__ )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tOptional[int]\t = False\r\n\t\t\t\t\t\t\t\t\t\t\tout_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ )\r\n\t\t\t\t\t\t\t\t\t\t\treturn out_string.strip()\r\n\r\n\r\n\r\n\t\t\t\tdef __magic_name__ (self ,\t\t\tSCREAMING_SNAKE_CASE__ ,\t\t\tSCREAMING_SNAKE_CASE__ = False ,\t\t\tSCREAMING_SNAKE_CASE__ = None ,\t\t\tSCREAMING_SNAKE_CASE__ = True ,\t\t\t**SCREAMING_SNAKE_CASE__ ,\t\t\t) ->\t\t\t\t\t\tstr:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tUnion[str, Any]\t = kwargs.pop(\"\"\"use_source_tokenizer\"\"\" ,\t\t\tSCREAMING_SNAKE_CASE__ )\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tList[str]\t = self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ,\t\t\tskip_special_tokens=SCREAMING_SNAKE_CASE__ )\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t# To avoid mixing byte-level and unicode for byte-level BPT\r\n\t\t\t\t\t\t\t\t\t\t\t# we need to build string separately for added tokens and byte-level tokens\r\n\t\t\t\t\t\t\t\t\t\t\t# cf. 
https://github.com/huggingface/transformers/issues/1133\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tOptional[int]\t = []\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tstr\t = []\r\n\t\t\t\t\t\t\t\t\t\t\tfor token in filtered_tokens:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif skip_special_tokens and token in self.all_special_ids:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif token in self.added_tokens_encoder:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif current_sub_text:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsub_texts.append(self.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ ) )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tUnion[str, Any]\t = []\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsub_texts.append(SCREAMING_SNAKE_CASE__ )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcurrent_sub_text.append(SCREAMING_SNAKE_CASE__ )\r\n\t\t\t\t\t\t\t\t\t\t\tif current_sub_text:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsub_texts.append(self.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ ) )\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t# Mimic the behavior of the Rust tokenizer:\r\n\t\t\t\t\t\t\t\t\t\t\t# No space before [MASK] and [SEP]\r\n\t\t\t\t\t\t\t\t\t\t\tif spaces_between_special_tokens:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tOptional[Any]\t = re.sub(r\"\"\" (\\[(MASK|SEP)\\])\"\"\" ,\t\t\tr\"\"\"\\1\"\"\" ,\t\t\t\"\"\" \"\"\".join(SCREAMING_SNAKE_CASE__ ) )\r\n\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tAny\t = \"\"\"\"\"\".join(SCREAMING_SNAKE_CASE__ )\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tList[Any]\t = (\r\n\t\t\t\t\t\t\t\t\t\t\t clean_up_tokenization_spaces\r\n\t\t\t\t\t\t\t\t\t\t\t if clean_up_tokenization_spaces is not None\r\n\t\t\t\t\t\t\t\t\t\t\t else self.clean_up_tokenization_spaces\r\n\t\t\t\t\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\tif clean_up_tokenization_spaces:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tUnion[str, Any]\t = self.clean_up_tokenization(SCREAMING_SNAKE_CASE__ )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn clean_text\r\n\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn text\r\n\r\n\r\n\r\n\t\t\t\tdef __magic_name__ (self ,\t\t\tSCREAMING_SNAKE_CASE__ ,\t\t\tSCREAMING_SNAKE_CASE__ = None ) ->\t\t\t\t\t\tTuple[str]:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tif not os.path.isdir(SCREAMING_SNAKE_CASE__ ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlogger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tList[Any]\t = os.path.join(\r\n\t\t\t\t\t\t\t\t\t\t\t SCREAMING_SNAKE_CASE__ ,\t\t\t(filename_prefix + \"\"\"-\"\"\" if filename_prefix else \"\"\"\"\"\") + VOCAB_FILES_NAMES[\"\"\"vocab_file\"\"\"] )\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tif os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcopyfile(self.vocab_file ,\t\t\tSCREAMING_SNAKE_CASE__ )\r\n\t\t\t\t\t\t\t\t\t\t\telif not os.path.isfile(self.vocab_file ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\twith open(SCREAMING_SNAKE_CASE__ ,\t\t\t\"\"\"wb\"\"\" ) as 
fi:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tOptional[int]\t = self.sp_model.serialized_model_proto()\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfi.write(SCREAMING_SNAKE_CASE__ )\r\n\r\n\t\t\t\t\t\t\t\t\t\t\treturn (out_vocab_file,)\r\n\r\n\r\n\r\n\t\t\t\tdef __magic_name__ (self ,\t\t\tSCREAMING_SNAKE_CASE__ ,\t\t\tSCREAMING_SNAKE_CASE__ = None ) ->\t\t\t\t\t\tList[int]:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tif token_ids_a is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn [self.cls_token_id] + token_ids_a + [self.sep_token_id]\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tOptional[Any]\t = [self.cls_token_id]\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tDict\t = [self.sep_token_id]\r\n\t\t\t\t\t\t\t\t\t\t\treturn cls + token_ids_a + sep + token_ids_a + sep\r\n\r\n\r\n\r\n\t\t\t\tdef __magic_name__ (self ,\t\t\tSCREAMING_SNAKE_CASE__ ,\t\t\tSCREAMING_SNAKE_CASE__ = None ,\t\t\tSCREAMING_SNAKE_CASE__ = False ) ->\t\t\t\t\t\tList[int]:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tif already_has_special_tokens:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn super().get_special_tokens_mask(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t token_ids_a=SCREAMING_SNAKE_CASE__ ,\t\t\ttoken_ids_a=SCREAMING_SNAKE_CASE__ ,\t\t\talready_has_special_tokens=SCREAMING_SNAKE_CASE__ )\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tif token_ids_a is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]\r\n\t\t\t\t\t\t\t\t\t\t\treturn [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]\r\n\r\n\r\n\r\n\t\t\t\tdef __magic_name__ (self ,\t\t\tSCREAMING_SNAKE_CASE__ ,\t\t\tSCREAMING_SNAKE_CASE__ = None ) ->\t\t\t\t\t\tList[int]:\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tstr\t = [self.sep_token_id]\r\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t:\t\t\tList[str]\t = [self.cls_token_id]\r\n\t\t\t\t\t\t\t\t\t\t\tif token_ids_a is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn len(cls + token_ids_a + sep ) * [0]\r\n\t\t\t\t\t\t\t\t\t\t\treturn len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":223,"string":"223"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":501,"cells":{"code":{"kind":"string","value":"\"\"\"simple docstring\"\"\"\r\r\r\rfrom typing import TYPE_CHECKING\r\rfrom ...utils import (\r OptionalDependencyNotAvailable,\r _LazyModule,\r is_torch_available,\r)\r\r\rSCREAMING_SNAKE_CASE__:Any\t = {\r \"\"\"configuration_gpt_bigcode\"\"\": [\"\"\"GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP\"\"\", \"\"\"GPTBigCodeConfig\"\"\"],\r}\r\rtry:\r if not is_torch_available():\r raise OptionalDependencyNotAvailable()\rexcept OptionalDependencyNotAvailable:\r pass\relse:\r SCREAMING_SNAKE_CASE__:Dict\t = [\r \"\"\"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST\"\"\",\r \"\"\"GPTBigCodeForSequenceClassification\"\"\",\r \"\"\"GPTBigCodeForTokenClassification\"\"\",\r \"\"\"GPTBigCodeForCausalLM\"\"\",\r \"\"\"GPTBigCodeModel\"\"\",\r \"\"\"GPTBigCodePreTrainedModel\"\"\",\r ]\r\rif TYPE_CHECKING:\r from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig\r\r try:\r if not is_torch_available():\r raise 
OptionalDependencyNotAvailable()\r except OptionalDependencyNotAvailable:\r pass\r else:\r from .modeling_gpt_bigcode import (\r GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,\r GPTBigCodeForCausalLM,\r GPTBigCodeForSequenceClassification,\r GPTBigCodeForTokenClassification,\r GPTBigCodeModel,\r GPTBigCodePreTrainedModel,\r )\r\r\relse:\r import sys\r\r SCREAMING_SNAKE_CASE__:Optional[Any]\t = _LazyModule(__name__, globals()[\"\"\"__file__\"\"\"], _import_structure, module_spec=__spec__)\r"},"code_codestyle":{"kind":"number","value":720,"string":"720"},"style_context":{"kind":"string","value":"\"\"\"simple docstring\"\"\"\r\r\r\rfrom __future__ import annotations\r\r\r\rdef _lowerCamelCase( a ,\t\t\t\t\t\ta ,\t\t\t\t\t\ta\t\t\t\t\t\t\t):\r if len(a\t\t\t\t\t\t\t) == 0:\r raise ValueError(\"find_max() arg is an empty sequence\"\t\t\t\t\t\t\t)\r if (\r left >= len(a\t\t\t\t\t\t\t)\r or left < -len(a\t\t\t\t\t\t\t)\r or right >= len(a\t\t\t\t\t\t\t)\r or right < -len(a\t\t\t\t\t\t\t)\r ):\r raise IndexError(\"list index out of range\"\t\t\t\t\t\t\t)\r if left == right:\r return nums[left]\r __a\t\t\t =\t\t\t\t\t\t(left + right) >> 1 # the middle\r __a\t\t\t =\t\t\t\t\t\tfind_max(a ,\t\t\t\t\t\ta ,\t\t\t\t\t\ta\t\t\t\t\t\t\t) # find max in range[left, mid]\r __a\t\t\t =\t\t\t\t\t\tfind_max(a ,\t\t\t\t\t\tmid + 1 ,\t\t\t\t\t\ta\t\t\t\t\t\t\t) # find max in range[mid + 1, right]\r\r return left_max if left_max >= right_max else right_max\r\r\rif __name__ == \"__main__\":\r import doctest\r\r doctest.testmod(verbose=True)\r"},"style_context_codestyle":{"kind":"number","value":67,"string":"67"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":502,"cells":{"code":{"kind":"string","value":"\r\r\r\r'''simple docstring'''\r\rimport requests\rfrom bsa import BeautifulSoup\r\r\r\r\r\r\rdef \t\t\t\ta ( UpperCamelCase_ : Union[str, Any] = \"https://www.worldometers.info/coronavirus\"\t\t\t) ->\tdict:\r\t\tsnake_case__\t =BeautifulSoup(requests.get(_lowercase\t\t\t).text ,\t\t\t\t\t'html.parser'\t\t\t)\r\t\tsnake_case__\t =soup.findAll('h1'\t\t\t)\r\t\tsnake_case__\t =soup.findAll('div' ,\t\t\t\t\t{'class': 'maincounter-number'}\t\t\t)\r\t\tkeys += soup.findAll('span' ,\t\t\t\t\t{'class': 'panel-title'}\t\t\t)\r\t\tvalues += soup.findAll('div' ,\t\t\t\t\t{'class': 'number-table-main'}\t\t\t)\r\t\treturn {key.text.strip(): value.text.strip() for key, value in zip(_lowercase ,\t\t\t\t\t_lowercase\t\t\t)}\r\r\rif __name__ == \"__main__\":\r\t\t\tprint('''\\033[1m''' + '''COVID-19 Status of the World''' + '''\\033[0m\\n''')\r\t\t\tfor key, value in world_covidaa_stats().items():\r\t\t\t\t\t\tprint(f\"\"\"{key}\\n{value}\\n\"\"\")\r\r\r\r\r"},"code_codestyle":{"kind":"number","value":538,"string":"538"},"style_context":{"kind":"string","value":"\n'''simple docstring'''\n\n\nimport importlib\n\nimport torch\nimport yaml\nfrom omegaconf import OmegaConf\nfrom taming.models.vqgan import VQModel\n\n\n\n\ndef lowercase_\t\t\t\t\t\t( _lowercase ,\t\t\t\t\t\t\t_lowercase=False\t\t\t\t\t) ->\t\t\t\tDict:\n\t\t\t\t\t'''simple docstring'''\n\n\t\t\t\t\tlowerCamelCase_ : Tuple =\t\tOmegaConf.load(_lowercase\t\t\t\t\t)\n\t\t\t\t\tif display:\n\t\t\t\t\t\t\t\t\t\tprint(yaml.dump(OmegaConf.to_container(_lowercase\t\t\t\t\t)\t\t\t\t\t)\t\t\t\t\t)\n\t\t\t\t\treturn config\n\n\n\n\ndef lowercase_\t\t\t\t\t\t( _lowercase ,\t\t\t\t\t\t\t_lowercase=None ,\t\t\t\t\t\t\t_lowercase=None\t\t\t\t\t) ->\t\t\t\tOptional[int]:\n\t\t\t\t\t'''simple docstring'''\n\n\t\t\t\t\tif conf_path is None:\n\t\t\t\t\t\t\t\t\t\tlowerCamelCase_ 
: int =\t\t'''./model_checkpoints/vqgan_only.yaml'''\n\t\t\t\t\tlowerCamelCase_ : Dict =\t\tload_config(_lowercase ,\t\t\t\t\t\t\tdisplay=_lowercase\t\t\t\t\t)\n\t\t\t\t\tlowerCamelCase_ : List[str] =\t\tVQModel(**config.model.params\t\t\t\t\t)\n\t\t\t\t\tif ckpt_path is None:\n\t\t\t\t\t\t\t\t\t\tlowerCamelCase_ : int =\t\t'''./model_checkpoints/vqgan_only.pt'''\n\t\t\t\t\tlowerCamelCase_ : Union[str, Any] =\t\ttorch.load(_lowercase ,\t\t\t\t\t\t\tmap_location=_lowercase\t\t\t\t\t)\n\t\t\t\t\tif \".ckpt\" in ckpt_path:\n\t\t\t\t\t\t\t\t\t\tlowerCamelCase_ : str =\t\tsd['''state_dict''']\n\t\t\t\t\tmodel.load_state_dict(_lowercase ,\t\t\t\t\t\t\tstrict=_lowercase\t\t\t\t\t)\n\t\t\t\t\tmodel.to(_lowercase\t\t\t\t\t)\n\t\t\t\t\tdel sd\n\t\t\t\t\treturn model\n\n\n\n\ndef lowercase_\t\t\t\t\t\t( _lowercase ,\t\t\t\t\t\t\t_lowercase\t\t\t\t\t) ->\t\t\t\tList[str]:\n\t\t\t\t\t'''simple docstring'''\n\n\t\t\t\t\tlowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : Any =\t\tmodel.encode(_lowercase\t\t\t\t\t)\n\t\t\t\t\tprint(F\"\"\"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}\"\"\"\t\t\t\t\t)\n\t\t\t\t\tlowerCamelCase_ : Any =\t\tmodel.decode(_lowercase\t\t\t\t\t)\n\t\t\t\t\treturn xrec\n\n\n\n\ndef lowercase_\t\t\t\t\t\t( _lowercase ,\t\t\t\t\t\t\t_lowercase=False\t\t\t\t\t) ->\t\t\t\tAny:\n\t\t\t\t\t'''simple docstring'''\n\n\t\t\t\t\tlowerCamelCase_, lowerCamelCase_ : Any =\t\tstring.rsplit('''.''' ,\t\t\t\t\t\t\t1\t\t\t\t\t)\n\t\t\t\t\tif reload:\n\t\t\t\t\t\t\t\t\t\tlowerCamelCase_ : int =\t\timportlib.import_module(_lowercase\t\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\timportlib.reload(_lowercase\t\t\t\t\t)\n\t\t\t\t\treturn getattr(importlib.import_module(_lowercase ,\t\t\t\t\t\t\tpackage=_lowercase\t\t\t\t\t) ,\t\t\t\t\t\t\tcls\t\t\t\t\t)\n\n\n\n\ndef lowercase_\t\t\t\t\t\t( _lowercase\t\t\t\t\t) ->\t\t\t\tList[str]:\n\t\t\t\t\t'''simple docstring'''\n\n\t\t\t\t\tif \"target\" not in config:\n\t\t\t\t\t\t\t\t\t\traise KeyError('''Expected key `target` to instantiate.'''\t\t\t\t\t)\n\t\t\t\t\treturn get_obj_from_str(config['''target''']\t\t\t\t\t)(**config.get('''params''' ,\t\t\t\t\t\t\t{}\t\t\t\t\t)\t\t\t\t\t)\n\n\n\n\ndef lowercase_\t\t\t\t\t\t( _lowercase ,\t\t\t\t\t\t\t_lowercase ,\t\t\t\t\t\t\t_lowercase=True ,\t\t\t\t\t\t\t_lowercase=True\t\t\t\t\t) ->\t\t\t\tAny:\n\t\t\t\t\t'''simple docstring'''\n\n\t\t\t\t\tlowerCamelCase_ : int =\t\tinstantiate_from_config(_lowercase\t\t\t\t\t)\n\t\t\t\t\tif sd is not None:\n\t\t\t\t\t\t\t\t\t\tmodel.load_state_dict(_lowercase\t\t\t\t\t)\n\t\t\t\t\tif gpu:\n\t\t\t\t\t\t\t\t\t\tmodel.cuda()\n\t\t\t\t\tif eval_mode:\n\t\t\t\t\t\t\t\t\t\tmodel.eval()\n\t\t\t\t\treturn {\"model\": model}\n\n\n\n\n\n\ndef lowercase_\t\t\t\t\t\t( _lowercase ,\t\t\t\t\t\t\t_lowercase ,\t\t\t\t\t\t\t_lowercase ,\t\t\t\t\t\t\t_lowercase\t\t\t\t\t) ->\t\t\t\tTuple:\n\t\t\t\t\t'''simple docstring'''\n\n\t\t\t\t\tif ckpt:\n\t\t\t\t\t\t\t\t\t\tlowerCamelCase_ : List[Any] =\t\ttorch.load(_lowercase ,\t\t\t\t\t\t\tmap_location='''cpu'''\t\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\tlowerCamelCase_ : int =\t\tpl_sd['''global_step''']\n\t\t\t\t\t\t\t\t\t\tprint(F\"\"\"loaded model from global step {global_step}.\"\"\"\t\t\t\t\t)\n\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tlowerCamelCase_ : Optional[int] =\t\t{'''state_dict''': None}\n\t\t\t\t\t\t\t\t\t\tlowerCamelCase_ : str =\t\tNone\n\t\t\t\t\tlowerCamelCase_ : Any =\t\tload_model_from_config(config.model ,\t\t\t\t\t\t\tpl_sd['''state_dict'''] ,\t\t\t\t\t\t\tgpu=_lowercase 
,\t\t\t\t\t\t\teval_mode=_lowercase\t\t\t\t\t)['''model''']\n\t\t\t\t\treturn model, global_step\n\n\n"},"style_context_codestyle":{"kind":"number","value":422,"string":"422"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":503,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom math import pi\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\tlowercase\t\t\t\t(_A ,\t\t\t\t\t\t_A\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n return 2 * pi * radius * (angle / 3_6_0)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(arc_length(90, 10))\r\n\r\n"},"code_codestyle":{"kind":"number","value":716,"string":"716"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport math\r\nfrom datetime import datetime, timedelta\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\tlowercase\t\t\t\t(_A\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n _lowerCAmelCase :\t\t\t\t\t\t\tOptional[Any] \t\t\t= year % 1_9\r\n _lowerCAmelCase :\t\t\t\t\t\t\tAny \t\t\t= year % 4\r\n _lowerCAmelCase :\t\t\t\t\t\t\tOptional[int] \t\t\t= year % 7\r\n _lowerCAmelCase :\t\t\t\t\t\t\tint \t\t\t= math.floor(year / 1_0_0\t\t\t\t\t)\r\n _lowerCAmelCase :\t\t\t\t\t\t\tDict \t\t\t= math.floor((1_3 + 8 * leap_day_inhibits) / 2_5\t\t\t\t\t)\r\n _lowerCAmelCase :\t\t\t\t\t\t\tOptional[Any] \t\t\t= leap_day_inhibits / 4\r\n _lowerCAmelCase :\t\t\t\t\t\t\tDict \t\t\t= (\r\n 1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number\r\n ) % 3_0\r\n _lowerCAmelCase :\t\t\t\t\t\t\tList[Any] \t\t\t= (4 + leap_day_inhibits - leap_day_reinstall_number) % 7\r\n\r\n # days to be added to March 21\r\n _lowerCAmelCase :\t\t\t\t\t\t\tDict \t\t\t= (1_9 * metonic_cycle + secular_moon_shift) % 3_0\r\n\r\n # PHM -> Paschal Full Moon\r\n _lowerCAmelCase :\t\t\t\t\t\t\tUnion[str, Any] \t\t\t= (\r\n 2 * julian_leap_year\r\n + 4 * non_leap_year\r\n + 6 * days_to_add\r\n + century_starting_point\r\n ) % 7\r\n\r\n if days_to_add == 2_9 and days_from_phm_to_sunday == 6:\r\n return datetime(_A ,\t\t\t\t\t\t4 ,\t\t\t\t\t\t1_9\t\t\t\t\t)\r\n elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:\r\n return datetime(_A ,\t\t\t\t\t\t4 ,\t\t\t\t\t\t1_8\t\t\t\t\t)\r\n else:\r\n return datetime(_A ,\t\t\t\t\t\t3 ,\t\t\t\t\t\t2_2\t\t\t\t\t) + timedelta(\r\n days=int(days_to_add + days_from_phm_to_sunday\t\t\t\t\t)\t\t\t\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n for year in (19_94, 20_00, 20_10, 20_21, 20_23):\r\n lowerCAmelCase : List[str]\t\t\t\t\t=\t\t\t\t\t\t\"\"\"will be\"\"\" if year > datetime.now().year else \"\"\"was\"\"\"\r\n print(F'''Easter in {year} {tense} {gauss_easter(year)}''')\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":630,"string":"630"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":504,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom datetime import datetime\r\n\r\nimport requests\r\n\r\ndef snake_case (UpperCAmelCase__ )\t\t\t->\t\t\t\t\t\t\tbytes:\r\n UpperCamelCase_: Any =\t\t\t\t\t\t\t'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='\r\n UpperCamelCase_: int =\t\t\t\t\t\t\trequests.get(base_url + url ).json()[0]['urls'][0]['src']\r\n return requests.get(UpperCAmelCase__ ).content\r\n\r\n\r\nif __name__ == \"__main__\":\r\n A_\t\t\t\t\t\t:\t\t\t\t\t\tUnion[str, Any]\t\t\t\t\t\t =\t\t\t\t\tinput('Enter Video/IGTV url: 
').strip()\r\n A_\t\t\t\t\t\t:\t\t\t\t\t\tint\t\t\t\t\t\t =\t\t\t\t\tF'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''\r\n with open(file_name, 'wb') as fp:\r\n fp.write(download_video(url))\r\n print(F'''Done. Video saved to disk as {file_name}.''')"},"code_codestyle":{"kind":"number","value":57,"string":"57"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom collections import namedtuple\r\n\r\n__snake_case\t\t\t\t\t\t:\t\t\t\t\tOptional[int]\t = namedtuple('from_to', 'from_ to')\r\n\r\n__snake_case\t\t\t\t\t\t:\t\t\t\t\tUnion[str, Any]\t = {\r\n 'cubicmeter': from_to(1, 1),\r\n 'litre': from_to(0.001, 1_000),\r\n 'kilolitre': from_to(1, 1),\r\n 'gallon': from_to(0.00_454, 264.172),\r\n 'cubicyard': from_to(0.76_455, 1.30_795),\r\n 'cubicfoot': from_to(0.028, 35.3_147),\r\n 'cup': from_to(0.000_236_588, 4_226.75),\r\n}\r\n\r\n\r\n\r\ndef \ta_\t\t\t\t\t\t( __a\t\t,\t\t\t\t__a\t\t,\t\t\t\t__a ):\r\n if from_type not in METRIC_CONVERSION:\r\n raise ValueError(\r\n f'''Invalid \\'from_type\\' value: {from_type!r} Supported values are:\\n'''\r\n + ''', '''.join(__a ) )\r\n if to_type not in METRIC_CONVERSION:\r\n raise ValueError(\r\n f'''Invalid \\'to_type\\' value: {to_type!r}. Supported values are:\\n'''\r\n + ''', '''.join(__a ) )\r\n return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import doctest\r\n\r\n doctest.testmod()\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":571,"string":"571"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":505,"cells":{"code":{"kind":"string","value":"\r\r\r\r\rimport warnings\rfrom collections import OrderedDict\rfrom typing import Mapping\r\rfrom packaging import version\r\rfrom ...configuration_utils import PretrainedConfig\rfrom ...onnx import OnnxConfig\rfrom ...utils import logging\r\r\r__A\t\t\t\t\t\t:\t\t\t\t\t\t\tstr\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\r__A\t\t\t\t\t\t:\t\t\t\t\t\t\tTuple\t\t\t\t\t\t\t= {\r \"\"\"nvidia/segformer-b0-finetuned-ade-512-512\"\"\": (\r \"\"\"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json\"\"\"\r ),\r # See all SegFormer models at https://huggingface.co/models?filter=segformer\r}\r\r\r\r\rclass \t\t\tUpperCAmelCase_ ( A ):\r\r\r\r\r\t\t\t\t\t\t\t'''simple docstring'''\r\r\r\r\t\t\t\t\t\t\ta__\t\t\t\t\t\t = '''segformer'''\r\r\r\r\r\r\t\t\t\t\t\t\tdef __init__(\t\t\t\t\tself :\tOptional[Any] , a :\tOptional[int]=3 , a :\tint=4 , a :\tTuple=[2, 2, 2, 2] , a :\tstr=[8, 4, 2, 1] , a :\tUnion[str, Any]=[32, 64, 160, 256] , a :\tDict=[7, 3, 3, 3] , a :\tOptional[Any]=[4, 2, 2, 2] , a :\tList[str]=[1, 2, 5, 8] , a :\tint=[4, 4, 4, 4] , a :\tAny=\"gelu\" , a :\tOptional[Any]=0.0 , a :\tAny=0.0 , a :\tAny=0.1 , a :\tUnion[str, Any]=0.02 , a :\tint=0.1 , a :\tTuple=1E-6 , a :\tOptional[int]=256 , a :\tDict=255 , **a :\tUnion[str, Any] , )\t\t\t->\t\t\t\t\tOptional[int]:\r\t\t\t\t\t\t\t\t\t\tsuper().__init__(**a\t\t\t)\r\r\t\t\t\t\t\t\t\t\t\tif \"reshape_last_stage\" in kwargs and kwargs[\"reshape_last_stage\"] is False:\r\t\t\t\t\t\t\t\t\t\t\t\t\twarnings.warn(\r\t\t\t\t\t\t\t\t\t\t\t\t\t \"\"\"Reshape_last_stage is set to False in this config. 
- 1) * mel.hop_length,)\t\t\t\t\t\t\t)\r\t\t\t\t\t\t\t\tlowerCamelCase\t =torch.Generator(device=UpperCAmelCase_\t\t\t\t\t\t\t).manual_seed(42\t\t\t\t\t\t\t)\r\t\t\t\t\t\t\t\tlowerCamelCase\t =pipe(raw_audio=UpperCAmelCase_ , generator=UpperCAmelCase_ , start_step=5 , steps=10\t\t\t\t\t\t\t)\r\t\t\t\t\t\t\t\tlowerCamelCase\t =output.images[0]\r\r\t\t\t\t\t\t\t\tassert (\r\t\t\t\t\t\t\t\t image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]\r\t\t\t\t\t\t\t\t and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]\r\t\t\t\t\t\t\t\t)\r\t\t\t\t\t\t\t\tlowerCamelCase\t =np.frombuffer(image.tobytes() , dtype=\"\"\"uint8\"\"\"\t\t\t\t\t\t\t)[:10]\r\t\t\t\t\t\t\t\tlowerCamelCase\t =np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121]\t\t\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\tassert np.abs(image_slice.flatten() - expected_slice\t\t\t\t\t\t\t).max() == 0\r\r\t\t\t\t\t\t\t\tlowerCamelCase\t =self.dummy_unet_condition\r\t\t\t\t\t\t\t\tlowerCamelCase\t =AudioDiffusionPipeline(\r\t\t\t\t\t\t\t\t vqvae=self.dummy_vqvae_and_unet[0] , unet=UpperCAmelCase_ , mel=UpperCAmelCase_ , scheduler=UpperCAmelCase_\t\t\t\t\t\t\t)\r\t\t\t\t\t\t\t\tlowerCamelCase\t =pipe.to(UpperCAmelCase_\t\t\t\t\t\t\t)\r\t\t\t\t\t\t\t\tpipe.set_progress_bar_config(disable=UpperCAmelCase_\t\t\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\tnp.random.seed(0\t\t\t\t\t\t\t)\r\t\t\t\t\t\t\t\tlowerCamelCase\t =torch.rand((1, 1, 10)\t\t\t\t\t\t\t)\r\t\t\t\t\t\t\t\tlowerCamelCase\t =pipe(generator=UpperCAmelCase_ , encoding=UpperCAmelCase_\t\t\t\t\t\t\t)\r\t\t\t\t\t\t\t\tlowerCamelCase\t =output.images[0]\r\t\t\t\t\t\t\t\tlowerCamelCase\t =np.frombuffer(image.tobytes() , dtype=\"\"\"uint8\"\"\"\t\t\t\t\t\t\t)[:10]\r\t\t\t\t\t\t\t\tlowerCamelCase\t =np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111]\t\t\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\tassert np.abs(image_slice.flatten() - expected_slice\t\t\t\t\t\t\t).max() == 0\r\r\r\r\r@slow\r@require_torch_gpu\rclass __A (\t\t\t\t\t\tunittest.TestCase ):\r\r\r\t\t\t\tdef _snake_case (\t\tself\t\t\t\t\t\t\t):\r\t\t\t\t\t\t\t\t# clean up the VRAM after each test\r\t\t\t\t\t\t\t\tsuper().tearDown()\r\t\t\t\t\t\t\t\tgc.collect()\r\t\t\t\t\t\t\t\ttorch.cuda.empty_cache()\r\r\r\t\t\t\tdef _snake_case (\t\tself\t\t\t\t\t\t\t):\r\t\t\t\t\t\t\t\tlowerCamelCase\t =torch_device\r\r\t\t\t\t\t\t\t\tlowerCamelCase\t =DiffusionPipeline.from_pretrained(\"\"\"teticio/audio-diffusion-ddim-256\"\"\"\t\t\t\t\t\t\t)\r\t\t\t\t\t\t\t\tlowerCamelCase\t =pipe.to(UpperCAmelCase_\t\t\t\t\t\t\t)\r\t\t\t\t\t\t\t\tpipe.set_progress_bar_config(disable=UpperCAmelCase_\t\t\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\tlowerCamelCase\t =torch.Generator(device=UpperCAmelCase_\t\t\t\t\t\t\t).manual_seed(42\t\t\t\t\t\t\t)\r\t\t\t\t\t\t\t\tlowerCamelCase\t =pipe(generator=UpperCAmelCase_\t\t\t\t\t\t\t)\r\t\t\t\t\t\t\t\tlowerCamelCase\t =output.audios[0]\r\t\t\t\t\t\t\t\tlowerCamelCase\t =output.images[0]\r\r\t\t\t\t\t\t\t\tassert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)\r\t\t\t\t\t\t\t\tassert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]\r\t\t\t\t\t\t\t\tlowerCamelCase\t =np.frombuffer(image.tobytes() , dtype=\"\"\"uint8\"\"\"\t\t\t\t\t\t\t)[:10]\r\t\t\t\t\t\t\t\tlowerCamelCase\t =np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26]\t\t\t\t\t\t\t)\r\r\t\t\t\t\t\t\t\tassert np.abs(image_slice.flatten() - expected_slice\t\t\t\t\t\t\t).max() == 
0
"},"code_codestyle":{"kind":"number","value":269,"string":"269"},"style_context":{"kind":"string","value":"
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

UpperCAmelCase__: str = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__: List[Any] = ['''FNetTokenizer''']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__: Any = ['''FNetTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__: Dict = [
        '''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''FNetForMaskedLM''',
        '''FNetForMultipleChoice''',
        '''FNetForNextSentencePrediction''',
        '''FNetForPreTraining''',
        '''FNetForQuestionAnswering''',
        '''FNetForSequenceClassification''',
        '''FNetForTokenClassification''',
        '''FNetLayer''',
        '''FNetModel''',
        '''FNetPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
FNetModel,\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t FNetPreTrainedModel,\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t)\r\r\relse:\r\t\t\t\t\t\t\timport sys\r\r\t\t\t\t\t\t\tUpperCAmelCase__\t\t\t\t\t\t:\t\t\tOptional[int]\t\t\t\t\t\t\t\t\t=_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)\r\r\r\r\r\r"},"style_context_codestyle":{"kind":"number","value":269,"string":"269"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":512,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\n'''simple docstring'''\r\n\r\n\r\nimport argparse\r\nimport os\r\n\r\nimport torch\r\n\r\nfrom transformers.utils import WEIGHTS_NAME\r\n\r\n\r\nlowerCAmelCase :\tOptional[int] \t\t\t\t=['''small''', '''medium''', '''large''']\r\n\r\nlowerCAmelCase :\tList[str] \t\t\t\t='''lm_head.decoder.weight'''\r\nlowerCAmelCase :\tOptional[Any] \t\t\t\t='''lm_head.weight'''\r\ndef \t\t\t\tUpperCAmelCase_ (\t\t__lowerCamelCase : str ,__lowerCamelCase : str\t\t\t\t\t):\r\n lowercase_ :Any =\t\t\t\t\ttorch.load(__lowerCamelCase\t\t\t\t\t)\r\n lowercase_ :Tuple =\t\t\t\t\td.pop(__lowerCamelCase\t\t\t\t\t)\r\n os.makedirs(__lowerCamelCase ,exist_ok=__lowerCamelCase\t\t\t\t\t)\r\n torch.save(__lowerCamelCase ,os.path.join(__lowerCamelCase ,__lowerCamelCase\t\t\t\t\t)\t\t\t\t\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n lowerCAmelCase :\tint \t\t\t\t=argparse.ArgumentParser()\r\n parser.add_argument('''--dialogpt_path''', default='''.''', type=str)\r\n lowerCAmelCase :\tList[str] \t\t\t\t=parser.parse_args()\r\n for MODEL in DIALOGPT_MODELS:\r\n lowerCAmelCase :\tint \t\t\t\t=os.path.join(args.dialogpt_path, F'''{MODEL}_ft.pkl''')\r\n lowerCAmelCase :\tList[str] \t\t\t\t=F'''./DialoGPT-{MODEL}'''\r\n convert_dialogpt_checkpoint(\r\n checkpoint_path,\r\n pytorch_dump_folder_path,\r\n )\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":172,"string":"172"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\n'''simple docstring'''\r\n\r\n\r\nfrom collections import OrderedDict\r\nfrom typing import Mapping\r\n\r\nfrom ...configuration_utils import PretrainedConfig\r\nfrom ...onnx import OnnxConfig\r\nfrom ...utils import logging\r\n\r\n\r\nlowerCAmelCase :\tDict \t\t\t\t=logging.get_logger(__name__)\r\n\r\nlowerCAmelCase :\tList[Any] \t\t\t\t={\r\n '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',\r\n '''distilbert-base-uncased-distilled-squad''': (\r\n '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''\r\n ),\r\n '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',\r\n '''distilbert-base-cased-distilled-squad''': (\r\n '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''\r\n ),\r\n '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',\r\n '''distilbert-base-multilingual-cased''': (\r\n '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''\r\n ),\r\n '''distilbert-base-uncased-finetuned-sst-2-english''': (\r\n '''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''\r\n ),\r\n}\r\nclass a_ (\t\t\t\t\t\t\t_lowerCAmelCase\t\t\t\t):\r\n __A\t\t\t\t\t\t= \"distilbert\"\r\n __A\t\t\t\t\t\t= {\r\n \"hidden_size\": \"dim\",\r\n \"num_attention_heads\": \"n_heads\",\r\n \"num_hidden_layers\": \"n_layers\",\r\n }\r\n\r\n\r\n\r\n\r\n\r\n def 
__init__(\t\t\t\t\tself\t\t\t\t\t\t: int , lowercase\t\t\t\t\t\t: Union[str, Any]=30_522 , lowercase\t\t\t\t\t\t: List[Any]=512 , lowercase\t\t\t\t\t\t: Tuple=False , lowercase\t\t\t\t\t\t: Dict=6 , lowercase\t\t\t\t\t\t: List[str]=12 , lowercase\t\t\t\t\t\t: Union[str, Any]=768 , lowercase\t\t\t\t\t\t: int=4 * 768 , lowercase\t\t\t\t\t\t: Union[str, Any]=0.1 , lowercase\t\t\t\t\t\t: List[str]=0.1 , lowercase\t\t\t\t\t\t: List[str]=\"gelu\" , lowercase\t\t\t\t\t\t: Tuple=0.02 , lowercase\t\t\t\t\t\t: int=0.1 , lowercase\t\t\t\t\t\t: Any=0.2 , lowercase\t\t\t\t\t\t: List[Any]=0 , **lowercase\t\t\t\t\t\t: Optional[Any] , ):\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n lowercase_ :Optional[int] =\t\t\t\t\tvocab_size\r\n lowercase_ :Optional[int] =\t\t\t\t\tmax_position_embeddings\r\n lowercase_ :List[Any] =\t\t\t\t\tsinusoidal_pos_embds\r\n lowercase_ :Dict =\t\t\t\t\tn_layers\r\n lowercase_ :List[str] =\t\t\t\t\tn_heads\r\n lowercase_ :int =\t\t\t\t\tdim\r\n lowercase_ :str =\t\t\t\t\thidden_dim\r\n lowercase_ :Tuple =\t\t\t\t\tdropout\r\n lowercase_ :Any =\t\t\t\t\tattention_dropout\r\n lowercase_ :Optional[int] =\t\t\t\t\tactivation\r\n lowercase_ :Dict =\t\t\t\t\tinitializer_range\r\n lowercase_ :int =\t\t\t\t\tqa_dropout\r\n lowercase_ :Tuple =\t\t\t\t\tseq_classif_dropout\r\n super().__init__(**lowercase , pad_token_id=lowercase\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass a_ (\t\t\t\t\t\t\t_lowerCAmelCase\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n @property\r\n def \t\t\t\tlowercase__ (\t\t\t\t\tself\t\t\t\t\t\t: Union[str, Any]\t\t):\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n if self.task == \"multiple-choice\":\r\n lowercase_ :int =\t\t\t\t\t{0: \"batch\", 1: \"choice\", 2: \"sequence\"}\r\n else:\r\n lowercase_ :Tuple =\t\t\t\t\t{0: \"batch\", 1: \"sequence\"}\r\n return OrderedDict(\r\n [\r\n (\"input_ids\", dynamic_axis),\r\n (\"attention_mask\", dynamic_axis),\r\n ]\t\t)\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":172,"string":"172"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":513,"cells":{"code":{"kind":"string","value":"\r\"\"\"simple docstring\"\"\"\r\r\r\rimport gc\rimport random\rimport unittest\r\rimport numpy as np\rimport torch\rfrom PIL import Image\rfrom transformers import XLMRobertaTokenizerFast\r\rfrom diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel\rfrom diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP\rfrom diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device\rfrom diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu\r\rfrom ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference\r\r\renable_full_determinism()\r\r\r\r\r\rclass \t\t\t\tlowerCAmelCase (\t\t\t\t\t\t\ta ,\tunittest.TestCase\t\t\t\t):\r \"\"\"simple docstring\"\"\"\r\r\r\r\r\r\r\r __lowercase\t:str\t\t\t\t\t =\t\t\t\t\t\tKandinskyImgaImgPipeline\r __lowercase\t:Tuple\t\t\t\t\t =\t\t\t\t\t\t[\"prompt\", \"image_embeds\", \"negative_image_embeds\", \"image\"]\r __lowercase\t:Optional[Any]\t\t\t\t\t =\t\t\t\t\t\t[\r \"prompt\",\r \"negative_prompt\",\r \"image_embeds\",\r \"negative_image_embeds\",\r \"image\",\r ]\r __lowercase\t:Any\t\t\t\t\t =\t\t\t\t\t\t[\r \"generator\",\r \"height\",\r \"width\",\r \"strength\",\r \"guidance_scale\",\r \"negative_prompt\",\r \"num_inference_steps\",\r \"return_dict\",\r \"guidance_scale\",\r 
\"num_images_per_prompt\",\r \"output_type\",\r \"return_dict\",\r ]\r __lowercase\t:Tuple\t\t\t\t\t =\t\t\t\t\t\tFalse\r\r\r\r\r @property\r def _lowerCAmelCase\t\t(\t\t\tself )\t\t\t\t-> Union[str, Any]:\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r return 32\r\r\r\r\r @property\r def _lowerCAmelCase\t\t(\t\t\tself )\t\t\t\t-> Any:\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r return 32\r\r\r\r\r @property\r def _lowerCAmelCase\t\t(\t\t\tself )\t\t\t\t-> Union[str, Any]:\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r return self.time_input_dim\r\r\r\r\r @property\r def _lowerCAmelCase\t\t(\t\t\tself )\t\t\t\t-> List[str]:\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r return self.time_input_dim * 4\r\r\r\r\r @property\r def _lowerCAmelCase\t\t(\t\t\tself )\t\t\t\t-> Union[str, Any]:\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r return 100\r\r\r\r\r @property\r def _lowerCAmelCase\t\t(\t\t\tself )\t\t\t\t-> Any:\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r lowerCamelCase_\t\t\t\t\t =\t\tXLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )\r return tokenizer\r\r\r\r\r @property\r def _lowerCAmelCase\t\t(\t\t\tself )\t\t\t\t-> Optional[Any]:\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r torch.manual_seed(0 )\r lowerCamelCase_\t\t\t\t\t =\t\tMCLIPConfig(\r numDims=self.cross_attention_dim ,\t\ttransformerDimensions=self.text_embedder_hidden_size ,\t\thidden_size=self.text_embedder_hidden_size ,\t\tintermediate_size=37 ,\t\tnum_attention_heads=4 ,\t\tnum_hidden_layers=5 ,\t\tvocab_size=1_005 ,\t\t)\r\r lowerCamelCase_\t\t\t\t\t =\t\tMultilingualCLIP(UpperCamelCase__ )\r lowerCamelCase_\t\t\t\t\t =\t\ttext_encoder.eval()\r\r return text_encoder\r\r\r\r\r @property\r def _lowerCAmelCase\t\t(\t\t\tself )\t\t\t\t-> Any:\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r torch.manual_seed(0 )\r\r lowerCamelCase_\t\t\t\t\t =\t\t{\r '''in_channels''': 4,\r # Out channels is double in channels because predicts mean and variance\r '''out_channels''': 8,\r '''addition_embed_type''': '''text_image''',\r '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),\r '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),\r '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',\r '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),\r '''layers_per_block''': 1,\r '''encoder_hid_dim''': self.text_embedder_hidden_size,\r '''encoder_hid_dim_type''': '''text_image_proj''',\r '''cross_attention_dim''': self.cross_attention_dim,\r '''attention_head_dim''': 4,\r '''resnet_time_scale_shift''': '''scale_shift''',\r '''class_embed_type''': None,\r }\r\r lowerCamelCase_\t\t\t\t\t =\t\tUNetaDConditionModel(**UpperCamelCase__ )\r return model\r\r\r\r\r @property\r def _lowerCAmelCase\t\t(\t\t\tself )\t\t\t\t-> Tuple:\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r return {\r \"block_out_channels\": [32, 64],\r \"down_block_types\": [\"DownEncoderBlock2D\", \"AttnDownEncoderBlock2D\"],\r \"in_channels\": 3,\r \"latent_channels\": 4,\r \"layers_per_block\": 1,\r \"norm_num_groups\": 8,\r \"norm_type\": \"spatial\",\r \"num_vq_embeddings\": 12,\r \"out_channels\": 3,\r \"up_block_types\": [\r \"AttnUpDecoderBlock2D\",\r \"UpDecoderBlock2D\",\r ],\r \"vq_embed_dim\": 4,\r }\r\r\r\r\r @property\r def _lowerCAmelCase\t\t(\t\t\tself )\t\t\t\t-> Tuple:\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r torch.manual_seed(0 )\r lowerCamelCase_\t\t\t\t\t 
=\t\tVQModel(**self.dummy_movq_kwargs )\r return model\r\r\r\r\r def _lowerCAmelCase\t\t(\t\t\tself )\t\t\t\t-> int:\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r lowerCamelCase_\t\t\t\t\t =\t\tself.dummy_text_encoder\r lowerCamelCase_\t\t\t\t\t =\t\tself.dummy_tokenizer\r lowerCamelCase_\t\t\t\t\t =\t\tself.dummy_unet\r lowerCamelCase_\t\t\t\t\t =\t\tself.dummy_movq\r\r lowerCamelCase_\t\t\t\t\t =\t\t{\r '''num_train_timesteps''': 1_000,\r '''beta_schedule''': '''linear''',\r '''beta_start''': 0.00_085,\r '''beta_end''': 0.012,\r '''clip_sample''': False,\r '''set_alpha_to_one''': False,\r '''steps_offset''': 0,\r '''prediction_type''': '''epsilon''',\r '''thresholding''': False,\r }\r\r lowerCamelCase_\t\t\t\t\t =\t\tDDIMScheduler(**UpperCamelCase__ )\r\r lowerCamelCase_\t\t\t\t\t =\t\t{\r '''text_encoder''': text_encoder,\r '''tokenizer''': tokenizer,\r '''unet''': unet,\r '''scheduler''': scheduler,\r '''movq''': movq,\r }\r\r return components\r\r\r\r\r def _lowerCAmelCase\t\t(\t\t\tself ,\t\tUpperCamelCase__ ,\t\tUpperCamelCase__=0 )\t\t\t\t-> Any:\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r lowerCamelCase_\t\t\t\t\t =\t\tfloats_tensor((1, self.cross_attention_dim) ,\t\trng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )\r lowerCamelCase_\t\t\t\t\t =\t\tfloats_tensor((1, self.cross_attention_dim) ,\t\trng=random.Random(seed + 1 ) ).to(UpperCamelCase__ )\r # create init_image\r lowerCamelCase_\t\t\t\t\t =\t\tfloats_tensor((1, 3, 64, 64) ,\t\trng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )\r lowerCamelCase_\t\t\t\t\t =\t\timage.cpu().permute(0 ,\t\t2 ,\t\t3 ,\t\t1 )[0]\r lowerCamelCase_\t\t\t\t\t =\t\tImage.fromarray(np.uinta(UpperCamelCase__ ) ).convert('''RGB''' ).resize((256, 256) )\r\r if str(UpperCamelCase__ ).startswith('''mps''' ):\r lowerCamelCase_\t\t\t\t\t =\t\ttorch.manual_seed(UpperCamelCase__ )\r else:\r lowerCamelCase_\t\t\t\t\t =\t\ttorch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )\r lowerCamelCase_\t\t\t\t\t =\t\t{\r '''prompt''': '''horse''',\r '''image''': init_image,\r '''image_embeds''': image_embeds,\r '''negative_image_embeds''': negative_image_embeds,\r '''generator''': generator,\r '''height''': 64,\r '''width''': 64,\r '''num_inference_steps''': 10,\r '''guidance_scale''': 7.0,\r '''strength''': 0.2,\r '''output_type''': '''np''',\r }\r return inputs\r\r\r\r\r def _lowerCAmelCase\t\t(\t\t\tself )\t\t\t\t-> List[Any]:\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r lowerCamelCase_\t\t\t\t\t =\t\t'''cpu'''\r\r lowerCamelCase_\t\t\t\t\t =\t\tself.get_dummy_components()\r\r lowerCamelCase_\t\t\t\t\t =\t\tself.pipeline_class(**UpperCamelCase__ )\r lowerCamelCase_\t\t\t\t\t =\t\tpipe.to(UpperCamelCase__ )\r\r pipe.set_progress_bar_config(disable=UpperCamelCase__ )\r\r lowerCamelCase_\t\t\t\t\t =\t\tpipe(**self.get_dummy_inputs(UpperCamelCase__ ) )\r lowerCamelCase_\t\t\t\t\t =\t\toutput.images\r\r lowerCamelCase_\t\t\t\t\t =\t\tpipe(\r **self.get_dummy_inputs(UpperCamelCase__ ) ,\t\treturn_dict=UpperCamelCase__ ,\t\t)[0]\r\r lowerCamelCase_\t\t\t\t\t =\t\timage[0, -3:, -3:, -1]\r lowerCamelCase_\t\t\t\t\t =\t\timage_from_tuple[0, -3:, -3:, -1]\r\r assert image.shape == (1, 64, 64, 3)\r\r lowerCamelCase_\t\t\t\t\t =\t\tnp.array(\r [0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )\r assert (\r np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2\r ), F\"\"\" expected_slice {expected_slice}, but got 
{image_slice.flatten()}\"\"\"\r assert (\r np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2\r ), F\"\"\" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}\"\"\"\r\r\r\r\r\r@slow\r@require_torch_gpu\rclass \t\t\t\tlowerCAmelCase (\t\t\t\t\t\t\tunittest.TestCase\t\t\t\t):\r \"\"\"simple docstring\"\"\"\r\r\r\r\r\r\r\r def _lowerCAmelCase\t\t(\t\t\tself )\t\t\t\t-> Union[str, Any]:\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r super().tearDown()\r gc.collect()\r torch.cuda.empty_cache()\r\r\r\r\r def _lowerCAmelCase\t\t(\t\t\tself )\t\t\t\t-> List[str]:\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r lowerCamelCase_\t\t\t\t\t =\t\tload_numpy(\r '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''\r '''/kandinsky/kandinsky_img2img_frog.npy''' )\r\r lowerCamelCase_\t\t\t\t\t =\t\tload_image(\r '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )\r lowerCamelCase_\t\t\t\t\t =\t\t'''A red cartoon frog, 4k'''\r\r lowerCamelCase_\t\t\t\t\t =\t\tKandinskyPriorPipeline.from_pretrained(\r '''kandinsky-community/kandinsky-2-1-prior''' ,\t\ttorch_dtype=torch.floataa )\r pipe_prior.to(UpperCamelCase__ )\r\r lowerCamelCase_\t\t\t\t\t =\t\tKandinskyImgaImgPipeline.from_pretrained(\r '''kandinsky-community/kandinsky-2-1''' ,\t\ttorch_dtype=torch.floataa )\r lowerCamelCase_\t\t\t\t\t =\t\tpipeline.to(UpperCamelCase__ )\r\r pipeline.set_progress_bar_config(disable=UpperCamelCase__ )\r\r lowerCamelCase_\t\t\t\t\t =\t\ttorch.Generator(device='''cpu''' ).manual_seed(0 )\r lowerCamelCase_ , lowerCamelCase_\t\t\t\t\t =\t\tpipe_prior(\r UpperCamelCase__ ,\t\tgenerator=UpperCamelCase__ ,\t\tnum_inference_steps=5 ,\t\tnegative_prompt='''''' ,\t\t).to_tuple()\r\r lowerCamelCase_\t\t\t\t\t =\t\tpipeline(\r UpperCamelCase__ ,\t\timage=UpperCamelCase__ ,\t\timage_embeds=UpperCamelCase__ ,\t\tnegative_image_embeds=UpperCamelCase__ ,\t\tgenerator=UpperCamelCase__ ,\t\tnum_inference_steps=100 ,\t\theight=768 ,\t\twidth=768 ,\t\tstrength=0.2 ,\t\toutput_type='''np''' ,\t\t)\r\r lowerCamelCase_\t\t\t\t\t =\t\toutput.images[0]\r\r assert image.shape == (768, 768, 3)\r\r assert_mean_pixel_difference(UpperCamelCase__ ,\t\tUpperCamelCase__ )"},"code_codestyle":{"kind":"number","value":66,"string":"66"},"style_context":{"kind":"string","value":"\r\"\"\"simple docstring\"\"\"\r\r\r\rimport copy\rimport os\r\rimport cva\rimport numpy as np\rfrom matplotlib import pyplot as plt\r\r\r\r\r\rclass \t\t\t\tlowerCAmelCase :\r \"\"\"simple docstring\"\"\"\r\r\r\r\r\r\r\r def __init__(\t\t\tself )\t\t\t\t-> Dict:\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r lowerCamelCase_\t\t\t\t\t =\t\t''''''\r lowerCamelCase_\t\t\t\t\t =\t\t''''''\r lowerCamelCase_\t\t\t\t\t =\t\t[]\r lowerCamelCase_\t\t\t\t\t =\t\t0\r lowerCamelCase_\t\t\t\t\t =\t\t256\r lowerCamelCase_\t\t\t\t\t =\t\t0\r lowerCamelCase_\t\t\t\t\t =\t\t0\r lowerCamelCase_\t\t\t\t\t =\t\t0\r lowerCamelCase_\t\t\t\t\t =\t\t0\r\r\r\r\r def _lowerCAmelCase\t\t(\t\t\tself ,\t\tUpperCamelCase__ )\t\t\t\t-> Any:\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r lowerCamelCase_\t\t\t\t\t =\t\tcva.imread(UpperCamelCase__ ,\t\t0 )\r lowerCamelCase_\t\t\t\t\t =\t\tcopy.deepcopy(self.img )\r lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_\t\t\t\t\t =\t\tplt.hist(self.img.ravel() ,\t\t256 ,\t\t[0, 256] ,\t\tlabel='''x''' )\r lowerCamelCase_\t\t\t\t\t =\t\tnp.sum(UpperCamelCase__ )\r for i in range(len(UpperCamelCase__ ) ):\r 
lowerCamelCase_\t\t\t\t\t =\t\tx[i] / self.k\r self.sk += prk\r lowerCamelCase_\t\t\t\t\t =\t\t(self.L - 1) * self.sk\r if self.rem != 0:\r lowerCamelCase_\t\t\t\t\t =\t\tint(last % last )\r lowerCamelCase_\t\t\t\t\t =\t\tint(last + 1 if self.rem >= 0.5 else last )\r self.last_list.append(UpperCamelCase__ )\r lowerCamelCase_\t\t\t\t\t =\t\tint(np.ma.count(self.img ) / self.img[1].size )\r lowerCamelCase_\t\t\t\t\t =\t\tself.img[1].size\r for i in range(self.number_of_cols ):\r for j in range(self.number_of_rows ):\r lowerCamelCase_\t\t\t\t\t =\t\tself.img[j][i]\r if num != self.last_list[num]:\r lowerCamelCase_\t\t\t\t\t =\t\tself.last_list[num]\r cva.imwrite('''output_data/output.jpg''' ,\t\tself.img )\r\r\r\r\r def _lowerCAmelCase\t\t(\t\t\tself )\t\t\t\t-> str:\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r plt.hist(self.img.ravel() ,\t\t256 ,\t\t[0, 256] )\r\r\r\r\r def _lowerCAmelCase\t\t(\t\t\tself )\t\t\t\t-> int:\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r cva.imshow('''Output-Image''' ,\t\tself.img )\r cva.imshow('''Input-Image''' ,\t\tself.original_image )\r cva.waitKey(5_000 )\r cva.destroyAllWindows()\r\r\rif __name__ == \"__main__\":\r __lowercase\t: List[Any] \t\t\t=\t\tos.path.join(os.path.basename(__file__), \"\"\"image_data/input.jpg\"\"\")\r __lowercase\t: List[str] \t\t\t=\t\tConstantStretch()\r stretcher.stretch(file_path)\r stretcher.plot_histogram()\r stretcher.show_image()"},"style_context_codestyle":{"kind":"number","value":66,"string":"66"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":514,"cells":{"code":{"kind":"string","value":"\n\n\n\n\n\nimport unittest\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\nfrom transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available\nfrom transformers.models.gpta.tokenization_gpta import GPTaTokenizer\nfrom transformers.testing_utils import require_keras_nlp, require_tf, slow\n\n\nif is_tf_available():\n\timport tensorflow as tf\n\nif is_keras_nlp_available():\n\tfrom transformers.models.gpta import TFGPTaTokenizer\n\n\n_lowerCamelCase \t\t\t\t\t= ['gpt2']\n_lowerCamelCase \t\t\t\t\t= 'gpt2'\n\nif is_tf_available():\n\n\n\n\n\tclass \tUpperCamelCase_\t\t\t\t\t\t\t( tf.Module ):\n\n\n\t\t\t\t\t\t\t\tdef __init__(\t\t\t\tself :Optional[Any]\t\t\t\t,\t\t\t\t\t__A :Union[str, Any]\t\t) -> List[Any]:\n\n\n\n\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\t\t\t\t\t\t\t\t\t\tsuper().__init__()\n\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= tokenizer\n\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= AutoConfig.from_pretrained(__A\t\t)\n\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= TFGPTaLMHeadModel.from_config(__A\t\t)\n\n\n\t\t\t\t\t\t\t\t@tf.function(input_signature=(tf.TensorSpec((None,)\t\t\t\t,\t\t\t\t\ttf.string\t\t\t\t,\t\t\t\t\tname=\"\"\"text\"\"\"\t\t),)\t\t)\n\t\t\t\t\t\t\t\tdef _snake_case\t(\t\t\t\tself :Optional[int]\t\t\t\t,\t\t\t\t\t__A :int\t\t) -> Optional[Any]:\n\n\n\n\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= self.tokenizer(__A\t\t)\n\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= tokenized[\"\"\"input_ids\"\"\"].to_tensor()\n\n\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= tf.cast(input_ids_dense > 0\t\t\t\t,\t\t\t\t\ttf.intaa\t\t)\n\t\t\t\t\t\t\t\t\t\t# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])\n\n\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= 
self.model(input_ids=__A\t\t\t\t,\t\t\t\t\tattention_mask=__A\t\t)[\"\"\"logits\"\"\"]\n\n\t\t\t\t\t\t\t\t\t\treturn outputs\n\n\n\n\n@require_tf\n@require_keras_nlp\nclass \tUpperCamelCase_\t\t\t\t\t\t\t( unittest.TestCase ):\n\n\n\t\t\t\t\t\t\tdef _snake_case\t(\t\t\t\tself :List[Any]\t\t) -> Dict:\n\n\n\n\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\t\t\t\t\t\t\t\t\tsuper().setUp()\n\n\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= [GPTaTokenizer.from_pretrained(__A\t\t) for checkpoint in (TOKENIZER_CHECKPOINTS)]\n\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= [TFGPTaTokenizer.from_pretrained(__A\t\t) for checkpoint in TOKENIZER_CHECKPOINTS]\n\t\t\t\t\t\t\t\t\tassert len(self.tokenizers\t\t) == len(self.tf_tokenizers\t\t)\n\n\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= [\n\t\t\t\t\t\t\t\t\t \"\"\"This is a straightforward English test sentence.\"\"\",\n\t\t\t\t\t\t\t\t\t \"\"\"This one has some weird characters\\rto\\nsee\\r\\nif those\\u00E9break things.\"\"\",\n\t\t\t\t\t\t\t\t\t \"\"\"Now we're going to add some Chinese: 一 二 三 一二三\"\"\",\n\t\t\t\t\t\t\t\t\t \"\"\"And some much more rare Chinese: 齉 堃 齉堃\"\"\",\n\t\t\t\t\t\t\t\t\t \"\"\"Je vais aussi écrire en français pour tester les accents\"\"\",\n\t\t\t\t\t\t\t\t\t \"\"\"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ\"\"\",\n\t\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= list(zip(self.test_sentences\t\t\t\t,\t\t\t\t\tself.test_sentences[::-1]\t\t)\t\t)\n\n\n\t\t\t\t\t\t\tdef _snake_case\t(\t\t\t\tself :Any\t\t) -> Any:\n\n\n\n\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\t\t\t\t\t\t\t\t\tfor tokenizer, tf_tokenizer in zip(self.tokenizers\t\t\t\t,\t\t\t\t\tself.tf_tokenizers\t\t):\n\t\t\t\t\t\t\t\t\t\t\tfor test_inputs in self.test_sentences:\n\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= tokenizer([test_inputs]\t\t\t\t,\t\t\t\t\treturn_tensors=\"\"\"tf\"\"\"\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= tf_tokenizer([test_inputs]\t\t)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tfor key in python_outputs.keys():\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# convert them to numpy to avoid messing with ragged tensors\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= python_outputs[key].numpy()\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= tf_outputs[key].numpy()\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape\t\t)\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(tf.reduce_all(tf.cast(__A\t\t\t\t,\t\t\t\t\ttf.intaa\t\t) == tf_outputs_values\t\t)\t\t)\n\n\n\t\t\t\t\t\t\t@slow\n\t\t\t\t\t\t\tdef _snake_case\t(\t\t\t\tself :Any\t\t) -> int:\n\n\n\n\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\t\t\t\t\t\t\t\t\tfor tf_tokenizer in self.tf_tokenizers:\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= tf.function(__A\t\t)\n\t\t\t\t\t\t\t\t\t\t\tfor test_inputs in self.test_sentences:\n\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= tf.constant(__A\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= compiled_tokenizer(__A\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= tf_tokenizer(__A\t\t)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tfor key in eager_outputs.keys():\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]\t\t)\t\t)\n\n\n\t\t\t\t\t\t\t@slow\n\t\t\t\t\t\t\tdef 
_snake_case\t(\t\t\t\tself :List[Any]\t\t) -> Any:\n\n\n\n\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\t\t\t\t\t\t\t\t\tfor tf_tokenizer in self.tf_tokenizers:\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= ModelToSave(tokenizer=__A\t\t)\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= tf.convert_to_tensor([self.test_sentences[0]]\t\t)\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= model.serving(__A\t\t) # Build model with some sample inputs\n\t\t\t\t\t\t\t\t\t\t\twith TemporaryDirectory() as tempdir:\n\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= Path(__A\t\t) / \"\"\"saved.model\"\"\"\n\t\t\t\t\t\t\t\t\t\t\t\t\ttf.saved_model.save(__A\t\t\t\t,\t\t\t\t\t__A\t\t\t\t,\t\t\t\t\tsignatures={\"\"\"serving_default\"\"\": model.serving}\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= tf.saved_model.load(__A\t\t)\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= loaded_model.signatures[\"\"\"serving_default\"\"\"](__A\t\t)[\"\"\"output_0\"\"\"]\n\t\t\t\t\t\t\t\t\t\t\t# We may see small differences because the loaded model is compiled, so we need an epsilon for the test\n\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(tf.reduce_all(out == loaded_output\t\t)\t\t)\n\n\n\t\t\t\t\t\t\t@slow\n\t\t\t\t\t\t\tdef _snake_case\t(\t\t\t\tself :Dict\t\t) -> int:\n\n\n\n\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\t\t\t\t\t\t\t\t\tfor tf_tokenizer in self.tf_tokenizers:\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= tf.convert_to_tensor([self.test_sentences[0]]\t\t)\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= tf_tokenizer(__A\t\t) # Build model with some sample inputs\n\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= tf_tokenizer.get_config()\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= TFGPTaTokenizer.from_config(__A\t\t)\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= model_from_config(__A\t\t)\n\n\t\t\t\t\t\t\t\t\t\t\tfor key in from_config_output.keys():\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(tf.reduce_all(from_config_output[key] == out[key]\t\t)\t\t)\n\n\n\t\t\t\t\t\t\t@slow\n\t\t\t\t\t\t\tdef _snake_case\t(\t\t\t\tself :List[Any]\t\t) -> Any:\n\n\n\n\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\n\n\t\t\t\t\t\t\t\t\tfor tf_tokenizer in self.tf_tokenizers:\n\t\t\t\t\t\t\t\t\t\t\t# for the test to run\n\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= 12_3123\n\n\t\t\t\t\t\t\t\t\t\t\tfor max_length in [3, 5, 1024]:\n\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= tf.convert_to_tensor([self.test_sentences[0]]\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= tf_tokenizer(__A\t\t\t\t,\t\t\t\t\tmax_length=__A\t\t)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t\t= out[\"\"\"input_ids\"\"\"].numpy().shape[1]\n\n\t\t\t\t\t\t\t\t\t\t\t\t\tassert out_length == max_length"},"code_codestyle":{"kind":"number","value":6,"string":"6"},"style_context":{"kind":"string","value":"\n\nfrom copy import deepcopy\n\nclass __UpperCamelCase :\n\n\n\n\n\tdef __init__(\t\tself\t\t\t\t\t\t: List[str]\t\t,\t\t\t\t\t\tlowerCAmelCase\t\t\t\t\t\t: list[int] | None = None\t\t,\t\t\t\t\t\tlowerCAmelCase\t\t\t\t\t\t: int | None = None\t\t\t\t):\n\n\n\t\t'''simple docstring'''\n\n\n\n\n\n\n\n\t\tif arr is None and size is not None:\n\t\t\tUpperCAmelCase_\t\t\t\t\t\t\t\t\t\t\t\t= size\n\t\t\tUpperCAmelCase_\t\t\t\t\t\t\t\t\t\t\t\t= [0] * size\n\t\telif arr is not 
None:
            self.init(lowerCAmelCase)
        else:
            raise ValueError("Either arr or size must be specified")

    def __A(self: Tuple, lowerCAmelCase: list[int]):
        '''simple docstring'''
        UpperCAmelCase_ = len(lowerCAmelCase)
        UpperCAmelCase_ = deepcopy(lowerCAmelCase)
        for i in range(1, self.size):
            UpperCAmelCase_ = self.next_(lowerCAmelCase)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def __A(self: Tuple):
        '''simple docstring'''
        UpperCAmelCase_ = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            UpperCAmelCase_ = self.next_(lowerCAmelCase)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def __A(lowerCAmelCase: int):
        '''simple docstring'''
        return index + (index & (-index))

    @staticmethod
    def __A(lowerCAmelCase: int):
        '''simple docstring'''
        return index - (index & (-index))

    def __A(self: List[str], lowerCAmelCase: int, lowerCAmelCase: int):
        '''simple docstring'''
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            UpperCAmelCase_ = self.next_(lowerCAmelCase)

    def __A(self: List[str], lowerCAmelCase: int, lowerCAmelCase: int):
        '''simple docstring'''
        self.add(lowerCAmelCase, value - self.get(lowerCAmelCase))

    def __A(self: Tuple, lowerCAmelCase: int):
        '''simple docstring'''
        if right == 0:
            return 0
        UpperCAmelCase_ = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            UpperCAmelCase_ = self.prev(lowerCAmelCase)
        return result

    def __A(self: Any, lowerCAmelCase: int, lowerCAmelCase: int):
        '''simple docstring'''
        return self.prefix(lowerCAmelCase) - self.prefix(lowerCAmelCase)

    def __A(self: Any, lowerCAmelCase: int):
        '''simple docstring'''
        return self.query(lowerCAmelCase, index + 1)

    def __A(self: List[str], lowerCAmelCase: int):
        '''simple docstring'''
        value -= self.tree[0]
        if value < 0:
            return -1

        UpperCAmelCase_ = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        UpperCAmelCase_ = 0

        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + 
j]\n\t\t\t\ti += j\n\t\t\tj //= 2\n\t\treturn i\n\n\nif __name__ == \"__main__\":\n\t\t\t\timport doctest\n\n\t\t\t\tdoctest.testmod()"},"style_context_codestyle":{"kind":"number","value":162,"string":"162"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":515,"cells":{"code":{"kind":"string","value":"\r\r\r\r\r\rimport argparse\rimport glob\rimport logging\rimport os\rfrom argparse import Namespace\rfrom importlib import import_module\r\rimport numpy as np\rimport torch\rfrom lightning_base import BaseTransformer, add_generic_args, generic_train\rfrom seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score\rfrom torch.nn import CrossEntropyLoss\rfrom torch.utils.data import DataLoader, TensorDataset\rfrom utils_ner import TokenClassificationTask\r\r\r__UpperCAmelCase : Union[str, Any] =\t\t\t\t\t\t\tlogging.getLogger(__name__)\r\r\r\r\r\r\r\rclass \t\t\t\t\t__snake_case (\t\t\t\t\t\t\t__snake_case ):\r\r\r\r\r\r\t\t\t\t\t\t\t'''simple docstring'''\r\r\r\r\r\r\r\r\t\t\t\t\t\t\tlowerCAmelCase__ \t= 'token-classification'\r\r\r\r\r\t\t\t\t\t\t\tdef __init__(\t\t\t\t\t\tself\t\t\t\t:\t\t\t\t\tstr\t\t\t\t\t\t\t, A\t\t\t\t:\t\t\t\t\tTuple\t):\r\r\r\r\r\t\t\t\t\t\t\t\t\tif type(A_\t) == dict:\r\t\t\t\t\t\t\t\t\t\t\t__snake_case:\t\tAny\t\t\t\t\t\t\t\t\t\t=\t\tNamespace(**A_\t)\r\t\t\t\t\t\t\t\t\t__snake_case:\t\tTuple\t\t\t\t\t\t\t\t\t\t=\t\timport_module(\"\"\"tasks\"\"\"\t)\r\t\t\t\t\t\t\t\t\ttry:\r\t\t\t\t\t\t\t\t\t\t\t__snake_case:\t\tDict\t\t\t\t\t\t\t\t\t\t=\t\tgetattr(A_\t\t\t\t\t\t\t, hparams.task_type\t)\r\t\t\t\t\t\t\t\t\t\t\t__snake_case:\t\tTokenClassificationTask\t\t\t\t\t\t\t\t\t\t=\t\ttoken_classification_task_clazz()\r\t\t\t\t\t\t\t\t\texcept AttributeError:\r\t\t\t\t\t\t\t\t\t\t\traise ValueError(\r\t\t\t\t\t\t\t\t\t\t\t f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
'''\r\t\t\t\t\t\t\t\t\t\t\t f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}'''\t)\r\t\t\t\t\t\t\t\t\t__snake_case:\t\tList[Any]\t\t\t\t\t\t\t\t\t\t=\t\tself.token_classification_task.get_labels(hparams.labels\t)\r\t\t\t\t\t\t\t\t\t__snake_case:\t\tAny\t\t\t\t\t\t\t\t\t\t=\t\tCrossEntropyLoss().ignore_index\r\t\t\t\t\t\t\t\t\tsuper().__init__(A_\t\t\t\t\t\t\t, len(self.labels\t)\t\t\t\t\t\t\t, self.mode\t)\r\r\r\r\r\t\t\t\t\t\t\tdef \t\t\t\t\t\t\tUpperCAmelCase__\t\t\t\t(\t\t\t\t\t\tself\t\t\t\t:\t\t\t\t\tUnion[str, Any]\t\t\t\t\t\t\t, **A\t\t\t\t:\t\t\t\t\tDict\t):\r\t\t\t\t\t\t\t\t\treturn self.model(**A_\t)\r\r\r\r\r\t\t\t\t\t\t\tdef \t\t\t\t\t\t\tUpperCAmelCase__\t\t\t\t(\t\t\t\t\t\tself\t\t\t\t:\t\t\t\t\tDict\t\t\t\t\t\t\t, A\t\t\t\t:\t\t\t\t\tUnion[str, Any]\t\t\t\t\t\t\t, A\t\t\t\t:\t\t\t\t\tList[str]\t):\r\t\t\t\t\t\t\t\t\t__snake_case:\t\tOptional[Any]\t\t\t\t\t\t\t\t\t\t=\t\t{\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\r\t\t\t\t\t\t\t\t\tif self.config.model_type != \"distilbert\":\r\t\t\t\t\t\t\t\t\t\t\t__snake_case:\t\tstr\t\t\t\t\t\t\t\t\t\t=\t\t(\r\t\t\t\t\t\t\t\t\t\t\t batch[2] if self.config.model_type in [\"bert\", \"xlnet\"] else None\r\t\t\t\t\t\t\t\t\t\t\t) # XLM and RoBERTa don\"t use token_type_ids\r\r\t\t\t\t\t\t\t\t\t__snake_case:\t\tList[str]\t\t\t\t\t\t\t\t\t\t=\t\tself(**A_\t)\r\t\t\t\t\t\t\t\t\t__snake_case:\t\tOptional[int]\t\t\t\t\t\t\t\t\t\t=\t\toutputs[0]\r\t\t\t\t\t\t\t\t\t# tensorboard_logs = {\"loss\": loss, \"rate\": self.lr_scheduler.get_last_lr()[-1]}\r\t\t\t\t\t\t\t\t\treturn {\"loss\": loss}\r\r\r\r\r\t\t\t\t\t\t\tdef \t\t\t\t\t\t\tUpperCAmelCase__\t\t\t\t(\t\t\t\t\t\tself\t\t\t\t:\t\t\t\t\tList[Any]\t):\r\t\t\t\t\t\t\t\t\t__snake_case:\t\tList[str]\t\t\t\t\t\t\t\t\t\t=\t\tself.hparams\r\t\t\t\t\t\t\t\t\tfor mode in [\"train\", \"dev\", \"test\"]:\r\t\t\t\t\t\t\t\t\t\t\t__snake_case:\t\tstr\t\t\t\t\t\t\t\t\t\t=\t\tself._feature_file(A_\t)\r\t\t\t\t\t\t\t\t\t\t\tif os.path.exists(A_\t) and not args.overwrite_cache:\r\t\t\t\t\t\t\t\t\t\t\t\t\tlogger.info(\"\"\"Loading features from cached file %s\"\"\"\t\t\t\t\t\t\t, A_\t)\r\t\t\t\t\t\t\t\t\t\t\t\t\t__snake_case:\t\tUnion[str, Any]\t\t\t\t\t\t\t\t\t\t=\t\ttorch.load(A_\t)\r\t\t\t\t\t\t\t\t\t\t\telse:\r\t\t\t\t\t\t\t\t\t\t\t\t\tlogger.info(\"\"\"Creating features from dataset file at %s\"\"\"\t\t\t\t\t\t\t, args.data_dir\t)\r\t\t\t\t\t\t\t\t\t\t\t\t\t__snake_case:\t\tint\t\t\t\t\t\t\t\t\t\t=\t\tself.token_classification_task.read_examples_from_file(args.data_dir\t\t\t\t\t\t\t, A_\t)\r\t\t\t\t\t\t\t\t\t\t\t\t\t__snake_case:\t\tOptional[Any]\t\t\t\t\t\t\t\t\t\t=\t\tself.token_classification_task.convert_examples_to_features(\r\t\t\t\t\t\t\t\t\t\t\t\t\t A_\t\t\t\t\t\t\t, self.labels\t\t\t\t\t\t\t, args.max_seq_length\t\t\t\t\t\t\t, self.tokenizer\t\t\t\t\t\t\t, cls_token_at_end=bool(self.config.model_type in [\"\"\"xlnet\"\"\"]\t)\t\t\t\t\t\t\t, cls_token=self.tokenizer.cls_token\t\t\t\t\t\t\t, cls_token_segment_id=2 if self.config.model_type in [\"\"\"xlnet\"\"\"] else 0\t\t\t\t\t\t\t, sep_token=self.tokenizer.sep_token\t\t\t\t\t\t\t, sep_token_extra=A_\t\t\t\t\t\t\t, pad_on_left=bool(self.config.model_type in [\"\"\"xlnet\"\"\"]\t)\t\t\t\t\t\t\t, pad_token=self.tokenizer.pad_token_id\t\t\t\t\t\t\t, pad_token_segment_id=self.tokenizer.pad_token_type_id\t\t\t\t\t\t\t, pad_token_label_id=self.pad_token_label_id\t\t\t\t\t\t\t, )\r\t\t\t\t\t\t\t\t\t\t\t\t\tlogger.info(\"\"\"Saving features into cached file %s\"\"\"\t\t\t\t\t\t\t, 
        )
        torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle=False):
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)

        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)

# --- code_codestyle: 716 | style_context: ---

import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node

NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
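# Usage sketch (added for clarity; the script file name below is an assumption): the script
# above reads RANK and WORLD_SIZE from the environment, which `torchrun` sets for every
# worker process, e.g.
#
#   torchrun --nproc_per_node=2 run_torch_distributed.py --num_workers 2
#
# so each process gets its node-specific split from `split_dataset_by_node` and verifies
# that the split has the expected size.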
# --- style_context_codestyle: 155 | label: 0 | rowIdx: 516 | code: ---

class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()

# --- code_codestyle: 358 | style_context: ---

from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
    MaskedBertForMultipleChoice,
    MaskedBertForQuestionAnswering,
    MaskedBertForSequenceClassification,
    MaskedBertForTokenClassification,
    MaskedBertModel,
)
from .modules import *

# --- style_context_codestyle: 358 | label: 1 | rowIdx: 517 | code: ---

import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training program/script to be launched in parallel, "
            "followed by all the arguments for the training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()

# --- code_codestyle: 46 | style_context: ---

from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment the count of the corresponding character
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
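# Quick illustration (added; not part of the original script): check_anagrams lowercases
# its inputs and strips spaces before counting characters, so for example:
#
#   >>> check_anagrams("Silent", "Listen")
#   True
#   >>> check_anagrams("New York Times", "monkeys write")
#   True
#   >>> check_anagrams("apple", "pabble")
#   False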
# --- style_context_codestyle: 46 | label: 1 | rowIdx: 518 | code: ---

import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING

logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}


class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output

# --- code_codestyle: 622 | style_context: ---

from sklearn.metrics import matthews_corrcoef

import datasets

_DESCRIPTION = """
Compute the Matthews correlation coefficient (MCC)

The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (list of int): Predicted labels, as returned by a model.
    references (list of int): Ground truth labels.
    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
    matthews_correlation (dict containing float): Matthews correlation.
Examples:
    Example 1, a basic example with only predictions and references as inputs:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3])
        >>> print(round(results['matthews_correlation'], 2))
        0.54

    Example 2, the same example as above, but also including sample weights:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3],
        ...                                   sample_weight=[0.5, 3, 1, 1, 1, 2])
        >>> print(round(results['matthews_correlation'], 2))
        0.1

    Example 3, the same example as above, but with sample weights that cause a negative correlation:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3],
        ...                                   sample_weight=[0.5, 1, 0, 0, 0, 1])
        >>> print(round(results['matthews_correlation'], 2))
        -0.25
"""

_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
          and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
          and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
          Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }

# --- style_context_codestyle: 622 | label: 1 | rowIdx: 519 | code: ---
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
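# Note (added): with the `_LazyModule` pattern above, the heavy submodules are only imported
# when one of the listed names is first accessed, so a user-level import such as
#
#   from transformers import ConvNextConfig, ConvNextModel
#
# stays cheap until the torch-backed classes are actually used. The import line is only an
# illustration of how the lazily exposed names are consumed.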
\"lowest\",\r\n\t\t\t\t\t\t]\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t\t= os.path.join(self.tmpdirname\t\t\t, VOCAB_FILES_NAMES[\"vocab_file\"]\t\t\t\t)\r\n\t\t\t\t\t\twith open(self.vocab_file\t\t\t, \"w\"\t\t\t, encoding=\"utf-8\"\t\t\t\t) as vocab_writer:\r\n\t\t\t\t\t\t\t\t\t\t\tvocab_writer.write(\"\".join([x + \"\\n\" for x in vocab_tokens]\t\t\t\t)\t\t\t\t)\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tTuple\t\t\t\t\t\t\t= {\r\n\t\t\t\t\t\t \"do_resize\": True,\r\n\t\t\t\t\t\t \"size\": 20,\r\n\t\t\t\t\t\t \"do_center_crop\": True,\r\n\t\t\t\t\t\t \"crop_size\": 18,\r\n\t\t\t\t\t\t \"do_normalize\": True,\r\n\t\t\t\t\t\t \"image_mean\": [0.48145466, 0.4578275, 0.40821073],\r\n\t\t\t\t\t\t \"image_std\": [0.26862954, 0.26130258, 0.27577711],\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tList[Any]\t\t\t\t\t\t\t= os.path.join(self.tmpdirname\t\t\t, a__\t\t\t\t)\r\n\t\t\t\t\t\twith open(self.image_processor_file\t\t\t, \"w\"\t\t\t, encoding=\"utf-8\"\t\t\t\t) as fp:\r\n\t\t\t\t\t\t\t\t\t\t\tjson.dump(a__\t\t\t, a__\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef a_\t\t\t\t(\t\t\tself\t\t\t, **a__\t\t\t\t):\r\n\t\t\t\t\t\treturn BertTokenizer.from_pretrained(self.tmpdirname\t\t\t, **a__\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef a_\t\t\t\t(\t\t\tself\t\t\t, **a__\t\t\t\t):\r\n\t\t\t\t\t\treturn BertTokenizerFast.from_pretrained(self.tmpdirname\t\t\t, **a__\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef a_\t\t\t\t(\t\t\tself\t\t\t, **a__\t\t\t\t):\r\n\t\t\t\t\t\treturn EfficientNetImageProcessor.from_pretrained(self.tmpdirname\t\t\t, **a__\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef a_\t\t\t\t(\t\t\tself\t\t\t\t):\r\n\t\t\t\t\t\tshutil.rmtree(self.tmpdirname\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef a_\t\t\t\t(\t\t\tself\t\t\t\t):\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t\t= [np.random.randint(255\t\t\t, size=(3, 30, 400)\t\t\t, dtype=np.uinta\t\t\t\t)]\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tList[Any]\t\t\t\t\t\t\t= [Image.fromarray(np.moveaxis(a__\t\t\t, 0\t\t\t, -1\t\t\t\t)\t\t\t\t) for x in image_inputs]\r\n\t\t\t\t\t\treturn image_inputs\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef a_\t\t\t\t(\t\t\tself\t\t\t\t):\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tList[str]\t\t\t\t\t\t\t= self.get_tokenizer()\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t\t= self.get_rust_tokenizer()\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tList[Any]\t\t\t\t\t\t\t= self.get_image_processor()\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t\t= AlignProcessor(tokenizer=a__\t\t\t, image_processor=a__\t\t\t\t)\r\n\t\t\t\t\t\tprocessor_slow.save_pretrained(self.tmpdirname\t\t\t\t)\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tList[str]\t\t\t\t\t\t\t= AlignProcessor.from_pretrained(self.tmpdirname\t\t\t, use_fast=a__\t\t\t\t)\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t\t= AlignProcessor(tokenizer=a__\t\t\t, image_processor=a__\t\t\t\t)\r\n\t\t\t\t\t\tprocessor_fast.save_pretrained(self.tmpdirname\t\t\t\t)\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tList[str]\t\t\t\t\t\t\t= AlignProcessor.from_pretrained(self.tmpdirname\t\t\t\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(processor_slow.tokenizer.get_vocab()\t\t\t, 
tokenizer_slow.get_vocab()\t\t\t\t)\r\n\t\t\t\t\t\tself.assertEqual(processor_fast.tokenizer.get_vocab()\t\t\t, tokenizer_fast.get_vocab()\t\t\t\t)\r\n\t\t\t\t\t\tself.assertEqual(tokenizer_slow.get_vocab()\t\t\t, tokenizer_fast.get_vocab()\t\t\t\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor_slow.tokenizer\t\t\t, a__\t\t\t\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor_fast.tokenizer\t\t\t, a__\t\t\t\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(processor_slow.image_processor.to_json_string()\t\t\t, image_processor.to_json_string()\t\t\t\t)\r\n\t\t\t\t\t\tself.assertEqual(processor_fast.image_processor.to_json_string()\t\t\t, image_processor.to_json_string()\t\t\t\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor_slow.image_processor\t\t\t, a__\t\t\t\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor_fast.image_processor\t\t\t, a__\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef a_\t\t\t\t(\t\t\tself\t\t\t\t):\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t\t= AlignProcessor(tokenizer=self.get_tokenizer()\t\t\t, image_processor=self.get_image_processor()\t\t\t\t)\r\n\t\t\t\t\t\tprocessor.save_pretrained(self.tmpdirname\t\t\t\t)\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tTuple\t\t\t\t\t\t\t= self.get_tokenizer(bos_token=\"(BOS)\"\t\t\t, eos_token=\"(EOS)\"\t\t\t\t)\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tList[str]\t\t\t\t\t\t\t= self.get_image_processor(do_normalize=a__\t\t\t, padding_value=1.0\t\t\t\t)\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tAny\t\t\t\t\t\t\t= AlignProcessor.from_pretrained(\r\n\t\t\t\t\t\t self.tmpdirname\t\t\t, bos_token=\"(BOS)\"\t\t\t, eos_token=\"(EOS)\"\t\t\t, do_normalize=a__\t\t\t, padding_value=1.0\t\t\t\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(processor.tokenizer.get_vocab()\t\t\t, tokenizer_add_kwargs.get_vocab()\t\t\t\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor.tokenizer\t\t\t, a__\t\t\t\t)\r\n\r\n\t\t\t\t\t\tself.assertEqual(processor.image_processor.to_json_string()\t\t\t, image_processor_add_kwargs.to_json_string()\t\t\t\t)\r\n\t\t\t\t\t\tself.assertIsInstance(processor.image_processor\t\t\t, a__\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef a_\t\t\t\t(\t\t\tself\t\t\t\t):\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tDict\t\t\t\t\t\t\t= self.get_image_processor()\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tList[str]\t\t\t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tTuple\t\t\t\t\t\t\t= AlignProcessor(tokenizer=a__\t\t\t, image_processor=a__\t\t\t\t)\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tList[Any]\t\t\t\t\t\t\t= self.prepare_image_inputs()\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tList[Any]\t\t\t\t\t\t\t= image_processor(a__\t\t\t, return_tensors=\"np\"\t\t\t\t)\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tDict\t\t\t\t\t\t\t= processor(images=a__\t\t\t, return_tensors=\"np\"\t\t\t\t)\r\n\r\n\t\t\t\t\t\tfor key in input_image_proc.keys():\r\n\t\t\t\t\t\t\t\t\t\t\tself.assertAlmostEqual(input_image_proc[key].sum()\t\t\t, input_processor[key].sum()\t\t\t, delta=1e-2\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef a_\t\t\t\t(\t\t\tself\t\t\t\t):\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tTuple\t\t\t\t\t\t\t= self.get_image_processor()\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tint\t\t\t\t\t\t\t= 
self.get_tokenizer()\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tList[Any]\t\t\t\t\t\t\t= AlignProcessor(tokenizer=a__\t\t\t, image_processor=a__\t\t\t\t)\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tAny\t\t\t\t\t\t\t= \"lower newer\"\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tList[str]\t\t\t\t\t\t\t= processor(text=a__\t\t\t\t)\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tint\t\t\t\t\t\t\t= tokenizer(a__\t\t\t, padding=\"max_length\"\t\t\t, max_length=64\t\t\t\t)\r\n\r\n\t\t\t\t\t\tfor key in encoded_tok.keys():\r\n\t\t\t\t\t\t\t\t\t\t\tself.assertListEqual(encoded_tok[key]\t\t\t, encoded_processor[key]\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef a_\t\t\t\t(\t\t\tself\t\t\t\t):\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tOptional[int]\t\t\t\t\t\t\t= self.get_image_processor()\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tstr\t\t\t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t\t= AlignProcessor(tokenizer=a__\t\t\t, image_processor=a__\t\t\t\t)\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tList[str]\t\t\t\t\t\t\t= \"lower newer\"\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tstr\t\t\t\t\t\t\t= self.prepare_image_inputs()\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tUnion[str, Any]\t\t\t\t\t\t\t= processor(text=a__\t\t\t, images=a__\t\t\t\t)\r\n\r\n\t\t\t\t\t\tself.assertListEqual(list(inputs.keys()\t\t\t\t)\t\t\t, [\"input_ids\", \"token_type_ids\", \"attention_mask\", \"pixel_values\"]\t\t\t\t)\r\n\r\n\t\t\t\t\t\t# test if it raises when no input is passed\r\n\t\t\t\t\t\twith pytest.raises(a__\t\t\t\t):\r\n\t\t\t\t\t\t\t\t\t\t\tprocessor()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef a_\t\t\t\t(\t\t\tself\t\t\t\t):\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tstr\t\t\t\t\t\t\t= self.get_image_processor()\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tList[str]\t\t\t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tDict\t\t\t\t\t\t\t= AlignProcessor(tokenizer=a__\t\t\t, image_processor=a__\t\t\t\t)\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t\t= [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tDict\t\t\t\t\t\t\t= processor.batch_decode(a__\t\t\t\t)\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tList[str]\t\t\t\t\t\t\t= tokenizer.batch_decode(a__\t\t\t\t)\r\n\r\n\t\t\t\t\t\tself.assertListEqual(a__\t\t\t, a__\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\tdef a_\t\t\t\t(\t\t\tself\t\t\t\t):\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tList[Any]\t\t\t\t\t\t\t= self.get_image_processor()\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tTuple\t\t\t\t\t\t\t= self.get_tokenizer()\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tstr\t\t\t\t\t\t\t= AlignProcessor(tokenizer=a__\t\t\t, image_processor=a__\t\t\t\t)\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tAny\t\t\t\t\t\t\t= \"lower newer\"\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t\t= self.prepare_image_inputs()\r\n\r\n\t\t\t\t\t\t__SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t:\t\t\t\t\t\tList[str]\t\t\t\t\t\t\t= processor(text=a__\t\t\t, 
images=a__\t\t\t\t)\r\n\r\n\t\t\t\t\t\tself.assertListEqual(list(inputs.keys()\t\t\t\t)\t\t\t, processor.model_input_names\t\t\t\t)\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":564,"string":"564"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":520,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\nfrom __future__ import annotations\r\n\r\nfrom collections.abc import Callable\r\n\r\n__A\t\t\t\t\t\t\t:\t\t\tint =\t\t\tlist[list[float | int]]\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t__lowerCAmelCase( _SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t\t\t)\t\t\t\t\t\t\t-> Matrix:\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n _A =\t\t\t\t\t\t\tlen(_SCREAMING_SNAKE_CASE\t\t\t\t)\r\n _A =\t\t\t\t\t\t\t[[0 for _ in range(size + 1\t\t\t\t)] for _ in range(_SCREAMING_SNAKE_CASE\t\t\t\t)]\r\n _A =\t\t\t\t\t\t\t42\r\n _A =\t\t\t\t\t\t\t42\r\n _A =\t\t\t\t\t\t\t42\r\n _A =\t\t\t\t\t\t\t42\r\n _A =\t\t\t\t\t\t\t42\r\n _A =\t\t\t\t\t\t\t42\r\n\r\n for row in range(_SCREAMING_SNAKE_CASE\t\t\t\t):\r\n for col in range(_SCREAMING_SNAKE_CASE\t\t\t\t):\r\n _A =\t\t\t\t\t\t\tmatrix[row][col]\r\n\r\n _A =\t\t\t\t\t\t\tvector[row][0]\r\n\r\n _A =\t\t\t\t\t\t\t0\r\n _A =\t\t\t\t\t\t\t0\r\n while row < size and col < size:\r\n # pivoting\r\n _A =\t\t\t\t\t\t\tmax((abs(augmented[rowa][col]\t\t\t\t), rowa) for rowa in range(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t\t\t)\t\t\t\t)[\r\n 1\r\n ]\r\n if augmented[pivot_row][col] == 0:\r\n col += 1\r\n continue\r\n else:\r\n _A,\t\t\t_A =\t\t\t\t\t\t\taugmented[pivot_row], augmented[row]\r\n\r\n for rowa in range(row + 1\t\t\t\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t\t\t):\r\n _A =\t\t\t\t\t\t\taugmented[rowa][col] / augmented[row][col]\r\n _A =\t\t\t\t\t\t\t0\r\n for cola in range(col + 1\t\t\t\t\t\t\t, size + 1\t\t\t\t):\r\n augmented[rowa][cola] -= augmented[row][cola] * ratio\r\n\r\n row += 1\r\n col += 1\r\n\r\n # back substitution\r\n for col in range(1\t\t\t\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t\t\t):\r\n for row in range(_SCREAMING_SNAKE_CASE\t\t\t\t):\r\n _A =\t\t\t\t\t\t\taugmented[row][col] / augmented[col][col]\r\n for cola in range(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t, size + 1\t\t\t\t):\r\n augmented[row][cola] -= augmented[col][cola] * ratio\r\n\r\n # round to get rid of numbers like 2.000000000000004\r\n return [\r\n [round(augmented[row][size] / augmented[row][row]\t\t\t\t\t\t\t, 10\t\t\t\t)] for row in range(_SCREAMING_SNAKE_CASE\t\t\t\t)\r\n ]\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t__lowerCAmelCase( _SCREAMING_SNAKE_CASE\t\t\t\t)\t\t\t\t\t\t\t-> Callable[[int], int]:\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n _A =\t\t\t\t\t\t\tlen(_SCREAMING_SNAKE_CASE\t\t\t\t)\r\n _A =\t\t\t\t\t\t\t[[0 for _ in range(_SCREAMING_SNAKE_CASE\t\t\t\t)] for _ in range(_SCREAMING_SNAKE_CASE\t\t\t\t)]\r\n _A =\t\t\t\t\t\t\t[[0] for _ in range(_SCREAMING_SNAKE_CASE\t\t\t\t)]\r\n _A =\t\t\t\t\t\t\t42\r\n _A =\t\t\t\t\t\t\t42\r\n _A =\t\t\t\t\t\t\t42\r\n _A =\t\t\t\t\t\t\t42\r\n\r\n for x_val, y_val in enumerate(_SCREAMING_SNAKE_CASE\t\t\t\t):\r\n for col in range(_SCREAMING_SNAKE_CASE\t\t\t\t):\r\n _A =\t\t\t\t\t\t\t(x_val + 1) ** (size - col - 1)\r\n _A =\t\t\t\t\t\t\ty_val\r\n\r\n _A =\t\t\t\t\t\t\tsolve(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t\t, _SCREAMING_SNAKE_CASE\t\t\t\t)\r\n\r\n def interpolated_func(_SCREAMING_SNAKE_CASE\t\t\t\t) -> int:\r\n return sum(\r\n round(coeffs[x_val][0]\t\t\t\t) * (var ** (size - x_val - 1))\r\n for x_val in 
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix * x = vector by Gaussian elimination with partial pivoting."""
    size = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [[round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)]


def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Return the polynomial of minimal degree that passes through (1, y_1), ..., (n, y_n)."""
    size = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating polynomial u(n) of the problem."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum of the first incorrect terms of the optimum polynomials for the first `order` points."""
    data_points = [func(x_val) for x_val in range(1, order + 1)]

    polynomials = [interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)]

    ret = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")

# --- code_codestyle: 27 | style_context: ---

from __future__ import annotations

from typing import Any


class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            current_node.prev = previous_node
            previous_node.next = current_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
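# Minimal usage sketch (added; relies on the enqueue/dequeue/first method names used above):
#
#   queue = CircularQueueLinkedList(initial_capacity=3)
#   queue.enqueue("a")
#   queue.enqueue("b")
#   queue.first()    # -> "a"
#   queue.dequeue()  # -> "a"
#   queue.dequeue()  # -> "b"
#
# Dequeuing from an empty queue raises Exception("Empty Queue").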
# --- style_context_codestyle: 246 | label: 0 | rowIdx: 521 | code: ---

import shutil
import tempfile
import unittest
from unittest.mock import patch

from transformers import (
    DefaultFlowCallback,
    IntervalStrategy,
    PrinterCallback,
    ProgressCallback,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    is_torch_available,
)
from transformers.testing_utils import require_torch

if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS

    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel


class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
save_steps=10\t, eval_steps=5\t, evaluation_strategy=\"\"\"steps\"\"\"\t, )\r trainer.train()\r __lowerCamelCase\t\t\t\t\t\t: Tuple\t\t\t\t =\t\t\t\ttrainer.callback_handler.callbacks[-2].events\r self.assertEqual(A__\t, self.get_expected_events(A__\t\t\t\t\t)\t\t\t\t\t)\r\r # warning should be emitted for duplicated callbacks\r with patch(\"\"\"transformers.trainer_callback.logger.warning\"\"\"\t\t\t\t\t) as warn_mock:\r __lowerCamelCase\t\t\t\t\t\t: Union[str, Any]\t\t\t\t =\t\t\t\tself.get_trainer(\r callbacks=[MyTestTrainerCallback, MyTestTrainerCallback]\t, )\r assert str(A__\t\t\t\t\t) in warn_mock.call_args[0][0]\r\r\r"},"code_codestyle":{"kind":"number","value":720,"string":"720"},"style_context":{"kind":"string","value":"\r\r\r\r\r\r\r'''simple docstring'''\r\rimport argparse\r\rimport torch\r\rfrom transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert\rfrom transformers.utils import logging\r\r\rlogging.set_verbosity_info()\rdef __lowercase (_lowercase,\t\t\t\t\t\t_lowercase,\t\t\t\t\t\t_lowercase ) -> Optional[Any]:\r\r\r \"\"\"simple docstring\"\"\"\r # Initialise PyTorch model\r __lowerCamelCase\t\t\t\t\t\t: str\t\t\t\t =\t\t\t\tRemBertConfig.from_json_file(_lowercase )\r print(\"\"\"Building PyTorch model from configuration: {}\"\"\".format(str(_lowercase ) ) )\r __lowerCamelCase\t\t\t\t\t\t: List[Any]\t\t\t\t =\t\t\t\tRemBertModel(_lowercase )\r\r # Load weights from tf checkpoint\r load_tf_weights_in_rembert(_lowercase,\t\t\t\t\t\t_lowercase,\t\t\t\t\t\t_lowercase )\r\r # Save pytorch-model\r print(\"\"\"Save PyTorch model to {}\"\"\".format(_lowercase ) )\r torch.save(model.state_dict(),\t\t\t\t\t\t_lowercase )\r\r\rif __name__ == \"__main__\":\r UpperCAmelCase__\t\t\t\t\t\t:Tuple =\t\t\targparse.ArgumentParser()\r # Required parameters\r parser.add_argument(\r \"\"\"--tf_checkpoint_path\"\"\", default=None, type=str, required=True, help=\"\"\"Path to the TensorFlow checkpoint path.\"\"\"\r )\r parser.add_argument(\r \"\"\"--rembert_config_file\"\"\",\r default=None,\r type=str,\r required=True,\r help=(\r \"\"\"The config json file corresponding to the pre-trained RemBERT model. 
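For reference, a minimal de-obfuscated sketch of the event-recording callback that the code field above appears to implement, assuming the standard transformers TrainerCallback hook signatures; the class name and the subset of hooks shown are illustrative, not the fixture itself:

from transformers import TrainerCallback

class EventRecorderCallback(TrainerCallback):
    # Records the name of every Trainer event it receives (hypothetical reconstruction).
    def __init__(self):
        self.events = []

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

# A callback like this would be handed to a Trainer via
# Trainer(model, training_args, callbacks=[EventRecorderCallback()]).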
\\n\"\"\"\r \"\"\"This specifies the model architecture.\"\"\"\r ),\r )\r parser.add_argument(\r \"\"\"--pytorch_dump_path\"\"\", default=None, type=str, required=True, help=\"\"\"Path to the output PyTorch model.\"\"\"\r )\r UpperCAmelCase__\t\t\t\t\t\t:List[Any] =\t\t\tparser.parse_args()\r convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)\r\r\r\r"},"style_context_codestyle":{"kind":"number","value":483,"string":"483"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":522,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\nimport numpy as np\r\n\r\n# Importing the Keras libraries and packages\r\nimport tensorflow as tf\r\nfrom tensorflow.keras import layers, models\r\n\r\nif __name__ == \"__main__\":\r\n # Initialising the CNN\r\n # (Sequential- Building the model layer by layer)\r\n UpperCAmelCase_\t\t =\t\t\t\t\tmodels.Sequential()\r\n\r\n # Step 1 - Convolution\r\n # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel\r\n # (3,3) is the kernel size (filter matrix)\r\n classifier.add(\r\n layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation=\"\"\"relu\"\"\")\r\n )\r\n\r\n # Step 2 - Pooling\r\n classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))\r\n\r\n # Adding a second convolutional layer\r\n classifier.add(layers.ConvaD(3_2, (3, 3), activation=\"\"\"relu\"\"\"))\r\n classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))\r\n\r\n # Step 3 - Flattening\r\n classifier.add(layers.Flatten())\r\n\r\n # Step 4 - Full connection\r\n classifier.add(layers.Dense(units=1_2_8, activation=\"\"\"relu\"\"\"))\r\n classifier.add(layers.Dense(units=1, activation=\"\"\"sigmoid\"\"\"))\r\n\r\n # Compiling the CNN\r\n classifier.compile(\r\n optimizer=\"\"\"adam\"\"\", loss=\"\"\"binary_crossentropy\"\"\", metrics=[\"\"\"accuracy\"\"\"]\r\n )\r\n\r\n # Part 2 - Fitting the CNN to the images\r\n\r\n # Load Trained model weights\r\n\r\n # from keras.models import load_model\r\n # regressor=load_model('cnn.h5')\r\n\r\n UpperCAmelCase_\t\t =\t\t\t\t\ttf.keras.preprocessing.image.ImageDataGenerator(\r\n rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True\r\n )\r\n\r\n UpperCAmelCase_\t\t =\t\t\t\t\ttf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)\r\n\r\n UpperCAmelCase_\t\t =\t\t\t\t\ttrain_datagen.flow_from_directory(\r\n \"\"\"dataset/training_set\"\"\", target_size=(6_4, 6_4), batch_size=3_2, class_mode=\"\"\"binary\"\"\"\r\n )\r\n\r\n UpperCAmelCase_\t\t =\t\t\t\t\ttest_datagen.flow_from_directory(\r\n \"\"\"dataset/test_set\"\"\", target_size=(6_4, 6_4), batch_size=3_2, class_mode=\"\"\"binary\"\"\"\r\n )\r\n\r\n classifier.fit_generator(\r\n training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set\r\n )\r\n\r\n classifier.save(\"\"\"cnn.h5\"\"\")\r\n\r\n # Part 3 - Making new predictions\r\n\r\n UpperCAmelCase_\t\t =\t\t\t\t\ttf.keras.preprocessing.image.load_img(\r\n \"\"\"dataset/single_prediction/image.png\"\"\", target_size=(6_4, 6_4)\r\n )\r\n UpperCAmelCase_\t\t =\t\t\t\t\ttf.keras.preprocessing.image.img_to_array(test_image)\r\n UpperCAmelCase_\t\t =\t\t\t\t\tnp.expand_dims(test_image, axis=0)\r\n UpperCAmelCase_\t\t =\t\t\t\t\tclassifier.predict(test_image)\r\n # training_set.class_indices\r\n if result[0][0] == 0:\r\n UpperCAmelCase_\t\t =\t\t\t\t\t\"\"\"Normal\"\"\"\r\n if result[0][0] == 1:\r\n UpperCAmelCase_\t\t =\t\t\t\t\t\"\"\"Abnormality 
detected\"\"\"\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":2,"string":"2"},"style_context":{"kind":"string","value":"\r\n\r\nfrom __future__ import annotations\r\n\r\nimport unittest\r\n\r\nfrom transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available\r\nfrom transformers.testing_utils import require_tf, require_tokenizers, slow\r\nfrom transformers.utils import cached_property\r\n\r\nfrom ...test_configuration_common import ConfigTester\r\nfrom ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor\r\nfrom ...test_pipeline_mixin import PipelineTesterMixin\r\n\r\n\r\nif is_tf_available():\r\n import tensorflow as tf\r\n\r\n from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel\r\n\r\n\r\n\r\n@require_tf\r\nclass lowercase\t\t:\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n __SCREAMING_SNAKE_CASE \t\t\t\t=\t\t\t\t\t\t\tBlenderbotSmallConfig\r\n __SCREAMING_SNAKE_CASE \t\t\t\t=\t\t\t\t\t\t\t{}\r\n __SCREAMING_SNAKE_CASE \t\t\t\t=\t\t\t\t\t\t\t\"\"\"gelu\"\"\"\r\n\r\n\r\n\r\n\r\n def __init__(self\t\t\t,\t\t\t__a\t\t\t,\t\t\t__a=13\t\t\t,\t\t\t__a=7\t\t\t,\t\t\t__a=True\t\t\t,\t\t\t__a=False\t\t\t,\t\t\t__a=99\t\t\t,\t\t\t__a=32\t\t\t,\t\t\t__a=2\t\t\t,\t\t\t__a=4\t\t\t,\t\t\t__a=37\t\t\t,\t\t\t__a=0.1\t\t\t,\t\t\t__a=0.1\t\t\t,\t\t\t__a=20\t\t\t,\t\t\t__a=2\t\t\t,\t\t\t__a=1\t\t\t,\t\t\t__a=0\t\t\t,\t\t\t)\t\t\t\t\t-> List[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\tparent\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\tbatch_size\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\tseq_length\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\tis_training\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\tuse_labels\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\tvocab_size\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\thidden_size\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\tnum_hidden_layers\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\tnum_attention_heads\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\tintermediate_size\r\n\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\thidden_dropout_prob\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\tattention_probs_dropout_prob\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\tmax_position_embeddings\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\teos_token_id\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\tpad_token_id\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\tbos_token_id\r\n\r\n\r\n\r\n\r\n def UpperCamelCase__\t\t\t\t(self\t\t\t\t\t)\t\t\t\t\t-> Any:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\tids_tensor([self.batch_size, self.seq_length - 1]\t\t\t,\t\t\tself.vocab_size\t\t\t\t\t)\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\ttf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size\t\t\t\t\t)\t\t\t,\t\t\t1\t\t\t\t\t)\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\ttf.concat([input_ids, eos_tensor]\t\t\t,\t\t\taxis=1\t\t\t\t\t)\r\n\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\tids_tensor([self.batch_size, self.seq_length]\t\t\t,\t\t\tself.vocab_size\t\t\t\t\t)\r\n\r\n UpperCAmelCase__ \t\t\t\t\t\t\t=\t\t\t\t\t\t\tself.config_cls(\r\n 
style_context: an obfuscated copy of the TensorFlow BlenderbotSmall test file: a model tester that assembles a BlenderbotSmallConfig (d_model, encoder/decoder layers and attention heads, ffn dims, dropout, max_position_embeddings and the special-token ids) and verifies that a decoder fed past_key_values reproduces the no-past output on a random slice (tf.debugging.assert_near, rtol 1e-3); a prepare_blenderbot_small_inputs_dict helper that fills in attention, head, decoder-head and cross-attention masks; a @require_tf model/pipeline test class wiring TFBlenderbotSmallModel and TFBlenderbotSmallForConditionalGeneration into the conversational, feature-extraction, summarization, text2text-generation and translation pipeline mappings; and a @slow integration test that generates a reply to a "Social anxiety" prompt with facebook/blenderbot_small-90M (num_beams=2) and checks it against a small set of reference strings.

style_context_codestyle: 146

label: 0

rowIdx: 523

code: an obfuscated NAND-gate snippet: a gate helper that, once the scrambled identifiers are restored, returns int((input_1, input_2).count(0) != 0); a self-test asserting the gate over all four input pairs; and an if __name__ == "__main__" block that prints the truth table.

code_codestyle: 710
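Restored to valid Python, the gate and its self-test amount to the following sketch (the identifier names are assumptions, since the dump scrambles them):

def nand_gate(input_1: int, input_2: int) -> int:
    # NAND is 0 only when both inputs are 1; any 0 in the pair yields 1.
    return int((input_1, input_2).count(0) != 0)

assert nand_gate(0, 0) == 1
assert nand_gate(0, 1) == 1
assert nand_gate(1, 0) == 1
assert nand_gate(1, 1) == 0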
style_context: an obfuscated copy of the datasets inspection-API tests: pytest-parametrized checks that inspect_dataset and inspect_metric copy the loading script into a target directory without leaving a __pycache__ folder, and that get_dataset_config_info, get_dataset_config_names, get_dataset_infos and get_dataset_split_names report the expected config names and splits for squad, paws, acronym_identification, the lhoestq/* test repos and dalle-mini/wit, with ValueError raised when paws is queried without a config name.
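A hedged usage sketch of the inspection helpers those tests exercise; "squad" is only an illustrative dataset name taken from the test parametrization, and the calls reach the Hugging Face Hub at run time:

from datasets import get_dataset_config_names, get_dataset_split_names

configs = get_dataset_config_names("squad")            # e.g. ["plain_text"]
splits = get_dataset_split_names("squad", configs[0])  # e.g. ["train", "validation"]
print(configs, splits)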
style_context_codestyle: 43

label: 0

rowIdx: 524

code: a deprecation shim for the old image_to_image.py script; cleaned up (the scrambled pipeline class name restored) it reads:

import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401

warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)

code_codestyle: 561
style_context: an obfuscated copy of the AutoFeatureExtractor test suite: it loads extractors from a hub id and from local fixture files; rebuilds one from a config.json with feature_extractor_type removed and checks that the private _processor_class field is not serialized; asserts the error messages for a non-existent repo, a bad revision and a repo with no preprocessor_config.json; exercises trust_remote_code both ways for the dynamic hf-internal-testing/test_dynamic_feature_extractor extractor, including a save/reload round trip; registers a custom config/extractor pair via AutoConfig.register and AutoFeatureExtractor.register (asserting that re-registering a built-in class raises) and round-trips the custom extractor through save_pretrained; and checks local-versus-remote resolution when a registered local class and a hub class share a name, cleaning the registries in finally blocks.

style_context_codestyle: 561

label: 1
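A minimal sketch of the registration flow those tests exercise, assuming the public AutoConfig / AutoFeatureExtractor API; CustomConfig and CustomFeatureExtractor here are stand-ins for the test fixtures, not the fixtures themselves:

from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig, Wav2Vec2FeatureExtractor

class CustomConfig(PretrainedConfig):
    # Hypothetical stand-in for the test fixture config.
    model_type = "custom"

class CustomFeatureExtractor(Wav2Vec2FeatureExtractor):
    # Hypothetical stand-in for the fixture extractor; any FeatureExtractionMixin subclass works.
    pass

AutoConfig.register("custom", CustomConfig)
AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)

# After registration the custom class resolves through the auto API like any built-in extractor.
extractor = CustomFeatureExtractor()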
rowIdx: 525

code: an obfuscated copy of the TensorFlow ConvBERT test file: a model tester that builds a ConvBertConfig and runs create_and_check_* paths for the base model, masked LM, multiple choice, question answering, sequence classification and token classification; a @require_tf model/pipeline test class mapping the TFConvBert* heads onto the feature-extraction, fill-mask, question-answering, text-classification, token-classification and zero-shot pipelines; a @slow saved_model export check that reloads the export with tf.keras.models.load_model and verifies hidden-state and attention shapes (attention heads halved to num_attention_heads / 2 in the expected shapes); an attention-output test that toggles output_attentions / output_hidden_states for encoder and decoder paths; and a @slow integration test that runs YituTech/conv-bert-base on the ids [[0, 1, 2, 3, 4, 5]], expects an output of shape [1, 6, 768] and compares a 3x3 slice against hard-coded values with atol 1e-4.

code_codestyle: 522
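The integration check in that file reduces to roughly the following sketch (it downloads the public YituTech/conv-bert-base checkpoint at run time):

import tensorflow as tf
from transformers import TFConvBertModel

model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
last_hidden_state = model(input_ids)[0]
print(last_hidden_state.shape)  # the test above expects (1, 6, 768)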
[style_context (continued): the AlignProcessor tests create a temporary directory holding a small BERT word-piece vocab and an EfficientNet image-processor config (size 20, crop_size 18, image_mean/image_std), then check save_pretrained/from_pretrained round-trips with slow and fast tokenizers, loading with extra kwargs (bos_token "(BOS)", eos_token "(EOS)", do_normalize, padding_value 1.0), that the processor's image features match the image processor's (delta 1e-2), that its text encoding matches the tokenizer with padding "max_length" and max_length 64, that the output keys are input_ids, token_type_ids, attention_mask and pixel_values, that calling the processor with no input raises, that batch_decode matches the tokenizer, and that the output keys equal processor.model_input_names. style_context_codestyle 522, label 1.]
[Row 526, code field begins: an audio feature extractor (a SequenceFeatureExtractor subclass whose model input names are audio_values and audio_mask). Its constructor builds a slaney-scale mel_filter_bank, and a helper converts a waveform into a dB-scaled, clipped log-mel spectrogram using spectrogram(...) with a "hann" window_function; continued below.]
__lowerCAmelCase\t:\t\tbool = False\t\t, __lowerCAmelCase\t:\t\tbool = False\t\t, **__lowerCAmelCase\t:\t\tList[str]\t\t, ):\n\n\n\n\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n\n\n\n if sampling_rate is not None:\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n '''This feature extractor is set to support sampling rate'''\n f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''\n f''' with {self.sampling_rate} and not {sampling_rate}.'''\t\t\t\t\t\t)\n else:\n logger.warning(\n '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''\n '''Failing to do so can result in silent errors that might be hard to debug.'''\t\t\t\t\t\t)\n\n _lowerCamelCase\t\t:\t\t\t\t\t\t\tUnion[str, Any] =\t\t\t\t\tisinstance(__lowerCAmelCase\t\t, np.ndarray\t\t\t\t\t\t) and len(raw_speech.shape\t\t\t\t\t\t) > 1\n if is_batched_numpy and len(raw_speech.shape\t\t\t\t\t\t) > 2:\n raise ValueError(f'''Only mono-channel audio is supported for input to {self}'''\t\t\t\t\t\t)\n _lowerCamelCase\t\t:\t\t\t\t\t\t\tList[str] =\t\t\t\t\tis_batched_numpy or (\n isinstance(__lowerCAmelCase\t\t, (list, tuple)\t\t\t\t\t\t) and (isinstance(raw_speech[0]\t\t, (np.ndarray, tuple, list)\t\t\t\t\t\t))\n )\n if is_batched:\n _lowerCamelCase\t\t:\t\t\t\t\t\t\tstr =\t\t\t\t\t[np.asarray([speech]\t\t, dtype=np.floataa\t\t\t\t\t\t).T for speech in raw_speech]\n elif not is_batched and not isinstance(__lowerCAmelCase\t\t, np.ndarray\t\t\t\t\t\t):\n _lowerCamelCase\t\t:\t\t\t\t\t\t\tAny =\t\t\t\t\tnp.asarray(__lowerCAmelCase\t\t, dtype=np.floataa\t\t\t\t\t\t)\n elif isinstance(__lowerCAmelCase\t\t, np.ndarray\t\t\t\t\t\t) and raw_speech.dtype is np.dtype(np.floataa\t\t\t\t\t\t):\n _lowerCamelCase\t\t:\t\t\t\t\t\t\tUnion[str, Any] =\t\t\t\t\traw_speech.astype(np.floataa\t\t\t\t\t\t)\n # always return batch\n if not is_batched:\n _lowerCamelCase\t\t:\t\t\t\t\t\t\tTuple =\t\t\t\t\t[np.asarray([raw_speech]\t\t\t\t\t\t).T]\n\n # Convert audio signals to log mel spectrograms, truncate by time axis\n _lowerCamelCase\t\t:\t\t\t\t\t\t\tint =\t\t\t\t\t[\n self._np_extract_fbank_features(waveform.squeeze()\t\t\t\t\t\t).T[: self.spectrogram_length] for waveform in raw_speech\n ]\n if isinstance(audio_features[0]\t\t, __lowerCAmelCase\t\t\t\t\t\t):\n _lowerCamelCase\t\t:\t\t\t\t\t\t\tUnion[str, Any] =\t\t\t\t\t[np.asarray(__lowerCAmelCase\t\t, dtype=np.floataa\t\t\t\t\t\t) for feature in audio_features]\n\n # Create audio attention mask\n _lowerCamelCase\t\t:\t\t\t\t\t\t\tList[Any] =\t\t\t\t\tmax(\n [ceil(feature.shape[0] / self.patch_size[0]\t\t\t\t\t\t) * self.freq_len for feature in audio_features]\t\t\t\t\t\t) # The maximum number of audio patches in a batch\n if return_attention_mask:\n _lowerCamelCase\t\t:\t\t\t\t\t\t\tAny =\t\t\t\t\t[\n (ceil(feature.shape[0] / self.patch_size[0]\t\t\t\t\t\t) * self.freq_len) * [1]\n + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]\t\t\t\t\t\t) * self.freq_len) * [0]\n for feature in audio_features\n ]\n _lowerCamelCase\t\t:\t\t\t\t\t\t\tstr =\t\t\t\t\tnp.array(__lowerCAmelCase\t\t\t\t\t\t).astype(np.floataa\t\t\t\t\t\t)\n\n # convert into correct format for padding\n _lowerCamelCase\t\t:\t\t\t\t\t\t\tList[str] =\t\t\t\t\tmax_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch\n _lowerCamelCase\t\t:\t\t\t\t\t\t\tOptional[int] =\t\t\t\t\tnp.ones([len(__lowerCAmelCase\t\t\t\t\t\t), 1, max_time_len, self.feature_size]\t\t\t\t\t\t).astype(np.floataa\t\t\t\t\t\t)\n 
_lowerCamelCase\t\t:\t\t\t\t\t\t\tint =\t\t\t\t\tpadded_audio_features * self.padding_value\n for i in range(len(__lowerCAmelCase\t\t\t\t\t\t)\t\t\t\t\t\t):\n _lowerCamelCase\t\t:\t\t\t\t\t\t\tList[str] =\t\t\t\t\taudio_features[i]\n _lowerCamelCase\t\t:\t\t\t\t\t\t\tOptional[Any] =\t\t\t\t\tfeature\n\n # return as BatchFeature\n if return_attention_mask:\n _lowerCamelCase\t\t:\t\t\t\t\t\t\tUnion[str, Any] =\t\t\t\t\t{'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}\n else:\n _lowerCamelCase\t\t:\t\t\t\t\t\t\tAny =\t\t\t\t\t{'''audio_values''': padded_audio_features}\n\n _lowerCamelCase\t\t:\t\t\t\t\t\t\tOptional[int] =\t\t\t\t\tBatchFeature(data=__lowerCAmelCase\t\t, tensor_type=__lowerCAmelCase\t\t\t\t\t\t)\n return encoded_inputs\n\n\n"},"code_codestyle":{"kind":"number","value":83,"string":"83"},"style_context":{"kind":"string","value":"\r\r\r\r\r\r\"\"\"simple docstring\"\"\"\rimport unittest\r\rfrom parameterized import parameterized\r\rfrom transformers import OpenLlamaConfig, is_torch_available, set_seed\rfrom transformers.testing_utils import require_torch, torch_device\r\rfrom ...generation.test_utils import GenerationTesterMixin\rfrom ...test_configuration_common import ConfigTester\rfrom ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask\rfrom ...test_pipeline_mixin import PipelineTesterMixin\r\r\rif is_torch_available():\r import torch\r\r from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel\r\r\r\r\r\r\r\rclass a\t\t\t\t\t\t\t:\r\r\r\r def __init__( self\t\t\t\t, _snake_case\t\t\t\t, _snake_case=13\t\t\t\t, _snake_case=7\t\t\t\t, _snake_case=True\t\t\t\t, _snake_case=True\t\t\t\t, _snake_case=False\t\t\t\t, _snake_case=True\t\t\t\t, _snake_case=99\t\t\t\t, _snake_case=32\t\t\t\t, _snake_case=5\t\t\t\t, _snake_case=4\t\t\t\t, _snake_case=37\t\t\t\t, _snake_case=\"gelu\"\t\t\t\t, _snake_case=0.1\t\t\t\t, _snake_case=0.1\t\t\t\t, _snake_case=5_12\t\t\t\t, _snake_case=16\t\t\t\t, _snake_case=2\t\t\t\t, _snake_case=0.02\t\t\t\t, _snake_case=3\t\t\t\t, _snake_case=4\t\t\t\t, _snake_case=None\t\t\t\t, ):\r\r\r\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r\r\r lowerCAmelCase \t\t\t\t= parent\r lowerCAmelCase \t\t\t\t= batch_size\r lowerCAmelCase \t\t\t\t= seq_length\r lowerCAmelCase \t\t\t\t= is_training\r lowerCAmelCase \t\t\t\t= use_input_mask\r lowerCAmelCase \t\t\t\t= use_token_type_ids\r lowerCAmelCase \t\t\t\t= use_labels\r lowerCAmelCase \t\t\t\t= vocab_size\r lowerCAmelCase \t\t\t\t= hidden_size\r lowerCAmelCase \t\t\t\t= num_hidden_layers\r lowerCAmelCase \t\t\t\t= num_attention_heads\r lowerCAmelCase \t\t\t\t= intermediate_size\r lowerCAmelCase \t\t\t\t= hidden_act\r lowerCAmelCase \t\t\t\t= hidden_dropout_prob\r lowerCAmelCase \t\t\t\t= attention_probs_dropout_prob\r lowerCAmelCase \t\t\t\t= max_position_embeddings\r lowerCAmelCase \t\t\t\t= type_vocab_size\r lowerCAmelCase \t\t\t\t= type_sequence_label_size\r lowerCAmelCase \t\t\t\t= initializer_range\r lowerCAmelCase \t\t\t\t= num_labels\r lowerCAmelCase \t\t\t\t= num_choices\r lowerCAmelCase \t\t\t\t= scope\r\r\r\r def UpperCamelCase__ ( self ):\r\r\r\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r\r\r lowerCAmelCase \t\t\t\t= ids_tensor([self.batch_size, self.seq_length]\t\t\t\t, self.vocab_size )\r\r lowerCAmelCase \t\t\t\t= None\r if self.use_input_mask:\r lowerCAmelCase \t\t\t\t= random_attention_mask([self.batch_size, self.seq_length] )\r\r lowerCAmelCase \t\t\t\t= None\r if self.use_token_type_ids:\r 
[style_context (continued): the OpenLlama tester checks the base model and its decoder variant, OpenLlamaForCausalLM logits shapes, decoding with past_key_values (comparing sliced hidden states via torch.allclose with atol 1e-3); the test class covers config tests, model tests across absolute/relative_key/relative_key_query position-embedding types, sequence classification in single_label_classification and multi_label_classification modes, skips one test because "Open-Llama buffers include complex numbers", and runs a RoPE scaling test parameterized over "linear" and "dynamic" (factor 10.0) that compares original and scaled OpenLlamaModel outputs on short and long inputs. style_context_codestyle 4, label 0.]
[Row 527, code field begins: the vision-encoder-decoder composite configuration, importing PretrainedConfig, OnnxConfig and AutoConfig; continued below.]
[code (continued): the vision-encoder-decoder config requires both encoder and decoder sub-configurations, builds them with AutoConfig.for_model, provides a classmethod that assembles the composite config from encoder and decoder configs (setting is_decoder and add_cross_attention on the decoder), and serializes both sub-configs in to_dict. The accompanying ONNX configs declare pixel_values inputs with batch/num_channels/height/width axes, a 1e-4 validation tolerance, last_hidden_state outputs, decoder inputs with past_decoder_sequence axes, and a generate_dummy_inputs override that swaps input_ids/attention_mask for zeroed encoder_hidden_states of shape (batch, encoder_sequence, encoder_hidden_size); a wrapper exposes VisionEncoderDecoderEncoderOnnxConfig and VisionEncoderDecoderDecoderOnnxConfig. code_codestyle 714.]
[style_context field begins: an EsmFold test module importing EsmConfig and EsmForProteinFolding; continued below.]
[style_context (continued): the EsmFoldModelTester builds a small EsmConfig with is_folding_model=True and esmfold_config {"trunk": {"num_blocks": 2}, "fp16_esm": False}, and checks that EsmForProteinFolding returns positions of shape (8, batch, seq_length, 14, 3) and angles of shape (8, batch, seq_length, 7, 2). The test class disables most common tests via @unittest.skip (attention outputs, embedding resizing, input embeds, head pruning, hidden-state outputs, chunking, torchscript, data parallel, initialization), and a @slow integration test loads facebook/esmfold_v1, runs a short token sequence, and compares positions[0, 0, 0, 0] against expected values with atol 1e-4. style_context_codestyle 281, label 0.]
[Row 528, code field begins: a masked-language-modeling training script using AutoModelForMaskedLM, DataCollatorForWholeWordMask, HfArgumentParser, Trainer and get_last_checkpoint, whose ModelArguments dataclass starts here; continued below.]
[code (continued): ModelArguments holds the model checkpoint (or a model type when training from scratch), config overrides, config and tokenizer names, cache directory, fast-tokenizer flag, model revision and auth-token option; DataTrainingArguments holds the dataset name and config, train/validation files, optional train/validation reference files for whole word masking in Chinese, an overwrite-cache flag, validation split percentage, maximum sequence length, number of preprocessing workers, the token-masking ratio (default 0.15) and a pad-to-max-length flag, and validates that data files are csv, json or txt. A helper reads a JSON-lines reference file, pairs it with the dataset columns and rebuilds the Dataset via Dataset.from_dict; main() parses the three argument groups with HfArgumentParser (optionally from a single JSON file) and detects a previous checkpoint in output_dir, raising if the directory is non-empty without overwrite_output_dir. The field breaks off mid log-message ("Checkpoint detected, resuming training at ...").]
To avoid this behavior, change \"\"\"\n\t\t\t \"\"\"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.\"\"\" )\n\n # Setup logging\n\tlogging.basicConfig(\n\t format=\"\"\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\"\"\" ,\t\tdatefmt=\"\"\"%m/%d/%Y %H:%M:%S\"\"\" ,\t\thandlers=[logging.StreamHandler(sys.stdout )] ,\t\t)\n\tlogger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )\n\n\t# Log on each process the small summary:\n\tlogger.warning(\n\t f\"\"\"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}\"\"\"\n\t + f\"\"\"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}\"\"\" )\n\t# Set the verbosity to info of the Transformers logger (on main process only):\n\tif is_main_process(training_args.local_rank ):\n\t\ttransformers.utils.logging.set_verbosity_info()\n\t\ttransformers.utils.logging.enable_default_handler()\n\t\ttransformers.utils.logging.enable_explicit_format()\n\tlogger.info(\"\"\"Training/evaluation parameters %s\"\"\" ,\t\tlowercase__ )\n\n\t# Set seed before initializing model.\n\tset_seed(training_args.seed )\n\n\t# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)\n\t# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\n\t# (the dataset will be downloaded automatically from the datasets Hub).\n\t#\n\t# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called\n\t# 'text' is found. You can easily tweak this behavior (see below).\n\t#\n\t# In distributed training, the load_dataset function guarantee that only one local process can concurrently\n\t# download the dataset.\n\tif data_args.dataset_name is not None:\n\t\t# Downloading and loading a dataset from the hub.\n\t\tUpperCamelCase__ :List[str]\t\t\t\t\t\t= load_dataset(data_args.dataset_name ,\t\tdata_args.dataset_config_name )\n\t\tif \"validation\" not in datasets.keys():\n\t\t\tUpperCamelCase__ :Any\t\t\t\t\t\t= load_dataset(\n\t\t\t data_args.dataset_name ,\t\tdata_args.dataset_config_name ,\t\tsplit=f\"\"\"train[:{data_args.validation_split_percentage}%]\"\"\" ,\t\t)\n\t\t\tUpperCamelCase__ :Dict\t\t\t\t\t\t= load_dataset(\n\t\t\t data_args.dataset_name ,\t\tdata_args.dataset_config_name ,\t\tsplit=f\"\"\"train[{data_args.validation_split_percentage}%:]\"\"\" ,\t\t)\n\telse:\n\t\tUpperCamelCase__ :Union[str, Any]\t\t\t\t\t\t= {}\n\t\tif data_args.train_file is not None:\n\t\t\tUpperCamelCase__ :List[Any]\t\t\t\t\t\t= data_args.train_file\n\t\tif data_args.validation_file is not None:\n\t\t\tUpperCamelCase__ :str\t\t\t\t\t\t= data_args.validation_file\n\t\tUpperCamelCase__ :Tuple\t\t\t\t\t\t= data_args.train_file.split(\"\"\".\"\"\" )[-1]\n\t\tif extension == \"txt\":\n\t\t\tUpperCamelCase__ :List[str]\t\t\t\t\t\t= \"\"\"text\"\"\"\n\t\tUpperCamelCase__ :Optional[int]\t\t\t\t\t\t= load_dataset(lowercase__ ,\t\tdata_files=lowercase__ )\n\t# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n\t# https://huggingface.co/docs/datasets/loading_datasets.html.\n\n\t# Load pretrained model and tokenizer\n\t#\n\t# Distributed training:\n\t# The .from_pretrained methods guarantee that only one local process can concurrently\n\t# download model & vocab.\n\tUpperCamelCase__ :Union[str, Any]\t\t\t\t\t\t= {\n\t \"\"\"cache_dir\"\"\": 
model_args.cache_dir,\n\t \"\"\"revision\"\"\": model_args.model_revision,\n\t \"\"\"use_auth_token\"\"\": True if model_args.use_auth_token else None,\n\t}\n\tif model_args.config_name:\n\t\tUpperCamelCase__ :List[str]\t\t\t\t\t\t= AutoConfig.from_pretrained(model_args.config_name ,\t\t**lowercase__ )\n\telif model_args.model_name_or_path:\n\t\tUpperCamelCase__ :Union[str, Any]\t\t\t\t\t\t= AutoConfig.from_pretrained(model_args.model_name_or_path ,\t\t**lowercase__ )\n\telse:\n\t\tUpperCamelCase__ :Union[str, Any]\t\t\t\t\t\t= CONFIG_MAPPING[model_args.model_type]()\n\t\tlogger.warning(\"\"\"You are instantiating a new config instance from scratch.\"\"\" )\n\t\tif model_args.config_overrides is not None:\n\t\t\tlogger.info(f\"\"\"Overriding config: {model_args.config_overrides}\"\"\" )\n\t\t\tconfig.update_from_string(model_args.config_overrides )\n\t\t\tlogger.info(f\"\"\"New config: {config}\"\"\" )\n\n\tUpperCamelCase__ :Union[str, Any]\t\t\t\t\t\t= {\n\t \"\"\"cache_dir\"\"\": model_args.cache_dir,\n\t \"\"\"use_fast\"\"\": model_args.use_fast_tokenizer,\n\t \"\"\"revision\"\"\": model_args.model_revision,\n\t \"\"\"use_auth_token\"\"\": True if model_args.use_auth_token else None,\n\t}\n\tif model_args.tokenizer_name:\n\t\tUpperCamelCase__ :Optional[int]\t\t\t\t\t\t= AutoTokenizer.from_pretrained(model_args.tokenizer_name ,\t\t**lowercase__ )\n\telif model_args.model_name_or_path:\n\t\tUpperCamelCase__ :Any\t\t\t\t\t\t= AutoTokenizer.from_pretrained(model_args.model_name_or_path ,\t\t**lowercase__ )\n\telse:\n\t\traise ValueError(\n\t\t \"\"\"You are instantiating a new tokenizer from scratch. This is not supported by this script.\"\"\"\n\t\t \"\"\"You can do it from another script, save it, and load it from here, using --tokenizer_name.\"\"\" )\n\n\tif model_args.model_name_or_path:\n\t\tUpperCamelCase__ :Tuple\t\t\t\t\t\t= AutoModelForMaskedLM.from_pretrained(\n\t\t model_args.model_name_or_path ,\t\tfrom_tf=bool(\"\"\".ckpt\"\"\" in model_args.model_name_or_path ) ,\t\tconfig=lowercase__ ,\t\tcache_dir=model_args.cache_dir ,\t\trevision=model_args.model_revision ,\t\tuse_auth_token=True if model_args.use_auth_token else None ,\t\t)\n\telse:\n\t\tlogger.info(\"\"\"Training new model from scratch\"\"\" )\n\t\tUpperCamelCase__ :Optional[Any]\t\t\t\t\t\t= AutoModelForMaskedLM.from_config(lowercase__ )\n\n\tmodel.resize_token_embeddings(len(lowercase__ ) )\n\n\t# Preprocessing the datasets.\n\t# First we tokenize all the texts.\n\tif training_args.do_train:\n\t\tUpperCamelCase__ :Dict\t\t\t\t\t\t= datasets[\"\"\"train\"\"\"].column_names\n\telse:\n\t\tUpperCamelCase__ :str\t\t\t\t\t\t= datasets[\"\"\"validation\"\"\"].column_names\n\tUpperCamelCase__ :Optional[int]\t\t\t\t\t\t= \"\"\"text\"\"\" if \"\"\"text\"\"\" in column_names else column_names[0]\n\n\tUpperCamelCase__ :str\t\t\t\t\t\t= \"\"\"max_length\"\"\" if data_args.pad_to_max_length else False\n\n\tdef tokenize_function(lowercase__ : str ):\n\t\t# Remove empty lines\n\t\tUpperCamelCase__ :List[str]\t\t\t\t\t\t= [line for line in examples[\"\"\"text\"\"\"] if len(lowercase__ ) > 0 and not line.isspace()]\n\t\treturn tokenizer(examples[\"\"\"text\"\"\"] ,\t\tpadding=lowercase__ ,\t\ttruncation=lowercase__ ,\t\tmax_length=data_args.max_seq_length )\n\n\tUpperCamelCase__ :int\t\t\t\t\t\t= datasets.map(\n\t lowercase__ ,\t\tbatched=lowercase__ ,\t\tnum_proc=data_args.preprocessing_num_workers ,\t\tremove_columns=[text_column_name] ,\t\tload_from_cache_file=not data_args.overwrite_cache ,\t\t)\n\n\t# Add the chinese references if 
provided\n\tif data_args.train_ref_file is not None:\n\t\tUpperCamelCase__ :Tuple\t\t\t\t\t\t= add_chinese_references(tokenized_datasets[\"\"\"train\"\"\"] ,\t\tdata_args.train_ref_file )\n\tif data_args.validation_ref_file is not None:\n\t\tUpperCamelCase__ :Tuple\t\t\t\t\t\t= add_chinese_references(\n\t\t tokenized_datasets[\"\"\"validation\"\"\"] ,\t\tdata_args.validation_ref_file )\n\t# If we have ref files, need to avoid it removed by trainer\n\tUpperCamelCase__ :Optional[Any]\t\t\t\t\t\t= data_args.train_ref_file or data_args.validation_ref_file\n\tif has_ref:\n\t\tUpperCamelCase__ :List[str]\t\t\t\t\t\t= False\n\n\t# Data collator\n\t# This one will take care of randomly masking the tokens.\n\tUpperCamelCase__ :str\t\t\t\t\t\t= DataCollatorForWholeWordMask(tokenizer=lowercase__ ,\t\tmlm_probability=data_args.mlm_probability )\n\n\t# Initialize our Trainer\n\tUpperCamelCase__ :Union[str, Any]\t\t\t\t\t\t= Trainer(\n\t model=lowercase__ ,\t\targs=lowercase__ ,\t\ttrain_dataset=tokenized_datasets[\"\"\"train\"\"\"] if training_args.do_train else None ,\t\teval_dataset=tokenized_datasets[\"\"\"validation\"\"\"] if training_args.do_eval else None ,\t\ttokenizer=lowercase__ ,\t\tdata_collator=lowercase__ ,\t\t)\n\n\t# Training\n\tif training_args.do_train:\n\t\tif last_checkpoint is not None:\n\t\t\tUpperCamelCase__ :List[Any]\t\t\t\t\t\t= last_checkpoint\n\t\telif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):\n\t\t\tUpperCamelCase__ :int\t\t\t\t\t\t= model_args.model_name_or_path\n\t\telse:\n\t\t\tUpperCamelCase__ :Optional[Any]\t\t\t\t\t\t= None\n\t\tUpperCamelCase__ :List[Any]\t\t\t\t\t\t= trainer.train(resume_from_checkpoint=lowercase__ )\n\t\ttrainer.save_model() # Saves the tokenizer too for easy upload\n\n\t\tUpperCamelCase__ :int\t\t\t\t\t\t= os.path.join(training_args.output_dir ,\t\t\"\"\"train_results.txt\"\"\" )\n\t\tif trainer.is_world_process_zero():\n\t\t\twith open(lowercase__ ,\t\t\"\"\"w\"\"\" ) as writer:\n\t\t\t\tlogger.info(\"\"\"***** Train results *****\"\"\" )\n\t\t\t\tfor key, value in sorted(train_result.metrics.items() ):\n\t\t\t\t\tlogger.info(f\"\"\" {key} = {value}\"\"\" )\n\t\t\t\t\twriter.write(f\"\"\"{key} = {value}\\n\"\"\" )\n\n # Need to save the state, since Trainer.save_model saves only the tokenizer with the model\n\t\t\ttrainer.state.save_to_json(os.path.join(training_args.output_dir ,\t\t\"\"\"trainer_state.json\"\"\" ) )\n\n # Evaluation\n\tUpperCamelCase__ :Optional[Any]\t\t\t\t\t\t= {}\n\tif training_args.do_eval:\n\t\tlogger.info(\"\"\"*** Evaluate ***\"\"\" )\n\n\t\tUpperCamelCase__ :str\t\t\t\t\t\t= trainer.evaluate()\n\n\t\tUpperCamelCase__ :Dict\t\t\t\t\t\t= math.exp(eval_output[\"\"\"eval_loss\"\"\"] )\n\t\tUpperCamelCase__ :int\t\t\t\t\t\t= perplexity\n\n\t\tUpperCamelCase__ :Union[str, Any]\t\t\t\t\t\t= os.path.join(training_args.output_dir ,\t\t\"\"\"eval_results_mlm_wwm.txt\"\"\" )\n\t\tif trainer.is_world_process_zero():\n\t\t\twith open(lowercase__ ,\t\t\"\"\"w\"\"\" ) as writer:\n\t\t\t\tlogger.info(\"\"\"***** Eval results *****\"\"\" )\n\t\t\t\tfor key, value in sorted(results.items() ):\n\t\t\t\t\tlogger.info(f\"\"\" {key} = {value}\"\"\" )\n\t\t\t\t\twriter.write(f\"\"\"{key} = {value}\\n\"\"\" )\n\n\treturn results\n\n\n\n\ndef \t\t\t\tA\t\t(\t\t\t\t\tlowercase__ : Tuple )\t\t-> Tuple:\n\t# For xla_spawn (TPUs)\n\tmain()\n\n\nif __name__ == 
\"__main__\":\n\t\t\t\t\t\t\tmain()"},"code_codestyle":{"kind":"number","value":45,"string":"45"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\ndef __UpperCAmelCase ( __UpperCamelCase\t,\t\t\t\t__UpperCamelCase\t):\r\n\t\t\tif digit_amount > 0:\r\n\t\t\t\t\t\treturn round(number - int(__UpperCamelCase\t)\t,\t\t\t\t__UpperCamelCase\t)\r\n\t\t\treturn number - int(__UpperCamelCase\t)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\tprint(decimal_isolate(1.53, 0))\r\n\t\t\tprint(decimal_isolate(35.345, 1))\r\n\t\t\tprint(decimal_isolate(35.345, 2))\r\n\t\t\tprint(decimal_isolate(35.345, 3))\r\n\t\t\tprint(decimal_isolate(-14.789, 3))\r\n\t\t\tprint(decimal_isolate(0, 2))\r\n\t\t\tprint(decimal_isolate(-14.123, 1))\r\n\t\t\tprint(decimal_isolate(-14.123, 2))\r\n\t\t\tprint(decimal_isolate(-14.123, 3))\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":76,"string":"76"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":529,"cells":{"code":{"kind":"string","value":"\n\n\n\n\n\nimport pytest\n\nfrom datasets import Dataset, DatasetDict, Features, NamedSplit, Value\nfrom datasets.io.text import TextDatasetReader\n\nfrom ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases\n\n\n\n\ndef \t\t\t\tsnake_case_ (\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ ):\n\n\n\n\n\n\n\n '''simple docstring'''\n\n\n\n assert isinstance(SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ )\n assert dataset.num_rows == 4\n assert dataset.num_columns == 1\n assert dataset.column_names == [\"text\"]\n for feature, expected_dtype in expected_features.items():\n assert dataset.features[feature].dtype == expected_dtype\n\n\n\n\n@pytest.mark.parametrize(\"keep_in_memory\"\t\t\t\t\t,\t[False, True] )\ndef \t\t\t\tsnake_case_ (\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ ):\n\n\n\n\n\n\n\n '''simple docstring'''\n\n\n\n _snake_case \t\t=\t\t\t\ttmp_path / \"cache\"\n _snake_case \t\t=\t\t\t\t{\"text\": \"string\"}\n with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():\n _snake_case \t\t=\t\t\t\tTextDatasetReader(SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tcache_dir=SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tkeep_in_memory=SCREAMING_SNAKE_CASE__ ).read()\n _check_text_dataset(SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ )\n\n\n\n\n@pytest.mark.parametrize(\n \"features\"\t\t\t\t\t,\t[\n None,\n {\"text\": \"string\"},\n {\"text\": \"int32\"},\n {\"text\": \"float32\"},\n ]\t\t\t\t\t,\t)\ndef \t\t\t\tsnake_case_ (\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ ):\n\n\n\n\n\n\n\n '''simple docstring'''\n\n\n\n _snake_case \t\t=\t\t\t\ttmp_path / \"cache\"\n _snake_case \t\t=\t\t\t\t{\"text\": \"string\"}\n _snake_case \t\t=\t\t\t\tfeatures.copy() if features else default_expected_features\n _snake_case \t\t=\t\t\t\t(\n Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None\n )\n _snake_case \t\t=\t\t\t\tTextDatasetReader(SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tfeatures=SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tcache_dir=SCREAMING_SNAKE_CASE__ ).read()\n _check_text_dataset(SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ )\n\n\n\n\n@pytest.mark.parametrize(\"split\"\t\t\t\t\t,\t[None, NamedSplit(\"train\" ), \"train\", \"test\"] )\ndef \t\t\t\tsnake_case_ 
(\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ ):\n\n\n\n\n\n\n\n '''simple docstring'''\n\n\n\n _snake_case \t\t=\t\t\t\ttmp_path / \"cache\"\n _snake_case \t\t=\t\t\t\t{\"text\": \"string\"}\n _snake_case \t\t=\t\t\t\tTextDatasetReader(SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tcache_dir=SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tsplit=SCREAMING_SNAKE_CASE__ ).read()\n _check_text_dataset(SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ )\n assert dataset.split == split if split else \"train\"\n\n\n\n\n@pytest.mark.parametrize(\"path_type\"\t\t\t\t\t,\t[str, list] )\ndef \t\t\t\tsnake_case_ (\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ ):\n\n\n\n\n\n\n\n '''simple docstring'''\n\n\n\n if issubclass(SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ ):\n _snake_case \t\t=\t\t\t\ttext_path\n elif issubclass(SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ ):\n _snake_case \t\t=\t\t\t\t[text_path]\n _snake_case \t\t=\t\t\t\ttmp_path / \"cache\"\n _snake_case \t\t=\t\t\t\t{\"text\": \"string\"}\n _snake_case \t\t=\t\t\t\tTextDatasetReader(SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tcache_dir=SCREAMING_SNAKE_CASE__ ).read()\n _check_text_dataset(SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ )\n\n\n\n\ndef \t\t\t\tsnake_case_ (\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__=(\"train\",) ):\n\n\n\n\n\n\n\n '''simple docstring'''\n\n\n\n assert isinstance(SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ )\n for split in splits:\n _snake_case \t\t=\t\t\t\tdataset_dict[split]\n assert dataset.num_rows == 4\n assert dataset.num_columns == 1\n assert dataset.column_names == [\"text\"]\n for feature, expected_dtype in expected_features.items():\n assert dataset.features[feature].dtype == expected_dtype\n\n\n\n\n@pytest.mark.parametrize(\"keep_in_memory\"\t\t\t\t\t,\t[False, True] )\ndef \t\t\t\tsnake_case_ (\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ ):\n\n\n\n\n\n\n\n '''simple docstring'''\n\n\n\n _snake_case \t\t=\t\t\t\ttmp_path / \"cache\"\n _snake_case \t\t=\t\t\t\t{\"text\": \"string\"}\n with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():\n _snake_case \t\t=\t\t\t\tTextDatasetReader({\"train\": text_path}\t\t\t\t\t,\tcache_dir=SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tkeep_in_memory=SCREAMING_SNAKE_CASE__ ).read()\n _check_text_datasetdict(SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ )\n\n\n\n\n@pytest.mark.parametrize(\n \"features\"\t\t\t\t\t,\t[\n None,\n {\"text\": \"string\"},\n {\"text\": \"int32\"},\n {\"text\": \"float32\"},\n ]\t\t\t\t\t,\t)\ndef \t\t\t\tsnake_case_ (\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ ):\n\n\n\n\n\n\n\n '''simple docstring'''\n\n\n\n _snake_case \t\t=\t\t\t\ttmp_path / \"cache\"\n # CSV file loses col_1 string dtype information: default now is \"int64\" instead of \"string\"\n _snake_case \t\t=\t\t\t\t{\"text\": \"string\"}\n _snake_case \t\t=\t\t\t\tfeatures.copy() if features else default_expected_features\n _snake_case \t\t=\t\t\t\t(\n Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None\n )\n _snake_case \t\t=\t\t\t\tTextDatasetReader({\"train\": text_path}\t\t\t\t\t,\tfeatures=SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tcache_dir=SCREAMING_SNAKE_CASE__ ).read()\n 
_check_text_datasetdict(SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ )\n\n\n\n\n@pytest.mark.parametrize(\"split\"\t\t\t\t\t,\t[None, NamedSplit(\"train\" ), \"train\", \"test\"] )\ndef \t\t\t\tsnake_case_ (\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ ):\n\n\n\n\n\n\n\n '''simple docstring'''\n\n\n\n if split:\n _snake_case \t\t=\t\t\t\t{split: text_path}\n else:\n _snake_case \t\t=\t\t\t\t\"train\"\n _snake_case \t\t=\t\t\t\t{\"train\": text_path, \"test\": text_path}\n _snake_case \t\t=\t\t\t\ttmp_path / \"cache\"\n _snake_case \t\t=\t\t\t\t{\"text\": \"string\"}\n _snake_case \t\t=\t\t\t\tTextDatasetReader(SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tcache_dir=SCREAMING_SNAKE_CASE__ ).read()\n _check_text_datasetdict(SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__\t\t\t\t\t,\tsplits=list(path.keys() ) )\n assert all(dataset[split].split == split for split in path.keys() )\n\n\n"},"code_codestyle":{"kind":"number","value":705,"string":"705"},"style_context":{"kind":"string","value":"\n\n\n\n\n\n'''simple docstring'''\n\n\n\nimport math\n\n\nclass __SCREAMING_SNAKE_CASE\t:\n\n\n\n '''simple docstring'''\n\n\n\n\n\n\n def \tUpperCamelCase(\t\t\t\t\tself\t\t\t\t\t\t, lowerCamelCase\t\t\t\t\t\t, lowerCamelCase\t):\n _snake_case \t\t=\t\t\t\t0.0\n _snake_case \t\t=\t\t\t\t0.0\n for i in range(len(lowerCamelCase\t)\t):\n da += math.pow((sample[i] - weights[0][i])\t\t\t\t\t\t, 2\t)\n da += math.pow((sample[i] - weights[1][i])\t\t\t\t\t\t, 2\t)\n return 0 if da > da else 1\n return 0\n\n\n\n\n\n\n def \tUpperCamelCase(\t\t\t\t\tself\t\t\t\t\t\t, lowerCamelCase\t\t\t\t\t\t, lowerCamelCase\t\t\t\t\t\t, lowerCamelCase\t\t\t\t\t\t, lowerCamelCase\t):\n for i in range(len(lowerCamelCase\t)\t):\n weights[j][i] += alpha * (sample[i] - weights[j][i])\n return weights\n\n\n\n\n\n\n\ndef \t\t\t\tsnake_case_ (\t\t):\n\n\n\n\n\n\n\n '''simple docstring'''\n\n\n\n _snake_case \t\t=\t\t\t\t[[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]\n\n # weight initialization ( n, C )\n _snake_case \t\t=\t\t\t\t[[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]\n\n # training\n _snake_case \t\t=\t\t\t\tSelfOrganizingMap()\n _snake_case \t\t=\t\t\t\t3\n _snake_case \t\t=\t\t\t\t0.5\n\n for _ in range(SCREAMING_SNAKE_CASE__ ):\n for j in range(len(SCREAMING_SNAKE_CASE__ ) ):\n # training sample\n _snake_case \t\t=\t\t\t\ttraining_samples[j]\n\n # Compute the winning vector\n _snake_case \t\t=\t\t\t\tself_organizing_map.get_winner(SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ )\n\n # Update the winning vector\n _snake_case \t\t=\t\t\t\tself_organizing_map.update(SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ )\n\n # classify test sample\n _snake_case \t\t=\t\t\t\t[0, 0, 0, 1]\n _snake_case \t\t=\t\t\t\tself_organizing_map.get_winner(SCREAMING_SNAKE_CASE__\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__ )\n\n # results\n print(f'''Clusters that the test sample belongs to : {winner}''' )\n print(f'''Weights that have been trained : {weights}''' )\n\n\n# running the main() function\nif __name__ == \"__main__\":\n main()\n\n\n"},"style_context_codestyle":{"kind":"number","value":368,"string":"368"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":530,"cells":{"code":{"kind":"string","value":"\r\r\r\r\r\r\r\"\"\"simple docstring\"\"\"\rdef __UpperCamelCase\t\t\t\t\t\t(\t\tsnake_case__ ):\r\r\tif p < 2:\r\t\traise ValueError(\"\"\"p should not be less than 2!\"\"\" 
)
    elif p == 2:
        return True

    A_ : Dict = 4
    A_ : int = (1 << p) - 1
    for _ in range(p - 2 ):
        A_ : Union[str, Any] = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))

code_codestyle: 180 | style_context:

"""simple docstring"""
import math
import os
import sys


def __UpperCamelCase ( snake_case__ ):
    A_ : Optional[Any] = """"""
    try:
        with open(snake_case__ , """rb""" ) as binary_file:
            A_ : Union[str, Any] = binary_file.read()
        for dat in data:
            A_ : Dict = F"""{dat:08b}"""
            result += curr_byte
        return result
    except OSError:
        print("""File not accessible""" )
        sys.exit()


def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
    lexicon.pop(snake_case__ )
    A_ : List[str] = last_match_id

    if math.loga(snake_case__ ).is_integer():
        for curr_key in lexicon:
            A_ : Dict = """0""" + lexicon[curr_key]

    A_ : int = bin(snake_case__ )[2:]


def __UpperCamelCase ( snake_case__ ):
    A_ : Dict = {"""0""": """0""", """1""": """1"""}
    A_ , A_ : Optional[int] = """""", """"""
    A_ : Tuple = len(snake_case__ )

    for i in range(len(snake_case__ ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        A_ : List[str] = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        index += 1
        A_ : int = """"""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        A_ : Any = lexicon[curr_string]
        result += last_match_id

    return result


def __UpperCamelCase ( snake_case__ , snake_case__ ):
    A_ : Optional[int] = os.path.getsize(snake_case__ )
    A_ : Dict = bin(snake_case__ )[2:]
    A_ : Optional[Any] = len(snake_case__ )

    return "0" * (length_length - 1) + file_length_binary + compressed


def __UpperCamelCase ( snake_case__ , snake_case__ ):
    A_ : Tuple = 8
    try:
        with open(snake_case__ , """wb""" ) as opened_file:
            A_ : Dict = [
                to_write[i : i + byte_length]
                for i in range(0 , len(snake_case__ ) , snake_case__ )
            ]

            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append("""10000000""" )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(snake_case__ , 2 ).to_bytes(1 , byteorder="""big""" ) )
    except OSError:
        print("""File not accessible""" )
        sys.exit()


def __UpperCamelCase ( snake_case__ , snake_case__ ):
    A_ : List[str] = read_file_binary(snake_case__ )
    A_ : str = 
compress_data(snake_case__ )\r\tA_\t\t: int\t\t\t\t\t\t = add_file_length(snake_case__ , snake_case__ )\r\twrite_file_binary(snake_case__ , snake_case__ )\r\r\rif __name__ == \"__main__\":\r\t\t\t\t\t\t\tcompress(sys.argv[1], sys.argv[2])\r\r"},"style_context_codestyle":{"kind":"number","value":180,"string":"180"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":531,"cells":{"code":{"kind":"string","value":"\n\n\n\n\n'''simple docstring'''\n\n\n\n\n\nfrom typing import TYPE_CHECKING\n\nfrom ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available\n\n\n__a =\t\t\t\t{\n '''configuration_chinese_clip''': [\n '''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',\n '''ChineseCLIPConfig''',\n '''ChineseCLIPOnnxConfig''',\n '''ChineseCLIPTextConfig''',\n '''ChineseCLIPVisionConfig''',\n ],\n '''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],\n}\n\ntry:\n if not is_vision_available():\n raise OptionalDependencyNotAvailable()\nexcept OptionalDependencyNotAvailable:\n pass\nelse:\n __a =\t\t\t\t['''ChineseCLIPFeatureExtractor''']\n __a =\t\t\t\t['''ChineseCLIPImageProcessor''']\n\ntry:\n if not is_torch_available():\n raise OptionalDependencyNotAvailable()\nexcept OptionalDependencyNotAvailable:\n pass\nelse:\n __a =\t\t\t\t[\n '''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',\n '''ChineseCLIPModel''',\n '''ChineseCLIPPreTrainedModel''',\n '''ChineseCLIPTextModel''',\n '''ChineseCLIPVisionModel''',\n ]\n\nif TYPE_CHECKING:\n from .configuration_chinese_clip import (\n CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,\n ChineseCLIPConfig,\n ChineseCLIPOnnxConfig,\n ChineseCLIPTextConfig,\n ChineseCLIPVisionConfig,\n )\n from .processing_chinese_clip import ChineseCLIPProcessor\n\n try:\n if not is_vision_available():\n raise OptionalDependencyNotAvailable()\n except OptionalDependencyNotAvailable:\n pass\n else:\n from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor\n\n try:\n if not is_torch_available():\n raise OptionalDependencyNotAvailable()\n except OptionalDependencyNotAvailable:\n pass\n else:\n from .modeling_chinese_clip import (\n CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,\n ChineseCLIPModel,\n ChineseCLIPPreTrainedModel,\n ChineseCLIPTextModel,\n ChineseCLIPVisionModel,\n )\n\nelse:\n import sys\n\n __a =\t\t\t\t_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)"},"code_codestyle":{"kind":"number","value":710,"string":"710"},"style_context":{"kind":"string","value":"\n\n\n\n\nimport json\nimport os\nimport shutil\nimport tempfile\nimport unittest\nfrom multiprocessing import get_context\nfrom pathlib import Path\n\nimport datasets\nimport numpy as np\nfrom datasets import load_dataset\nfrom parameterized import parameterized\n\nfrom transformers import AutoProcessor\nfrom transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor\nfrom transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES\nfrom transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow\nfrom transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available\n\nfrom ..wavaveca.test_feature_extraction_wavaveca import floats_list\n\n\nif is_pyctcdecode_available():\n from huggingface_hub import snapshot_download\n from pyctcdecode import BeamSearchDecoderCTC\n\n from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM\n from 
transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput\n\nif is_torch_available():\n from transformers import WavaVecaForCTC\n\n@require_pyctcdecode\nclass \t\t\t\t\t\t__a( unittest.TestCase ):\n\n\n\n\n\n\n \"\"\"simple docstring\"\"\"\n\n\n\n\n\n def \t\t\t\t\t\t\ta__ ( self\t\t\t\t\t\t)\t\t\t\t->\tList[Any]:\n UpperCAmelCase_ :\t\t\t\t\t\t\tDict = '''| a b c d e f g h i j k'''.split()\n UpperCAmelCase_ :\t\t\t\t\t\t\tint = dict(zip(_SCREAMING_SNAKE_CASE ,range(len(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\t\t\t\t\t\t)\t\t\t\t\t\t)\t\t\t\t\t\t)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[Any] = {\n '''unk_token''': '''''',\n '''bos_token''': '''''',\n '''eos_token''': '''''',\n }\n UpperCAmelCase_ :\t\t\t\t\t\t\tDict = {\n '''feature_size''': 1,\n '''padding_value''': 0.0,\n '''sampling_rate''': 16_000,\n '''return_attention_mask''': False,\n '''do_normalize''': True,\n }\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tstr = tempfile.mkdtemp()\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file''']\t\t\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[str] = os.path.join(self.tmpdirname ,_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n with open(self.vocab_file ,'''w''' ,encoding='''utf-8'''\t\t\t\t\t\t) as fp:\n fp.write(json.dumps(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t) + '''\\n'''\t\t\t\t\t\t)\n\n with open(self.feature_extraction_file ,'''w''' ,encoding='''utf-8'''\t\t\t\t\t\t) as fp:\n fp.write(json.dumps(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t) + '''\\n'''\t\t\t\t\t\t)\n\n # load decoder from hub\n UpperCAmelCase_ :\t\t\t\t\t\t\tstr = '''hf-internal-testing/ngram-beam-search-decoder'''\n\n\n\n\n\n def \t\t\t\t\t\t\ta__ ( self ,**_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\t\t\t\t->\tDict:\n UpperCAmelCase_ :\t\t\t\t\t\t\tint = self.add_kwargs_tokens_map.copy()\n kwargs.update(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n\n\n\n\n def \t\t\t\t\t\t\ta__ ( self ,**_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\t\t\t\t->\tAny:\n return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n\n\n\n\n def \t\t\t\t\t\t\ta__ ( self ,**_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\t\t\t\t->\tDict:\n return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n\n\n\n\n def \t\t\t\t\t\t\ta__ ( self\t\t\t\t\t\t)\t\t\t\t->\tint:\n shutil.rmtree(self.tmpdirname\t\t\t\t\t\t)\n\n\n\n\n\n def \t\t\t\t\t\t\ta__ ( self\t\t\t\t\t\t)\t\t\t\t->\tint:\n UpperCAmelCase_ :\t\t\t\t\t\t\tUnion[str, Any] = self.get_tokenizer()\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[str] = self.get_feature_extractor()\n UpperCAmelCase_ :\t\t\t\t\t\t\tTuple = self.get_decoder()\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tTuple = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n processor.save_pretrained(self.tmpdirname\t\t\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[str] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname\t\t\t\t\t\t)\n\n # tokenizer\n self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab()\t\t\t\t\t\t)\n self.assertIsInstance(processor.tokenizer ,_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n # feature extractor\n self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string()\t\t\t\t\t\t)\n self.assertIsInstance(processor.feature_extractor ,_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n # 
decoder\n self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels\t\t\t\t\t\t)\n self.assertEqual(\n processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)\n self.assertIsInstance(processor.decoder ,_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n\n\n\n\n def \t\t\t\t\t\t\ta__ ( self\t\t\t\t\t\t)\t\t\t\t->\tTuple:\n UpperCAmelCase_ :\t\t\t\t\t\t\tTuple = WavaVecaProcessorWithLM(\n tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder()\t\t\t\t\t\t)\n processor.save_pretrained(self.tmpdirname\t\t\t\t\t\t)\n\n # make sure that error is thrown when decoder alphabet doesn't match\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[int] = WavaVecaProcessorWithLM.from_pretrained(\n self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3\t\t\t\t\t\t)\n\n # decoder\n self.assertEqual(processor.language_model.alpha ,5.0\t\t\t\t\t\t)\n self.assertEqual(processor.language_model.beta ,3.0\t\t\t\t\t\t)\n self.assertEqual(processor.language_model.score_boundary ,-7.0\t\t\t\t\t\t)\n self.assertEqual(processor.language_model.unk_score_offset ,3\t\t\t\t\t\t)\n\n\n\n\n\n def \t\t\t\t\t\t\ta__ ( self\t\t\t\t\t\t)\t\t\t\t->\tDict:\n UpperCAmelCase_ :\t\t\t\t\t\t\tstr = self.get_tokenizer()\n # add token to trigger raise\n tokenizer.add_tokens(['''xx''']\t\t\t\t\t\t)\n with self.assertRaisesRegex(_SCREAMING_SNAKE_CASE ,'''include'''\t\t\t\t\t\t):\n WavaVecaProcessorWithLM(\n tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder()\t\t\t\t\t\t)\n\n\n\n\n\n def \t\t\t\t\t\t\ta__ ( self\t\t\t\t\t\t)\t\t\t\t->\tOptional[int]:\n UpperCAmelCase_ :\t\t\t\t\t\t\tAny = self.get_feature_extractor()\n UpperCAmelCase_ :\t\t\t\t\t\t\tUnion[str, Any] = self.get_tokenizer()\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[str] = self.get_decoder()\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tDict = floats_list((3, 1_000)\t\t\t\t\t\t)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tAny = feature_extractor(_SCREAMING_SNAKE_CASE ,return_tensors='''np'''\t\t\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tAny = processor(_SCREAMING_SNAKE_CASE ,return_tensors='''np'''\t\t\t\t\t\t)\n\n for key in input_feat_extract.keys():\n self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2\t\t\t\t\t\t)\n\n\n\n\n\n def \t\t\t\t\t\t\ta__ ( self\t\t\t\t\t\t)\t\t\t\t->\tList[str]:\n UpperCAmelCase_ :\t\t\t\t\t\t\tUnion[str, Any] = self.get_feature_extractor()\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[Any] = self.get_tokenizer()\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[Any] = self.get_decoder()\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tstr = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tAny = '''This is a test string'''\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[int] = processor(text=_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tAny = tokenizer(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n for key in encoded_tok.keys():\n self.assertListEqual(encoded_tok[key] ,encoded_processor[key]\t\t\t\t\t\t)\n\n\n\n\n\n def \t\t\t\t\t\t\ta__ ( self ,_SCREAMING_SNAKE_CASE=(2, 10, 16) ,_SCREAMING_SNAKE_CASE=77\t\t\t\t\t\t)\t\t\t\t->\tint:\n 
np.random.seed(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n return np.random.rand(*_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n\n\n\n\n def \t\t\t\t\t\t\ta__ ( self\t\t\t\t\t\t)\t\t\t\t->\tDict:\n UpperCAmelCase_ :\t\t\t\t\t\t\tint = self.get_feature_extractor()\n UpperCAmelCase_ :\t\t\t\t\t\t\tAny = self.get_tokenizer()\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[Any] = self.get_decoder()\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tstr = self._get_dummy_logits(shape=(10, 16) ,seed=13\t\t\t\t\t\t)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[Any] = processor.decode(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tint = decoder.decode_beams(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)[0]\n\n self.assertEqual(decoded_decoder[0] ,decoded_processor.text\t\t\t\t\t\t)\n self.assertEqual(''' ''' ,decoded_processor.text\t\t\t\t\t\t)\n self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score\t\t\t\t\t\t)\n self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score\t\t\t\t\t\t)\n\n\n\n\n\n @parameterized.expand([[None], ['''fork'''], ['''spawn''']]\t\t\t\t\t\t)\n def \t\t\t\t\t\t\ta__ ( self ,_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\t\t\t\t->\tOptional[Any]:\n UpperCAmelCase_ :\t\t\t\t\t\t\tDict = self.get_feature_extractor()\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[Any] = self.get_tokenizer()\n UpperCAmelCase_ :\t\t\t\t\t\t\tTuple = self.get_decoder()\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[int] = self._get_dummy_logits()\n\n # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.\n # otherwise, the LM won't be available to the pool's sub-processes.\n # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)\n if pool_context is None:\n UpperCAmelCase_ :\t\t\t\t\t\t\tDict = processor.batch_decode(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n else:\n with get_context(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t).Pool() as pool:\n UpperCAmelCase_ :\t\t\t\t\t\t\tstr = processor.batch_decode(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[Any] = list(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n with get_context('''fork'''\t\t\t\t\t\t).Pool() as p:\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[str] = decoder.decode_beams_batch(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n UpperCAmelCase_,\t\t\t\t\tUpperCAmelCase_,\t\t\t\t\tUpperCAmelCase_ :\t\t\t\t\t\t\tstr = [], [], []\n for beams in decoded_beams:\n texts_decoder.append(beams[0][0]\t\t\t\t\t\t)\n logit_scores_decoder.append(beams[0][-2]\t\t\t\t\t\t)\n lm_scores_decoder.append(beams[0][-1]\t\t\t\t\t\t)\n\n self.assertListEqual(_SCREAMING_SNAKE_CASE ,decoded_processor.text\t\t\t\t\t\t)\n self.assertListEqual([''' ''', ''' '''] ,decoded_processor.text\t\t\t\t\t\t)\n self.assertListEqual(_SCREAMING_SNAKE_CASE ,decoded_processor.logit_score\t\t\t\t\t\t)\n self.assertListEqual(_SCREAMING_SNAKE_CASE ,decoded_processor.lm_score\t\t\t\t\t\t)\n\n\n\n\n\n def \t\t\t\t\t\t\ta__ ( self\t\t\t\t\t\t)\t\t\t\t->\tOptional[Any]:\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[Any] = self.get_feature_extractor()\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[Any] = self.get_tokenizer()\n UpperCAmelCase_ :\t\t\t\t\t\t\tTuple = 
self.get_decoder()\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tAny = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[Any] = self._get_dummy_logits()\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[Any] = 15\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[Any] = -20.0\n UpperCAmelCase_ :\t\t\t\t\t\t\tTuple = -4.0\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tUnion[str, Any] = processor.batch_decode(\n _SCREAMING_SNAKE_CASE ,beam_width=_SCREAMING_SNAKE_CASE ,beam_prune_logp=_SCREAMING_SNAKE_CASE ,token_min_logp=_SCREAMING_SNAKE_CASE ,)\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[Any] = decoded_processor_out.text\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tint = list(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n with get_context('''fork'''\t\t\t\t\t\t).Pool() as pool:\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[str] = decoder.decode_beams_batch(\n _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,beam_width=_SCREAMING_SNAKE_CASE ,beam_prune_logp=_SCREAMING_SNAKE_CASE ,token_min_logp=_SCREAMING_SNAKE_CASE ,)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tstr = [d[0][0] for d in decoded_decoder_out]\n UpperCAmelCase_ :\t\t\t\t\t\t\tUnion[str, Any] = [d[0][2] for d in decoded_decoder_out]\n UpperCAmelCase_ :\t\t\t\t\t\t\tDict = [d[0][3] for d in decoded_decoder_out]\n\n self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n self.assertListEqual([''' ''', ''' '''] ,_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n self.assertTrue(np.array_equal(_SCREAMING_SNAKE_CASE ,decoded_processor_out.logit_score\t\t\t\t\t\t)\t\t\t\t\t\t)\n self.assertTrue(np.allclose([-20.0_54, -18.4_47] ,_SCREAMING_SNAKE_CASE ,atol=1e-3\t\t\t\t\t\t)\t\t\t\t\t\t)\n\n self.assertTrue(np.array_equal(_SCREAMING_SNAKE_CASE ,decoded_processor_out.lm_score\t\t\t\t\t\t)\t\t\t\t\t\t)\n self.assertTrue(np.allclose([-15.5_54, -13.94_74] ,_SCREAMING_SNAKE_CASE ,atol=1e-3\t\t\t\t\t\t)\t\t\t\t\t\t)\n\n\n\n\n\n def \t\t\t\t\t\t\ta__ ( self\t\t\t\t\t\t)\t\t\t\t->\tList[str]:\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[Any] = self.get_feature_extractor()\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[Any] = self.get_tokenizer()\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[int] = self.get_decoder()\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[Any] = self._get_dummy_logits()\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[Any] = 2.0\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[int] = 5.0\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[Any] = -20.0\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[str] = True\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tstr = processor.batch_decode(\n _SCREAMING_SNAKE_CASE ,alpha=_SCREAMING_SNAKE_CASE ,beta=_SCREAMING_SNAKE_CASE ,unk_score_offset=_SCREAMING_SNAKE_CASE ,lm_score_boundary=_SCREAMING_SNAKE_CASE ,)\n UpperCAmelCase_ :\t\t\t\t\t\t\tTuple = decoded_processor_out.text\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[Any] = list(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n decoder.reset_params(\n alpha=_SCREAMING_SNAKE_CASE ,beta=_SCREAMING_SNAKE_CASE ,unk_score_offset=_SCREAMING_SNAKE_CASE ,lm_score_boundary=_SCREAMING_SNAKE_CASE ,)\n\n with get_context('''fork'''\t\t\t\t\t\t).Pool() as pool:\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[int] = decoder.decode_beams_batch(\n _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[str] = [d[0][0] for d in decoded_decoder_out]\n\n 
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n self.assertListEqual([''' ''', ''' '''] ,_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tAny = processor.decoder.model_container[processor.decoder._model_key]\n self.assertEqual(lm_model.alpha ,2.0\t\t\t\t\t\t)\n self.assertEqual(lm_model.beta ,5.0\t\t\t\t\t\t)\n self.assertEqual(lm_model.unk_score_offset ,-20.0\t\t\t\t\t\t)\n self.assertEqual(lm_model.score_boundary ,_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n\n\n\n\n def \t\t\t\t\t\t\ta__ ( self\t\t\t\t\t\t)\t\t\t\t->\tList[str]:\n UpperCAmelCase_ :\t\t\t\t\t\t\tDict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm'''\t\t\t\t\t\t)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tTuple = processor.decoder.model_container[processor.decoder._model_key]\n UpperCAmelCase_ :\t\t\t\t\t\t\tDict = Path(language_model._kenlm_model.path.decode('''utf-8'''\t\t\t\t\t\t)\t\t\t\t\t\t).parent.parent.absolute()\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tAny = os.listdir(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[Any] = ['''alphabet.json''', '''language_model''']\n\n downloaded_decoder_files.sort()\n expected_decoder_files.sort()\n\n # test that only decoder relevant files from\n # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main\n # are downloaded and none of the rest (e.g. README.md, ...)\n self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n\n\n\n\n def \t\t\t\t\t\t\ta__ ( self\t\t\t\t\t\t)\t\t\t\t->\tList[Any]:\n UpperCAmelCase_ :\t\t\t\t\t\t\tint = snapshot_download('''hf-internal-testing/processor_with_lm'''\t\t\t\t\t\t)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[str] = WavaVecaProcessorWithLM.from_pretrained(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[Any] = processor.decoder.model_container[processor.decoder._model_key]\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[str] = Path(language_model._kenlm_model.path.decode('''utf-8'''\t\t\t\t\t\t)\t\t\t\t\t\t).parent.parent.absolute()\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[Any] = os.listdir(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tstr = os.listdir(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n local_decoder_files.sort()\n expected_decoder_files.sort()\n\n # test that both decoder form hub and local files in cache are the same\n self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n\n\n\n\n def \t\t\t\t\t\t\ta__ ( self\t\t\t\t\t\t)\t\t\t\t->\tOptional[Any]:\n UpperCAmelCase_ :\t\t\t\t\t\t\tstr = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm'''\t\t\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm'''\t\t\t\t\t\t)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tTuple = floats_list((3, 1_000)\t\t\t\t\t\t)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[Any] = processor_wavaveca(_SCREAMING_SNAKE_CASE ,return_tensors='''np'''\t\t\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[str] = processor_auto(_SCREAMING_SNAKE_CASE ,return_tensors='''np'''\t\t\t\t\t\t)\n\n for key in input_wavaveca.keys():\n self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1e-2\t\t\t\t\t\t)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tAny = self._get_dummy_logits()\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tint = processor_wavaveca.batch_decode(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tTuple = 
processor_auto.batch_decode(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text\t\t\t\t\t\t)\n\n\n\n\n\n def \t\t\t\t\t\t\ta__ ( self\t\t\t\t\t\t)\t\t\t\t->\tList[str]:\n UpperCAmelCase_ :\t\t\t\t\t\t\tUnion[str, Any] = self.get_feature_extractor()\n UpperCAmelCase_ :\t\t\t\t\t\t\tUnion[str, Any] = self.get_tokenizer()\n UpperCAmelCase_ :\t\t\t\t\t\t\tAny = self.get_decoder()\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n self.assertListEqual(\n processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)\n\n\n\n\n\n @staticmethod\n def \t\t\t\t\t\t\ta__ ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\t\t\t\t->\tTuple:\n UpperCAmelCase_ :\t\t\t\t\t\t\tint = [d[key] for d in offsets]\n return retrieved_list\n\n\n\n\n\n def \t\t\t\t\t\t\ta__ ( self\t\t\t\t\t\t)\t\t\t\t->\tOptional[int]:\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[str] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm'''\t\t\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tUnion[str, Any] = self._get_dummy_logits()[0]\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tTuple = processor.decode(_SCREAMING_SNAKE_CASE ,output_word_offsets=_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n # check Wav2Vec2CTCTokenizerOutput keys for word\n self.assertEqual(len(outputs.keys()\t\t\t\t\t\t) ,4\t\t\t\t\t\t)\n self.assertTrue('''text''' in outputs\t\t\t\t\t\t)\n self.assertTrue('''word_offsets''' in outputs\t\t\t\t\t\t)\n self.assertTrue(isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\t\t\t\t\t\t)\n\n self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word'''\t\t\t\t\t\t)\t\t\t\t\t\t) ,outputs.text\t\t\t\t\t\t)\n self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word'''\t\t\t\t\t\t) ,['''''', '''''', '''''']\t\t\t\t\t\t)\n self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset'''\t\t\t\t\t\t) ,[0, 2, 4]\t\t\t\t\t\t)\n self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset'''\t\t\t\t\t\t) ,[1, 3, 5]\t\t\t\t\t\t)\n\n\n\n\n\n def \t\t\t\t\t\t\ta__ ( self\t\t\t\t\t\t)\t\t\t\t->\tOptional[int]:\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm'''\t\t\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tint = self._get_dummy_logits()\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[str] = processor.batch_decode(_SCREAMING_SNAKE_CASE ,output_word_offsets=_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n # check Wav2Vec2CTCTokenizerOutput keys for word\n self.assertEqual(len(outputs.keys()\t\t\t\t\t\t) ,4\t\t\t\t\t\t)\n self.assertTrue('''text''' in outputs\t\t\t\t\t\t)\n self.assertTrue('''word_offsets''' in outputs\t\t\t\t\t\t)\n self.assertTrue(isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\t\t\t\t\t\t)\n\n self.assertListEqual(\n [''' '''.join(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''word'''\t\t\t\t\t\t)\t\t\t\t\t\t) for o in outputs['''word_offsets''']] ,outputs.text\t\t\t\t\t\t)\n self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word'''\t\t\t\t\t\t) ,['''''', '''''', '''''']\t\t\t\t\t\t)\n self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset'''\t\t\t\t\t\t) ,[0, 2, 
4]\t\t\t\t\t\t)\n self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset'''\t\t\t\t\t\t) ,[1, 3, 5]\t\t\t\t\t\t)\n\n\n\n\n\n @slow\n @require_torch\n @require_torchaudio\n def \t\t\t\t\t\t\ta__ ( self\t\t\t\t\t\t)\t\t\t\t->\tUnion[str, Any]:\n import torch\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[str] = load_dataset('''common_voice''' ,'''en''' ,split='''train''' ,streaming=_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tDict = ds.cast_column('''audio''' ,datasets.Audio(sampling_rate=16_000\t\t\t\t\t\t)\t\t\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tTuple = iter(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tUnion[str, Any] = next(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tAny = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm'''\t\t\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tDict = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm'''\t\t\t\t\t\t)\n\n # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[str] = processor(sample['''audio''']['''array'''] ,return_tensors='''pt'''\t\t\t\t\t\t).input_values\n\n with torch.no_grad():\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[str] = model(_SCREAMING_SNAKE_CASE\t\t\t\t\t\t).logits.cpu().numpy()\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tstr = processor.decode(logits[0] ,output_word_offsets=_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tTuple = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate\n UpperCAmelCase_ :\t\t\t\t\t\t\tAny = [\n {\n '''start_time''': d['''start_offset'''] * time_offset,\n '''end_time''': d['''end_offset'''] * time_offset,\n '''word''': d['''word'''],\n }\n for d in output['''word_offsets''']\n ]\n\n UpperCAmelCase_ :\t\t\t\t\t\t\tAny = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''\n\n # output words\n self.assertEqual(''' '''.join(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''word'''\t\t\t\t\t\t)\t\t\t\t\t\t) ,_SCREAMING_SNAKE_CASE\t\t\t\t\t\t)\n self.assertEqual(''' '''.join(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''word'''\t\t\t\t\t\t)\t\t\t\t\t\t) ,output.text\t\t\t\t\t\t)\n\n # output times\n UpperCAmelCase_ :\t\t\t\t\t\t\tList[Any] = torch.tensor(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''start_time'''\t\t\t\t\t\t)\t\t\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tstr = torch.tensor(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''end_time'''\t\t\t\t\t\t)\t\t\t\t\t\t)\n\n # fmt: off\n UpperCAmelCase_ :\t\t\t\t\t\t\tstr = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99]\t\t\t\t\t\t)\n UpperCAmelCase_ :\t\t\t\t\t\t\tOptional[int] = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94]\t\t\t\t\t\t)\n # fmt: on\n\n self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=0.01\t\t\t\t\t\t)\t\t\t\t\t\t)\n self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=0.01\t\t\t\t\t\t)\t\t\t\t\t\t)"},"style_context_codestyle":{"kind":"number","value":300,"string":"300"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":532,"cells":{"code":{"kind":"string","value":"\r\r\r\r\r\r'''simple docstring'''\r\r\r\r\r\r\rfrom .glue import 
# ---- row 532 · code (codestyle 38) ----
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    LineByLineWithSOPTextDataset,
    TextDataset,
    TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments

# ---- row 532 · style_context (codestyle 654) · label 0 ----
class MaxFenwickTree:
    """Binary-indexed tree that answers range-maximum queries."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # propagate the maximum of the covered range upwards
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # `right` is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
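As a quick illustration of the update/query API reconstructed above (the class and method names are my reading of the obfuscated original, so treat this as a sketch rather than the canonical interface):

# Illustrative usage of the reconstructed MaxFenwickTree.
tree = MaxFenwickTree(8)
tree.update(2, 5)
tree.update(5, 9)
tree.update(6, 4)
print(tree.query(0, 6))  # 9 -> maximum over the half-open range [0, 6)
print(tree.query(3, 6))  # 9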
# ---- row 533 · code (codestyle 719) ----
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
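A minimal sketch of instantiating the config class above; the override values are illustrative and not taken from the snippet:

config = RwkvConfig()  # defaults: vocab_size=50277, context_length=1024, hidden_size=4096
small = RwkvConfig(hidden_size=768, num_hidden_layers=12, context_length=2048)
print(small.attention_hidden_size)  # falls back to hidden_size (768) when not set explicitly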
# ---- row 533 · style_context (codestyle 648) · label 0 ----
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
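A hedged sketch of how such a tool is typically exercised once instantiated; in practice it is driven by an agent, and the direct-call convention below is an assumption rather than something shown in the snippet:

# Hypothetical direct usage of the zero-shot text classification tool.
classifier = TextClassificationTool()
label = classifier("This is a super nice API!", labels=["positive", "negative"])
print(label)  # expected: "positive"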
# ---- row 534 · code (codestyle 69) ----
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np

import datasets

from .execute import check_correctness


_CITATION = """\
@misc{chen2021evaluating,
    title={Evaluating Large Language Models Trained on Code},
    author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan and Henrique Ponde de Oliveira Pinto
        and Jared Kaplan and Harri Edwards and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray
        and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf and Girish Sastry and Pamela Mishkin
        and Brooke Chan and Scott Gray and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser
        and Mohammad Bavarian and Clemens Winter and Philippe Tillet and Felipe Petroski Such and Dave Cummings
        and Matthias Plappert and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss and William Hebgen Guss
        and Alex Nichol and Alex Paino and Nikolas Tezak and Jie Tang and Igor Babuschkin and Suchir Balaji
        and Shantanu Jain and William Saunders and Christopher Hesse and Andrew N. Carr and Jan Leike and Josh Achiam
        and Vedant Misra and Evan Morikawa and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati
        and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei and Sam McCandlish and Ilya Sutskever
        and Wojciech Zaremba},
    year={2021},
    eprint={2107.03374},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}
"""

_DESCRIPTION = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
"""

_KWARGS_DESCRIPTION = """
Calculates how good are predictions given some references, using certain scores
Args:
    predictions: list of candidates to evaluate. Each candidates should be a list
        of strings with several code candidates to solve the problem.
    references: a list with a test for each prediction. Each test should evaluate the
        correctness of a code candidate.
    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout:
Returns:
    pass_at_k: dict with pass rates for each k
    results: dict with granular results of each unittest
Examples:
    >>> code_eval = datasets.load_metric("code_eval")
    >>> test_cases = ["assert add(2,3)==5"]
    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
    >>> print(pass_at_k)
    {'pass@1': 0.5, 'pass@2': 1.0}
"""

_WARNING = """
################################################################################
                                  !!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).

Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:

>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"

################################################################################\
"""

_LICENSE = """The MIT License

Copyright (c) OpenAI (https://openai.com)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
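For intuition, the unbiased estimator above computes pass@k = 1 - C(n-c, k) / C(n, k) for n samples of which c pass. A small, self-contained check of that formula (the numbers are illustrative, not taken from the snippet):

import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    # 1 - C(n-c, k) / C(n, k), computed stably as a running product
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

# 4 samples per task, 1 correct: pass@1 = 0.25, pass@2 = 0.5
print(round(pass_at_k(4, 1, 1), 2), round(pass_at_k(4, 1, 2), 2))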
# ---- row 534 · style_context (codestyle 234) · label 0 ----
class Graph:
    def __init__(self):
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        # check if vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
# ---- row 535 · code (codestyle 364) ----
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is accepted only as 0.04 or 0.06 (common empirical choices)
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - self.k * (trace**2)  # corner response; original hard-coded k = 0.04 here
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
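The detector above scores each window with the Harris response R = det(M) - k * trace(M)^2, where M is the 2x2 structure tensor of the window; large positive R indicates a corner, while an edge yields a negative or near-zero value. A tiny, illustrative check of that formula on hand-built tensors (values are made up):

import numpy as np

def harris_response(m: np.ndarray, k: float = 0.04) -> float:
    # R = det(M) - k * trace(M)^2
    return float(np.linalg.det(m) - k * np.trace(m) ** 2)

corner_like = np.array([[10.0, 0.0], [0.0, 9.0]])  # strong gradients in both directions
edge_like = np.array([[10.0, 0.0], [0.0, 0.1]])    # strong gradient in one direction only
print(harris_response(corner_like) > harris_response(edge_like))  # True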
# ---- row 535 · style_context (codestyle 364) · label 1 ----
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional

import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForAudioClassification,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")


def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000):
    """Randomly cut a chunk of at most `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]


@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    train_file: Optional[str] = field(default=None, metadata={"help": "A file containing the training audio paths and labels."})
    eval_file: Optional[str] = field(default=None, metadata={"help": "A file containing the validation audio paths and labels."})
    train_split_name: str = field(default="train", metadata={"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"})
    eval_split_name: str = field(default="validation", metadata={"help": "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"})
    audio_column_name: str = field(default="audio", metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"})
    label_column_name: str = field(default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"})
    max_train_samples: Optional[int] = field(default=None, metadata={"help": "For debugging purposes or quicker training, truncate the number of training examples to this value if set."})
    max_eval_samples: Optional[int] = field(default=None, metadata={"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set."})
    max_length_seconds: float = field(default=20, metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."})


@dataclass
class ModelArguments:
    model_name_or_path: str = field(default="facebook/wav2vec2-base", metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"})
    model_revision: str = field(default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."})
    feature_extractor_name: Optional[str] = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    freeze_feature_encoder: bool = field(default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."})
    attention_mask: bool = field(default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."})
    use_auth_token: bool = field(default=False, metadata={"help": "Will use the token generated when running `huggingface-cli login` (necessary to use this script with private models)."})
    freeze_feature_extractor: Optional[bool] = field(default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."})
    ignore_mismatched_sizes: bool = field(default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."})

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    def val_transforms(batch):
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
# ---- row 536 · code (codestyle 612) ----
from collections.abc import Callable


class Heap:
    """A generic heap keyed by `key`; the item with the largest keyed value sits at the top."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


if __name__ == "__main__":
    import doctest

    doctest.testmod()
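A brief usage sketch of the heap as reconstructed above (method names follow that reconstruction; with the default key it behaves as a max-heap over the item values):

h = Heap()
h.insert_item(5, 34)
h.insert_item(6, 31)
h.insert_item(7, 37)
print(h.get_top())      # [7, 37]
print(h.extract_top())  # [7, 37]
print(h.get_top())      # [5, 34]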
# ---- row 536 · style_context (codestyle 612) · label 1 ----
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    # class variable mapping decorated functions to their cache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        if key in self.cache:
            self.hits += 1
            value_node = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1

        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
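A short usage sketch of the decorator reconstructed above; memoizing a recursive function is the typical case, and the numbers below are just for illustration:

@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)

print(fib(20))           # 6765
print(fib.cache_info())  # e.g. CacheInfo(hits=..., misses=..., capacity=100, current size=...)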
Union[str, Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n for timesteps in [1, 5, 1_0_0, 1_0_0_0]:\r\n self.check_over_configs(num_train_timesteps=__UpperCamelCase )\r\n\r\n\r\n\r\n\r\n\r\n def _lowercase\t\t\t\t(\t\tself\t\t\t\t\t\t:\t\t\t\t\tOptional[int] ) -> List[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1]\t\t\t\t\t, [0.002, 0.02, 0.2, 2] ):\r\n self.check_over_configs(beta_start=__UpperCamelCase\t\t\t\t\t, beta_end=__UpperCamelCase )\r\n\r\n\r\n\r\n\r\n\r\n def _lowercase\t\t\t\t(\t\tself\t\t\t\t\t\t:\t\t\t\t\tint ) -> Tuple:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n for schedule in [\"linear\", \"squaredcos_cap_v2\"]:\r\n self.check_over_configs(beta_schedule=__UpperCamelCase )\r\n\r\n\r\n\r\n\r\n\r\n def _lowercase\t\t\t\t(\t\tself\t\t\t\t\t\t:\t\t\t\t\tTuple ) -> Optional[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n for variance in [\"fixed_small\", \"fixed_large\", \"other\"]:\r\n self.check_over_configs(variance_type=__UpperCamelCase )\r\n\r\n\r\n\r\n\r\n\r\n def _lowercase\t\t\t\t(\t\tself\t\t\t\t\t\t:\t\t\t\t\tint ) -> Union[str, Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n for clip_sample in [True, False]:\r\n self.check_over_configs(clip_sample=__UpperCamelCase )\r\n\r\n\r\n\r\n\r\n\r\n def _lowercase\t\t\t\t(\t\tself\t\t\t\t\t\t:\t\t\t\t\tUnion[str, Any] ) -> List[str]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n self.check_over_configs(thresholding=__UpperCamelCase )\r\n for threshold in [0.5, 1.0, 2.0]:\r\n for prediction_type in [\"epsilon\", \"sample\", \"v_prediction\"]:\r\n self.check_over_configs(\r\n thresholding=__UpperCamelCase\t\t\t\t\t, prediction_type=__UpperCamelCase\t\t\t\t\t, sample_max_value=__UpperCamelCase\t\t\t\t\t, )\r\n\r\n\r\n\r\n\r\n\r\n def _lowercase\t\t\t\t(\t\tself\t\t\t\t\t\t:\t\t\t\t\tint ) -> List[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n for prediction_type in [\"epsilon\", \"sample\", \"v_prediction\"]:\r\n self.check_over_configs(prediction_type=__UpperCamelCase )\r\n\r\n\r\n\r\n\r\n\r\n def _lowercase\t\t\t\t(\t\tself\t\t\t\t\t\t:\t\t\t\t\tint ) -> int:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n for t in [0, 5_0_0, 9_9_9]:\r\n self.check_over_forward(time_step=__UpperCamelCase )\r\n\r\n\r\n\r\n\r\n\r\n def _lowercase\t\t\t\t(\t\tself\t\t\t\t\t\t:\t\t\t\t\tList[str] ) -> int:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= self.scheduler_classes[0]\r\n UpperCAmelCase\t\t\t\t\t\t\t= self.get_scheduler_config()\r\n UpperCAmelCase\t\t\t\t\t\t\t= scheduler_class(**__UpperCamelCase )\r\n\r\n assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5\r\n assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1e-5\r\n assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5\r\n\r\n\r\n\r\n\r\n\r\n def _lowercase\t\t\t\t(\t\tself\t\t\t\t\t\t:\t\t\t\t\tList[Any] ) -> Any:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= self.scheduler_classes[0]\r\n UpperCAmelCase\t\t\t\t\t\t\t= self.get_scheduler_config()\r\n 
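
As a readable aside: the escaped test row around this point configures a DDPMScheduler and, further on, drives a full reverse-diffusion loop with a dummy model. A minimal sketch of that usage pattern is below; torch and diffusers are assumed to be installed, and fake_denoiser is a hypothetical stand-in for the test's dummy network, not anything defined in the row itself.

import torch
from diffusers import DDPMScheduler

# Same knobs as the scheduler config exercised in the row above.
scheduler = DDPMScheduler(
    num_train_timesteps=1000,
    beta_start=0.0001,
    beta_end=0.02,
    beta_schedule="linear",
    variance_type="fixed_small",
    clip_sample=True,
)

def fake_denoiser(sample, t):
    # Hypothetical stand-in for a noise-prediction network; it only needs to
    # return a tensor shaped like `sample` for scheduler.step() to work.
    return torch.zeros_like(sample)

sample = torch.randn(1, 3, 8, 8)      # start from pure noise
generator = torch.manual_seed(0)

for t in reversed(range(scheduler.config.num_train_timesteps)):
    model_output = fake_denoiser(sample, t)        # 1. predict the noise residual
    sample = scheduler.step(                       # 2. step x_t -> x_{t-1}
        model_output, t, sample, generator=generator
    ).prev_sample
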
UpperCAmelCase\t\t\t\t\t\t\t= scheduler_class(**__UpperCamelCase )\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= len(__UpperCamelCase )\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= self.dummy_model()\r\n UpperCAmelCase\t\t\t\t\t\t\t= self.dummy_sample_deter\r\n UpperCAmelCase\t\t\t\t\t\t\t= torch.manual_seed(0 )\r\n\r\n for t in reversed(range(__UpperCamelCase ) ):\r\n # 1. predict noise residual\r\n UpperCAmelCase\t\t\t\t\t\t\t= model(__UpperCamelCase\t\t\t\t\t, __UpperCamelCase )\r\n\r\n # 2. predict previous mean of sample x_t-1\r\n UpperCAmelCase\t\t\t\t\t\t\t= scheduler.step(__UpperCamelCase\t\t\t\t\t, __UpperCamelCase\t\t\t\t\t, __UpperCamelCase\t\t\t\t\t, generator=__UpperCamelCase ).prev_sample\r\n\r\n # if t > 0:\r\n # noise = self.dummy_sample_deter\r\n # variance = scheduler.get_variance(t) ** (0.5) * noise\r\n #\r\n # sample = pred_prev_sample + variance\r\n UpperCAmelCase\t\t\t\t\t\t\t= pred_prev_sample\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= torch.sum(torch.abs(__UpperCamelCase ) )\r\n UpperCAmelCase\t\t\t\t\t\t\t= torch.mean(torch.abs(__UpperCamelCase ) )\r\n\r\n assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2\r\n assert abs(result_mean.item() - 0.3_372 ) < 1e-3\r\n\r\n\r\n\r\n\r\n\r\n def _lowercase\t\t\t\t(\t\tself\t\t\t\t\t\t:\t\t\t\t\tOptional[int] ) -> Dict:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= self.scheduler_classes[0]\r\n UpperCAmelCase\t\t\t\t\t\t\t= self.get_scheduler_config(prediction_type=\"\"\"v_prediction\"\"\" )\r\n UpperCAmelCase\t\t\t\t\t\t\t= scheduler_class(**__UpperCamelCase )\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= len(__UpperCamelCase )\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= self.dummy_model()\r\n UpperCAmelCase\t\t\t\t\t\t\t= self.dummy_sample_deter\r\n UpperCAmelCase\t\t\t\t\t\t\t= torch.manual_seed(0 )\r\n\r\n for t in reversed(range(__UpperCamelCase ) ):\r\n # 1. predict noise residual\r\n UpperCAmelCase\t\t\t\t\t\t\t= model(__UpperCamelCase\t\t\t\t\t, __UpperCamelCase )\r\n\r\n # 2. 
predict previous mean of sample x_t-1\r\n UpperCAmelCase\t\t\t\t\t\t\t= scheduler.step(__UpperCamelCase\t\t\t\t\t, __UpperCamelCase\t\t\t\t\t, __UpperCamelCase\t\t\t\t\t, generator=__UpperCamelCase ).prev_sample\r\n\r\n # if t > 0:\r\n # noise = self.dummy_sample_deter\r\n # variance = scheduler.get_variance(t) ** (0.5) * noise\r\n #\r\n # sample = pred_prev_sample + variance\r\n UpperCAmelCase\t\t\t\t\t\t\t= pred_prev_sample\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= torch.sum(torch.abs(__UpperCamelCase ) )\r\n UpperCAmelCase\t\t\t\t\t\t\t= torch.mean(torch.abs(__UpperCamelCase ) )\r\n\r\n assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2\r\n assert abs(result_mean.item() - 0.2_631 ) < 1e-3\r\n\r\n\r\n\r\n\r\n\r\n def _lowercase\t\t\t\t(\t\tself\t\t\t\t\t\t:\t\t\t\t\tList[Any] ) -> Optional[int]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= self.scheduler_classes[0]\r\n UpperCAmelCase\t\t\t\t\t\t\t= self.get_scheduler_config()\r\n UpperCAmelCase\t\t\t\t\t\t\t= scheduler_class(**__UpperCamelCase )\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= [1_0_0, 8_7, 5_0, 1, 0]\r\n\r\n scheduler.set_timesteps(timesteps=__UpperCamelCase )\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= scheduler.timesteps\r\n\r\n for i, timestep in enumerate(__UpperCamelCase ):\r\n if i == len(__UpperCamelCase ) - 1:\r\n UpperCAmelCase\t\t\t\t\t\t\t= -1\r\n else:\r\n UpperCAmelCase\t\t\t\t\t\t\t= timesteps[i + 1]\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= scheduler.previous_timestep(__UpperCamelCase )\r\n UpperCAmelCase\t\t\t\t\t\t\t= prev_t.item()\r\n\r\n self.assertEqual(__UpperCamelCase\t\t\t\t\t, __UpperCamelCase )\r\n\r\n\r\n\r\n\r\n\r\n def _lowercase\t\t\t\t(\t\tself\t\t\t\t\t\t:\t\t\t\t\tTuple ) -> Tuple:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= self.scheduler_classes[0]\r\n UpperCAmelCase\t\t\t\t\t\t\t= self.get_scheduler_config()\r\n UpperCAmelCase\t\t\t\t\t\t\t= scheduler_class(**__UpperCamelCase )\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= [1_0_0, 8_7, 5_0, 5_1, 0]\r\n\r\n with self.assertRaises(__UpperCamelCase\t\t\t\t\t, msg=\"\"\"`custom_timesteps` must be in descending order.\"\"\" ):\r\n scheduler.set_timesteps(timesteps=__UpperCamelCase )\r\n\r\n\r\n\r\n\r\n\r\n def _lowercase\t\t\t\t(\t\tself\t\t\t\t\t\t:\t\t\t\t\tDict ) -> Union[str, Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= self.scheduler_classes[0]\r\n UpperCAmelCase\t\t\t\t\t\t\t= self.get_scheduler_config()\r\n UpperCAmelCase\t\t\t\t\t\t\t= scheduler_class(**__UpperCamelCase )\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= [1_0_0, 8_7, 5_0, 1, 0]\r\n UpperCAmelCase\t\t\t\t\t\t\t= len(__UpperCamelCase )\r\n\r\n with self.assertRaises(__UpperCamelCase\t\t\t\t\t, msg=\"\"\"Can only pass one of `num_inference_steps` or `custom_timesteps`.\"\"\" ):\r\n scheduler.set_timesteps(num_inference_steps=__UpperCamelCase\t\t\t\t\t, timesteps=__UpperCamelCase )\r\n\r\n\r\n\r\n\r\n\r\n def _lowercase\t\t\t\t(\t\tself\t\t\t\t\t\t:\t\t\t\t\tOptional[int] ) -> Optional[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= self.scheduler_classes[0]\r\n UpperCAmelCase\t\t\t\t\t\t\t= self.get_scheduler_config()\r\n UpperCAmelCase\t\t\t\t\t\t\t= scheduler_class(**__UpperCamelCase )\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= [scheduler.config.num_train_timesteps]\r\n\r\n with self.assertRaises(\r\n 
__UpperCamelCase\t\t\t\t\t, msg=\"\"\"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}\"\"\"\t\t\t\t\t, ):\r\n scheduler.set_timesteps(timesteps=__UpperCamelCase )\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":719,"string":"719"},"style_context":{"kind":"string","value":"\r\n\r\n\r\nfrom ...configuration_utils import PretrainedConfig\r\nfrom ...utils import logging\r\n\r\n\r\n__a =\t\t\tlogging.get_logger(__name__)\r\n\r\n__a =\t\t\t{\r\n \"\"\"facebook/nllb-moe-54B\"\"\": \"\"\"https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json\"\"\",\r\n}\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass __lowercase\t\t\t\t( __snake_case ):\r\n UpperCamelCase\t\t\t\t\t\t\t\t= '''nllb-moe'''\r\n UpperCamelCase\t\t\t\t\t\t\t\t= ['''past_key_values''']\r\n UpperCamelCase\t\t\t\t\t\t\t\t= {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}\r\n\r\n\r\n\r\n\r\n\r\n def __init__(\t\tself\t\t\t\t\t\t:\t\t\t\t\tOptional[int]\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tOptional[Any]=1_2_8_1_1_2\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tDict=1_0_2_4\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tOptional[int]=1_2\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tUnion[str, Any]=4_0_9_6\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tList[str]=1_6\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tList[str]=1_2\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tint=4_0_9_6\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tTuple=1_6\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tstr=0.05\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tList[str]=0.05\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tList[Any]=True\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tTuple=True\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tstr=\"relu\"\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tDict=1_0_2_4\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tList[str]=0.1\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tOptional[int]=0.1\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tList[Any]=0.0\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tOptional[Any]=0.02\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tDict=2\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tUnion[str, Any]=True\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tAny=False\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tTuple=\"float32\"\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tAny=False\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tOptional[int]=1_2_8\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tList[str]=6_4\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tList[Any]=4\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tTuple=4\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tstr=0.001\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tOptional[int]=0.001\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tTuple=\"all\"\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tAny=False\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tOptional[int]=False\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tList[str]=1.0\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tDict=0.2\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tUnion[str, Any]=1\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tint=0\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tDict=2\t\t\t\t\t, __lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tint=False\t\t\t\t\t, **__lowerCamelCase\t\t\t\t\t\t:\t\t\t\t\tstr\t\t\t\t\t, ) -> 
int:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= vocab_size\r\n UpperCAmelCase\t\t\t\t\t\t\t= max_position_embeddings\r\n UpperCAmelCase\t\t\t\t\t\t\t= d_model\r\n UpperCAmelCase\t\t\t\t\t\t\t= encoder_ffn_dim\r\n UpperCAmelCase\t\t\t\t\t\t\t= encoder_layers\r\n UpperCAmelCase\t\t\t\t\t\t\t= encoder_attention_heads\r\n UpperCAmelCase\t\t\t\t\t\t\t= decoder_ffn_dim\r\n UpperCAmelCase\t\t\t\t\t\t\t= decoder_layers\r\n UpperCAmelCase\t\t\t\t\t\t\t= decoder_attention_heads\r\n UpperCAmelCase\t\t\t\t\t\t\t= dropout\r\n UpperCAmelCase\t\t\t\t\t\t\t= attention_dropout\r\n UpperCAmelCase\t\t\t\t\t\t\t= activation_dropout\r\n UpperCAmelCase\t\t\t\t\t\t\t= activation_function\r\n UpperCAmelCase\t\t\t\t\t\t\t= init_std\r\n UpperCAmelCase\t\t\t\t\t\t\t= encoder_layerdrop\r\n UpperCAmelCase\t\t\t\t\t\t\t= decoder_layerdrop\r\n UpperCAmelCase\t\t\t\t\t\t\t= use_cache\r\n UpperCAmelCase\t\t\t\t\t\t\t= encoder_layers\r\n UpperCAmelCase\t\t\t\t\t\t\t= scale_embedding # scale factor will be sqrt(d_model) if True\r\n UpperCAmelCase\t\t\t\t\t\t\t= router_z_loss_coef\r\n UpperCAmelCase\t\t\t\t\t\t\t= router_aux_loss_coef\r\n UpperCAmelCase\t\t\t\t\t\t\t= decoder_sparse_step\r\n UpperCAmelCase\t\t\t\t\t\t\t= encoder_sparse_step\r\n UpperCAmelCase\t\t\t\t\t\t\t= num_experts\r\n UpperCAmelCase\t\t\t\t\t\t\t= expert_capacity\r\n UpperCAmelCase\t\t\t\t\t\t\t= router_bias\r\n if router_dtype not in [\"float32\", \"float16\", \"bfloat16\"]:\r\n raise ValueError(F\"\"\"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}\"\"\" )\r\n UpperCAmelCase\t\t\t\t\t\t\t= router_dtype\r\n\r\n UpperCAmelCase\t\t\t\t\t\t\t= router_ignore_padding_tokens\r\n UpperCAmelCase\t\t\t\t\t\t\t= batch_prioritized_routing\r\n UpperCAmelCase\t\t\t\t\t\t\t= second_expert_policy\r\n UpperCAmelCase\t\t\t\t\t\t\t= normalize_router_prob_before_dropping\r\n UpperCAmelCase\t\t\t\t\t\t\t= moe_eval_capacity_token_fraction\r\n UpperCAmelCase\t\t\t\t\t\t\t= moe_token_dropout\r\n UpperCAmelCase\t\t\t\t\t\t\t= output_router_logits\r\n super().__init__(\r\n pad_token_id=__lowerCamelCase\t\t\t\t\t, bos_token_id=__lowerCamelCase\t\t\t\t\t, eos_token_id=__lowerCamelCase\t\t\t\t\t, is_encoder_decoder=__lowerCamelCase\t\t\t\t\t, decoder_start_token_id=__lowerCamelCase\t\t\t\t\t, **__lowerCamelCase\t\t\t\t\t, )\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":627,"string":"627"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":538,"cells":{"code":{"kind":"string","value":"\n\n\n\n\n\n\nimport argparse\nimport json\nimport os\n\nimport evaluate\nimport torch\nfrom datasets import load_dataset\nfrom torch.optim import AdamW\nfrom torch.utils.data import DataLoader\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed\n\nfrom accelerate import Accelerator, DistributedType\nfrom accelerate.utils.deepspeed import DummyOptim, DummyScheduler\n\n\n__A \t\t\t=\t\t\t\t\t16\n__A \t\t\t=\t\t\t\t\t32\n\n\n\n\ndef __A ( _lowercase , _lowercase = 16 , _lowercase = \"bert-base-cased\"\t\t\t):\n\n\t\t\t\t\t'''simple docstring'''\n\n\n\n\n\n\n\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tAutoTokenizer.from_pretrained(_lowercase\t\t\t)\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tload_dataset('''glue''' , '''mrpc'''\t\t\t)\n\n\t\t\t\t\tdef tokenize_function(_lowercase\t\t\t):\n\t\t\t\t\t\t\t\t\t\t# max_length=None => use the model max length (it's actually the 
default)\n\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_lowercase , max_length=_lowercase\t\t\t)\n\t\t\t\t\t\t\t\t\t\treturn outputs\n\n\t\t\t\t\t# Apply the method we just defined to all the examples in all the splits of the dataset\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tdatasets.map(\n\t\t\t\t\t _lowercase , batched=_lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=_lowercase\t\t\t)\n\n\t\t\t\t\t# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the\n\t\t\t\t\t# transformers library\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttokenized_datasets.rename_column('''label''' , '''labels'''\t\t\t)\n\n\t\t\t\t\tdef collate_fn(_lowercase\t\t\t):\n\t\t\t\t\t\t\t\t\t\t# On TPU it's best to pad everything to the same length or training will be very slow.\n\t\t\t\t\t\t\t\t\t\tif accelerator.distributed_type == DistributedType.TPU:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn tokenizer.pad(_lowercase , padding='''max_length''' , max_length=1_28 , return_tensors='''pt'''\t\t\t)\n\t\t\t\t\t\t\t\t\t\treturn tokenizer.pad(_lowercase , padding='''longest''' , return_tensors='''pt'''\t\t\t)\n\n\t\t\t\t\t# Instantiate dataloaders.\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tDataLoader(\n\t\t\t\t\t tokenized_datasets['''train'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase\t\t\t)\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tDataLoader(\n\t\t\t\t\t tokenized_datasets['''validation'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase\t\t\t)\n\n\t\t\t\t\treturn train_dataloader, eval_dataloader\n\n\n\n\ndef __A ( _lowercase , _lowercase\t\t\t):\n\n\t\t\t\t\t'''simple docstring'''\n\n\n\n\n\n\n\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tAccelerator()\n\n\t\t\t\t\t# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tconfig['''lr''']\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tint(config['''num_epochs''']\t\t\t)\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tint(config['''seed''']\t\t\t)\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tint(config['''batch_size''']\t\t\t)\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\targs.model_name_or_path\n\n\t\t\t\t\tset_seed(_lowercase\t\t\t)\n\t\t\t\t\t_A\t\t\t\t,_A\t\t\t\t\t\t\t\t\t\t=\t\t\tget_dataloaders(_lowercase , _lowercase , _lowercase\t\t\t)\n\n\t\t\t\t\t# Instantiate the model (we build the model here so that the seed also control new weights initialization)\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tAutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase\t\t\t)\n\n\t\t\t\t\t# Instantiate optimizer\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\t(\n\t\t\t\t\t AdamW\n\t\t\t\t\t if accelerator.state.deepspeed_plugin is None\n\t\t\t\t\t or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config\n\t\t\t\t\t else DummyOptim\n\t\t\t\t\t)\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\toptimizer_cls(params=model.parameters() , lr=_lowercase\t\t\t)\n\n\t\t\t\t\tif accelerator.state.deepspeed_plugin is not None:\n\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\taccelerator.state.deepspeed_plugin.deepspeed_config[\n\t\t\t\t\t\t\t\t\t\t '''gradient_accumulation_steps'''\n\t\t\t\t\t\t\t\t\t\t]\n\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\t1\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\t(len(_lowercase\t\t\t) * num_epochs) // gradient_accumulation_steps\n\n\t\t\t\t\t# Instantiate 
scheduler\n\t\t\t\t\tif (\n\t\t\t\t\t accelerator.state.deepspeed_plugin is None\n\t\t\t\t\t or \"scheduler\" not in accelerator.state.deepspeed_plugin.deepspeed_config\n\t\t\t\t\t):\n\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tget_linear_schedule_with_warmup(\n\t\t\t\t\t\t\t\t\t\t optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , )\n\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tDummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0\t\t\t)\n\n\t\t\t\t\t# Prepare everything\n\t\t\t\t\t# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the\n\t\t\t\t\t# prepare method.\n\t\t\t\t\t_A\t\t\t\t,_A\t\t\t\t,_A\t\t\t\t,_A\t\t\t\t,_A\t\t\t\t\t\t\t\t\t\t=\t\t\taccelerator.prepare(\n\t\t\t\t\t _lowercase , _lowercase , _lowercase , _lowercase , _lowercase\t\t\t)\n\n\t\t\t\t\t# We need to keep track of how many total steps we have iterated over\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\t0\n\t\t\t\t\t# We also need to keep track of the stating epoch so files are named properly\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\t0\n\n\t\t\t\t\t# Now we train the model\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tevaluate.load('''glue''' , '''mrpc'''\t\t\t)\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\t0\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\t{}\n\t\t\t\t\tfor epoch in range(_lowercase , _lowercase\t\t\t):\n\t\t\t\t\t\t\t\t\t\tmodel.train()\n\t\t\t\t\t\t\t\t\t\tfor step, batch in enumerate(_lowercase\t\t\t):\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tmodel(**_lowercase\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\toutputs.loss\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tloss / gradient_accumulation_steps\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\taccelerator.backward(_lowercase\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif step % gradient_accumulation_steps == 0:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toptimizer.step()\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlr_scheduler.step()\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toptimizer.zero_grad()\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\toverall_step += 1\n\n\t\t\t\t\t\t\t\t\t\tmodel.eval()\n\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\t0\n\t\t\t\t\t\t\t\t\t\tfor step, batch in enumerate(_lowercase\t\t\t):\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# We could avoid this line since we set the accelerator with `device_placement=True`.\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tbatch.to(accelerator.device\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\twith torch.no_grad():\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tmodel(**_lowercase\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\toutputs.logits.argmax(dim=-1\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# It is slightly faster to call this once, than multiple times\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t,_A\t\t\t\t\t\t\t\t\t\t=\t\t\taccelerator.gather(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t (predictions, batch['''labels'''])\t\t\t) # If we are in a multiprocess environment, the last batch has duplicates\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif accelerator.use_distributed:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif step == len(_lowercase\t\t\t) - 1:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tpredictions[: len(eval_dataloader.dataset\t\t\t) - samples_seen]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\treferences[: len(eval_dataloader.dataset\t\t\t) - 
samples_seen]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsamples_seen += references.shape[0]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tmetric.add_batch(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t predictions=_lowercase , references=_lowercase , )\n\n\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tmetric.compute()\n\t\t\t\t\t\t\t\t\t\t# Use accelerator.print to print only on the main process.\n\t\t\t\t\t\t\t\t\t\taccelerator.print(f\"\"\"epoch {epoch}:\"\"\" , _lowercase\t\t\t)\n\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\teval_metric['''accuracy''']\n\n\t\t\t\t\t\t\t\t\t\tif best_performance < eval_metric[\"accuracy\"]:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\teval_metric['''accuracy''']\n\n\t\t\t\t\tif args.performance_lower_bound is not None:\n\t\t\t\t\t\t\t\t\t\tassert (\n\t\t\t\t\t\t\t\t\t\t args.performance_lower_bound <= best_performance\n\t\t\t\t\t\t\t\t\t\t), f\"\"\"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}\"\"\"\n\n\t\t\t\t\taccelerator.wait_for_everyone()\n\t\t\t\t\tif accelerator.is_main_process:\n\t\t\t\t\t\t\t\t\t\twith open(os.path.join(args.output_dir , '''all_results.json'''\t\t\t) , '''w'''\t\t\t) as f:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tjson.dump(_lowercase , _lowercase\t\t\t)\n\n\n\n\ndef __A ( ):\n\n\t\t\t\t\t'''simple docstring'''\n\n\n\n\n\n\n\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\targparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.'''\t\t\t)\n\t\t\t\t\tparser.add_argument(\n\t\t\t\t\t '''--model_name_or_path''' , type=_lowercase , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=_lowercase , )\n\t\t\t\t\tparser.add_argument(\n\t\t\t\t\t '''--output_dir''' , type=_lowercase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )\n\t\t\t\t\tparser.add_argument(\n\t\t\t\t\t '''--performance_lower_bound''' , type=_lowercase , default=_lowercase , help='''Optional lower bound for the performance metric. 
If set, the training will throw error when the performance metric drops below this value.''' , )\n\t\t\t\t\tparser.add_argument(\n\t\t\t\t\t '''--num_epochs''' , type=_lowercase , default=3 , help='''Number of train epochs.''' , )\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tparser.parse_args()\n\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\t{'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}\n\t\t\t\t\ttraining_function(_lowercase , _lowercase\t\t\t)\n\n\nif __name__ == \"__main__\":\n\t\t\t\tmain()\n\n\n\n"},"code_codestyle":{"kind":"number","value":484,"string":"484"},"style_context":{"kind":"string","value":"\n\n\n\n\n\n\nimport json\nimport pathlib\nimport unittest\n\nimport numpy as np\n\nfrom transformers.testing_utils import require_torch, require_vision, slow\nfrom transformers.utils import is_torch_available, is_vision_available\n\nfrom ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs\n\n\nif is_torch_available():\n\t\t\t\timport torch\n\nif is_vision_available():\n\t\t\t\tfrom PIL import Image\n\n\t\t\t\tfrom transformers import DeformableDetrImageProcessor\n\n\n\n\n\nclass SCREAMING_SNAKE_CASE\t\t\t\t\t(\t\t\tunittest.TestCase ):\n\n\n\n\n\n\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\t\t\t\t\t\t\tdef __init__(\tself: List[str]\t\t, __A: List[str]\t\t, __A: List[str]=7\t\t, __A: Tuple=3\t\t, __A: Optional[int]=30\t\t, __A: Optional[Any]=4_00\t\t, __A: int=True\t\t, __A: str=None\t\t, __A: int=True\t\t, __A: Any=[0.5, 0.5, 0.5]\t\t, __A: Dict=[0.5, 0.5, 0.5]\t\t, __A: Dict=True\t\t, __A: str=1 / 2_55\t\t, __A: Dict=True\t\t, ) ->\t\t\t\t\tTuple:\n\t\t\t\t\t\t\t\t\t\t\t\t# by setting size[\"longest_edge\"] > max_resolution we're effectively not testing this :p\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tsize if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tparent\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tbatch_size\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tnum_channels\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tmin_resolution\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tmax_resolution\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tdo_resize\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tsize\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tdo_normalize\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\timage_mean\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\timage_std\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tdo_rescale\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\trescale_factor\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tdo_pad\n\n\t\t\t\t\t\t\tdef __A\t\t\t(\tself: Optional[Any] ) ->\t\t\t\t\tUnion[str, Any]:\n\t\t\t\t\t\t\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\t\t\t\t\t\t \"do_resize\": self.do_resize,\n\t\t\t\t\t\t\t\t\t\t\t\t \"size\": self.size,\n\t\t\t\t\t\t\t\t\t\t\t\t \"do_normalize\": self.do_normalize,\n\t\t\t\t\t\t\t\t\t\t\t\t \"image_mean\": self.image_mean,\n\t\t\t\t\t\t\t\t\t\t\t\t \"image_std\": self.image_std,\n\t\t\t\t\t\t\t\t\t\t\t\t \"do_rescale\": self.do_rescale,\n\t\t\t\t\t\t\t\t\t\t\t\t \"rescale_factor\": self.rescale_factor,\n\t\t\t\t\t\t\t\t\t\t\t\t \"do_pad\": self.do_pad,\n\t\t\t\t\t\t\t\t\t\t\t\t}\n\n\n\n\t\t\t\t\t\t\tdef __A\t\t\t(\tself: Any\t\t, __A: Optional[Any]\t\t, __A: int=False ) ->\t\t\t\t\tList[str]:\n\t\t\t\t\t\t\t\t\t\t\t\tif not 
batched:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\timage_inputs[0]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif isinstance(__A\t\t, Image.Image ):\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t,_A\t\t\t\t\t\t\t\t\t\t=\t\t\timage.size\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t,_A\t\t\t\t\t\t\t\t\t\t=\t\t\timage.shape[1], image.shape[2]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif w < h:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tint(self.size['''shortest_edge'''] * h / w )\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tself.size['''shortest_edge''']\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif w > h:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tself.size['''shortest_edge''']\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tint(self.size['''shortest_edge'''] * w / h )\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tself.size['''shortest_edge''']\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tself.size['''shortest_edge''']\n\n\t\t\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\t[]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfor image in image_inputs:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t,_A\t\t\t\t\t\t\t\t\t\t=\t\t\tself.get_expected_values([image] )\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\texpected_values.append((expected_height, expected_width) )\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tmax(__A\t\t, key=lambda __A : item[0] )[0]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tmax(__A\t\t, key=lambda __A : item[1] )[1]\n\n\t\t\t\t\t\t\t\t\t\t\t\treturn expected_height, expected_width\n\n\n\n\n\n@require_torch\n@require_vision\nclass SCREAMING_SNAKE_CASE\t\t\t\t\t(\t\t\tsnake_case\t,\t\t\t\t\t\tunittest.TestCase ):\n\n\n\n\n\n\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\n\n\t\t\t\t\t\t\tA_ \t= DeformableDetrImageProcessor if is_vision_available() else None\n\n\t\t\t\t\t\t\tdef __A\t\t\t(\tself: List[str] ) ->\t\t\t\t\tList[str]:\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tDeformableDetrImageProcessingTester(self )\n\n\t\t\t\t\t\t\t@property\n\t\t\t\t\t\t\tdef __A\t\t\t(\tself: Tuple ) ->\t\t\t\t\tList[Any]:\n\t\t\t\t\t\t\t\t\t\t\t\treturn self.image_processor_tester.prepare_image_processor_dict()\n\n\t\t\t\t\t\t\tdef __A\t\t\t(\tself: Any ) ->\t\t\t\t\tList[str]:\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tself.image_processing_class(**self.image_processor_dict )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(hasattr(__A\t\t, '''image_mean''' ) )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(hasattr(__A\t\t, '''image_std''' ) )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(hasattr(__A\t\t, '''do_normalize''' ) )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(hasattr(__A\t\t, '''do_resize''' ) )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(hasattr(__A\t\t, '''do_rescale''' ) )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(hasattr(__A\t\t, '''do_pad''' ) )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(hasattr(__A\t\t, '''size''' ) )\n\n\t\t\t\t\t\t\tdef __A\t\t\t(\tself: Tuple ) ->\t\t\t\t\tOptional[Any]:\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tself.image_processing_class.from_dict(self.image_processor_dict )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(image_processor.size\t\t, {'''shortest_edge''': 18, '''longest_edge''': 13_33} 
)\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(image_processor.do_pad\t\t, __A )\n\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tself.image_processing_class.from_dict(\n\t\t\t\t\t\t\t\t\t\t\t\t self.image_processor_dict\t\t, size=42\t\t, max_size=84\t\t, pad_and_return_pixel_mask=__A )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(image_processor.size\t\t, {'''shortest_edge''': 42, '''longest_edge''': 84} )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(image_processor.do_pad\t\t, __A )\n\n\t\t\t\t\t\t\tdef __A\t\t\t(\tself: Dict ) ->\t\t\t\t\tAny:\n\t\t\t\t\t\t\t\t\t\t\t\tpass\n\n\t\t\t\t\t\t\tdef __A\t\t\t(\tself: str ) ->\t\t\t\t\tList[str]:\n\t\t\t\t\t\t\t\t\t\t\t\t# Initialize image_processing\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tself.image_processing_class(**self.image_processor_dict )\n\t\t\t\t\t\t\t\t\t\t\t\t# create random PIL images\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tprepare_image_inputs(self.image_processor_tester\t\t, equal_resolution=__A )\n\t\t\t\t\t\t\t\t\t\t\t\tfor image in image_inputs:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.assertIsInstance(__A\t\t, Image.Image )\n\n\t\t\t\t\t\t\t\t\t\t\t\t# Test not batched input\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\timage_processing(image_inputs[0]\t\t, return_tensors='''pt''' ).pixel_values\n\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t,_A\t\t\t\t\t\t\t\t\t\t=\t\t\tself.image_processor_tester.get_expected_values(__A )\n\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(\n\t\t\t\t\t\t\t\t\t\t\t\t encoded_images.shape\t\t, (1, self.image_processor_tester.num_channels, expected_height, expected_width)\t\t, )\n\n\t\t\t\t\t\t\t\t\t\t\t\t# Test batched\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t,_A\t\t\t\t\t\t\t\t\t\t=\t\t\tself.image_processor_tester.get_expected_values(__A\t\t, batched=__A )\n\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\timage_processing(__A\t\t, return_tensors='''pt''' ).pixel_values\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(\n\t\t\t\t\t\t\t\t\t\t\t\t encoded_images.shape\t\t, (\n\t\t\t\t\t\t\t\t\t\t\t\t self.image_processor_tester.batch_size,\n\t\t\t\t\t\t\t\t\t\t\t\t self.image_processor_tester.num_channels,\n\t\t\t\t\t\t\t\t\t\t\t\t expected_height,\n\t\t\t\t\t\t\t\t\t\t\t\t expected_width,\n\t\t\t\t\t\t\t\t\t\t\t\t )\t\t, )\n\n\t\t\t\t\t\t\tdef __A\t\t\t(\tself: str ) ->\t\t\t\t\tTuple:\n\t\t\t\t\t\t\t\t\t\t\t\t# Initialize image_processing\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tself.image_processing_class(**self.image_processor_dict )\n\t\t\t\t\t\t\t\t\t\t\t\t# create random numpy tensors\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tprepare_image_inputs(self.image_processor_tester\t\t, equal_resolution=__A\t\t, numpify=__A )\n\t\t\t\t\t\t\t\t\t\t\t\tfor image in image_inputs:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.assertIsInstance(__A\t\t, np.ndarray )\n\n\t\t\t\t\t\t\t\t\t\t\t\t# Test not batched input\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\timage_processing(image_inputs[0]\t\t, return_tensors='''pt''' ).pixel_values\n\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t,_A\t\t\t\t\t\t\t\t\t\t=\t\t\tself.image_processor_tester.get_expected_values(__A )\n\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(\n\t\t\t\t\t\t\t\t\t\t\t\t encoded_images.shape\t\t, (1, self.image_processor_tester.num_channels, expected_height, expected_width)\t\t, )\n\n\t\t\t\t\t\t\t\t\t\t\t\t# Test batched\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\timage_processing(__A\t\t, return_tensors='''pt''' 
).pixel_values\n\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t,_A\t\t\t\t\t\t\t\t\t\t=\t\t\tself.image_processor_tester.get_expected_values(__A\t\t, batched=__A )\n\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(\n\t\t\t\t\t\t\t\t\t\t\t\t encoded_images.shape\t\t, (\n\t\t\t\t\t\t\t\t\t\t\t\t self.image_processor_tester.batch_size,\n\t\t\t\t\t\t\t\t\t\t\t\t self.image_processor_tester.num_channels,\n\t\t\t\t\t\t\t\t\t\t\t\t expected_height,\n\t\t\t\t\t\t\t\t\t\t\t\t expected_width,\n\t\t\t\t\t\t\t\t\t\t\t\t )\t\t, )\n\n\t\t\t\t\t\t\tdef __A\t\t\t(\tself: int ) ->\t\t\t\t\tAny:\n\t\t\t\t\t\t\t\t\t\t\t\t# Initialize image_processing\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tself.image_processing_class(**self.image_processor_dict )\n\t\t\t\t\t\t\t\t\t\t\t\t# create random PyTorch tensors\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tprepare_image_inputs(self.image_processor_tester\t\t, equal_resolution=__A\t\t, torchify=__A )\n\t\t\t\t\t\t\t\t\t\t\t\tfor image in image_inputs:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.assertIsInstance(__A\t\t, torch.Tensor )\n\n\t\t\t\t\t\t\t\t\t\t\t\t# Test not batched input\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\timage_processing(image_inputs[0]\t\t, return_tensors='''pt''' ).pixel_values\n\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t,_A\t\t\t\t\t\t\t\t\t\t=\t\t\tself.image_processor_tester.get_expected_values(__A )\n\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(\n\t\t\t\t\t\t\t\t\t\t\t\t encoded_images.shape\t\t, (1, self.image_processor_tester.num_channels, expected_height, expected_width)\t\t, )\n\n\t\t\t\t\t\t\t\t\t\t\t\t# Test batched\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\timage_processing(__A\t\t, return_tensors='''pt''' ).pixel_values\n\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t,_A\t\t\t\t\t\t\t\t\t\t=\t\t\tself.image_processor_tester.get_expected_values(__A\t\t, batched=__A )\n\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(\n\t\t\t\t\t\t\t\t\t\t\t\t encoded_images.shape\t\t, (\n\t\t\t\t\t\t\t\t\t\t\t\t self.image_processor_tester.batch_size,\n\t\t\t\t\t\t\t\t\t\t\t\t self.image_processor_tester.num_channels,\n\t\t\t\t\t\t\t\t\t\t\t\t expected_height,\n\t\t\t\t\t\t\t\t\t\t\t\t expected_width,\n\t\t\t\t\t\t\t\t\t\t\t\t )\t\t, )\n\n\t\t\t\t\t\t\t@slow\n\t\t\t\t\t\t\tdef __A\t\t\t(\tself: Optional[Any] ) ->\t\t\t\t\tTuple:\n\t\t\t\t\t\t\t\t\t\t\t\t# prepare image and target\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tImage.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )\n\t\t\t\t\t\t\t\t\t\t\t\twith open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt'''\t\t, '''r''' ) as f:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tjson.loads(f.read() )\n\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\t{'''image_id''': 3_97_69, '''annotations''': target}\n\n\t\t\t\t\t\t\t\t\t\t\t\t# encode them\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tDeformableDetrImageProcessor()\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\timage_processing(images=__A\t\t, annotations=__A\t\t, return_tensors='''pt''' )\n\n\t\t\t\t\t\t\t\t\t\t\t\t# verify pixel values\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttorch.Size([1, 3, 8_00, 10_66] )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(encoding['''pixel_values'''].shape\t\t, __A )\n\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttorch.tensor([0.2_796, 0.3_138, 0.3_481] )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3]\t\t, __A\t\t, atol=1e-4 ) )\n\n\t\t\t\t\t\t\t\t\t\t\t\t# verify 
area\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttorch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(torch.allclose(encoding['''labels'''][0]['''area''']\t\t, __A ) )\n\t\t\t\t\t\t\t\t\t\t\t\t# verify boxes\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttorch.Size([6, 4] )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(encoding['''labels'''][0]['''boxes'''].shape\t\t, __A )\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttorch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0]\t\t, __A\t\t, atol=1e-3 ) )\n\t\t\t\t\t\t\t\t\t\t\t\t# verify image_id\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttorch.tensor([3_97_69] )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id''']\t\t, __A ) )\n\t\t\t\t\t\t\t\t\t\t\t\t# verify is_crowd\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttorch.tensor([0, 0, 0, 0, 0, 0] )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd''']\t\t, __A ) )\n\t\t\t\t\t\t\t\t\t\t\t\t# verify class_labels\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttorch.tensor([75, 75, 63, 65, 17, 17] )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels''']\t\t, __A ) )\n\t\t\t\t\t\t\t\t\t\t\t\t# verify orig_size\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttorch.tensor([4_80, 6_40] )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size''']\t\t, __A ) )\n\t\t\t\t\t\t\t\t\t\t\t\t# verify size\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttorch.tensor([8_00, 10_66] )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(torch.allclose(encoding['''labels'''][0]['''size''']\t\t, __A ) )\n\n\n\n\t\t\t\t\t\t\t@slow\n\t\t\t\t\t\t\tdef __A\t\t\t(\tself: Dict ) ->\t\t\t\t\tOptional[int]:\n\t\t\t\t\t\t\t\t\t\t\t\t# prepare image, target and masks_path\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tImage.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )\n\t\t\t\t\t\t\t\t\t\t\t\twith open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt'''\t\t, '''r''' ) as f:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tjson.loads(f.read() )\n\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\t{'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}\n\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tpathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )\n\n\t\t\t\t\t\t\t\t\t\t\t\t# encode them\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\tDeformableDetrImageProcessor(format='''coco_panoptic''' )\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\timage_processing(images=__A\t\t, annotations=__A\t\t, masks_path=__A\t\t, return_tensors='''pt''' )\n\n\t\t\t\t\t\t\t\t\t\t\t\t# verify pixel values\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttorch.Size([1, 3, 8_00, 10_66] )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(encoding['''pixel_values'''].shape\t\t, __A )\n\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttorch.tensor([0.2_796, 0.3_138, 0.3_481] )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3]\t\t, __A\t\t, atol=1e-4 ) )\n\n\t\t\t\t\t\t\t\t\t\t\t\t# verify 
area\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttorch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(torch.allclose(encoding['''labels'''][0]['''area''']\t\t, __A ) )\n\t\t\t\t\t\t\t\t\t\t\t\t# verify boxes\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttorch.Size([6, 4] )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(encoding['''labels'''][0]['''boxes'''].shape\t\t, __A )\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttorch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0]\t\t, __A\t\t, atol=1e-3 ) )\n\t\t\t\t\t\t\t\t\t\t\t\t# verify image_id\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttorch.tensor([3_97_69] )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id''']\t\t, __A ) )\n\t\t\t\t\t\t\t\t\t\t\t\t# verify is_crowd\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttorch.tensor([0, 0, 0, 0, 0, 0] )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd''']\t\t, __A ) )\n\t\t\t\t\t\t\t\t\t\t\t\t# verify class_labels\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttorch.tensor([17, 17, 63, 75, 75, 93] )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels''']\t\t, __A ) )\n\t\t\t\t\t\t\t\t\t\t\t\t# verify masks\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\t82_28_73\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item()\t\t, __A )\n\t\t\t\t\t\t\t\t\t\t\t\t# verify orig_size\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttorch.tensor([4_80, 6_40] )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size''']\t\t, __A ) )\n\t\t\t\t\t\t\t\t\t\t\t\t# verify size\n\t\t\t\t\t\t\t\t\t\t\t\t_A\t\t\t\t\t\t\t\t\t\t=\t\t\ttorch.tensor([8_00, 10_66] )\n\t\t\t\t\t\t\t\t\t\t\t\tself.assertTrue(torch.allclose(encoding['''labels'''][0]['''size''']\t\t, __A ) )\n\n\n\n"},"style_context_codestyle":{"kind":"number","value":484,"string":"484"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":539,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\nimport argparse\r\nimport shutil\r\nfrom pathlib import Path\r\n\r\nfrom tqdm import tqdm\r\n\r\nfrom transformers import AutoTokenizer\r\n\r\n\r\n\r\n\r\n\r\ndef _UpperCAmelCase ( UpperCAmelCase :\t\t\tAny\t\t\t\t\t\t,\tUpperCAmelCase :\t\t\tint\t\t\t\t\t\t,\tUpperCAmelCase :\t\t\tint\t\t\t\t\t\t,\tUpperCAmelCase :\t\t\tAny=1_024 ):\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n __lowerCamelCase : str \t\t\t\t\t\t=\t\t\t\t\t\t[], []\r\n\r\n __lowerCamelCase : Any \t\t\t\t\t\t=\t\t\t\t\t\tlist(zip(UpperCAmelCase\t\t\t\t\t\t,\tUpperCAmelCase ) )\r\n __lowerCamelCase : List[str] \t\t\t\t\t\t=\t\t\t\t\t\tsorted_examples[0]\r\n\r\n def is_too_big(UpperCAmelCase :\t\t\tOptional[Any] ):\r\n return tok(UpperCAmelCase\t\t\t\t\t\t,\treturn_tensors=\"\"\"pt\"\"\" ).input_ids.shape[1] > max_tokens\r\n\r\n for src, tgt in tqdm(sorted_examples[1:] ):\r\n __lowerCamelCase : Union[str, Any] \t\t\t\t\t\t=\t\t\t\t\t\tnew_src + \"\"\" \"\"\" + src\r\n __lowerCamelCase : str \t\t\t\t\t\t=\t\t\t\t\t\tnew_tgt + \"\"\" \"\"\" + tgt\r\n if is_too_big(UpperCAmelCase ) or is_too_big(UpperCAmelCase ): # cant fit, finalize example\r\n finished_src.append(UpperCAmelCase )\r\n finished_tgt.append(UpperCAmelCase )\r\n __lowerCamelCase : str 
\t\t\t\t\t\t=\t\t\t\t\t\tsrc, tgt\r\n else: # can fit, keep adding\r\n __lowerCamelCase : int \t\t\t\t\t\t=\t\t\t\t\t\tcand_src, cand_tgt\r\n\r\n # cleanup\r\n if new_src:\r\n assert new_tgt\r\n finished_src.append(UpperCAmelCase )\r\n finished_tgt.append(UpperCAmelCase )\r\n return finished_src, finished_tgt\r\n\r\n\r\n\r\n\r\n\r\ndef _UpperCAmelCase ( UpperCAmelCase :\t\t\tTuple\t\t\t\t\t\t,\tUpperCAmelCase :\t\t\tPath\t\t\t\t\t\t,\tUpperCAmelCase :\t\t\tDict\t\t\t\t\t\t,\tUpperCAmelCase :\t\t\tOptional[Any] ):\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n __lowerCamelCase : List[Any] \t\t\t\t\t\t=\t\t\t\t\t\tPath(UpperCAmelCase )\r\n save_path.mkdir(exist_ok=UpperCAmelCase )\r\n for split in [\"train\"]:\r\n __lowerCamelCase : List[Any] \t\t\t\t\t\t=\t\t\t\t\t\tdata_dir / f\"\"\"{split}.source\"\"\", data_dir / f\"\"\"{split}.target\"\"\"\r\n __lowerCamelCase : Tuple \t\t\t\t\t\t=\t\t\t\t\t\t[x.rstrip() for x in Path(UpperCAmelCase ).open().readlines()]\r\n __lowerCamelCase : Tuple \t\t\t\t\t\t=\t\t\t\t\t\t[x.rstrip() for x in Path(UpperCAmelCase ).open().readlines()]\r\n __lowerCamelCase : int \t\t\t\t\t\t=\t\t\t\t\t\tpack_examples(UpperCAmelCase\t\t\t\t\t\t,\tUpperCAmelCase\t\t\t\t\t\t,\tUpperCAmelCase\t\t\t\t\t\t,\tUpperCAmelCase )\r\n print(f\"\"\"packed {split} split from {len(UpperCAmelCase )} examples -> {len(UpperCAmelCase )}.\"\"\" )\r\n Path(save_path / f\"\"\"{split}.source\"\"\" ).open(\"\"\"w\"\"\" ).write(\"\"\"\\n\"\"\".join(UpperCAmelCase ) )\r\n Path(save_path / f\"\"\"{split}.target\"\"\" ).open(\"\"\"w\"\"\" ).write(\"\"\"\\n\"\"\".join(UpperCAmelCase ) )\r\n for split in [\"val\", \"test\"]:\r\n __lowerCamelCase : Optional[Any] \t\t\t\t\t\t=\t\t\t\t\t\tdata_dir / f\"\"\"{split}.source\"\"\", data_dir / f\"\"\"{split}.target\"\"\"\r\n shutil.copyfile(UpperCAmelCase\t\t\t\t\t\t,\tsave_path / f\"\"\"{split}.source\"\"\" )\r\n shutil.copyfile(UpperCAmelCase\t\t\t\t\t\t,\tsave_path / f\"\"\"{split}.target\"\"\" )\r\n\r\n\r\n\r\n\r\n\r\ndef _UpperCAmelCase ( ):\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n __lowerCamelCase : List[Any] \t\t\t\t\t\t=\t\t\t\t\t\targparse.ArgumentParser()\r\n parser.add_argument(\"\"\"--tok_name\"\"\"\t\t\t\t\t\t,\ttype=UpperCAmelCase\t\t\t\t\t\t,\thelp=\"\"\"like facebook/bart-large-cnn,t5-base, etc.\"\"\" )\r\n parser.add_argument(\"\"\"--max_seq_len\"\"\"\t\t\t\t\t\t,\ttype=UpperCAmelCase\t\t\t\t\t\t,\tdefault=128 )\r\n parser.add_argument(\"\"\"--data_dir\"\"\"\t\t\t\t\t\t,\ttype=UpperCAmelCase )\r\n parser.add_argument(\"\"\"--save_path\"\"\"\t\t\t\t\t\t,\ttype=UpperCAmelCase )\r\n __lowerCamelCase : Union[str, Any] \t\t\t\t\t\t=\t\t\t\t\t\tparser.parse_args()\r\n __lowerCamelCase : Tuple \t\t\t\t\t\t=\t\t\t\t\t\tAutoTokenizer.from_pretrained(args.tok_name )\r\n return pack_data_dir(UpperCAmelCase\t\t\t\t\t\t,\tPath(args.data_dir )\t\t\t\t\t\t,\targs.max_seq_len\t\t\t\t\t\t,\targs.save_path )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n packer_cli()\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":709,"string":"709"},"style_context":{"kind":"string","value":"\r\n\r\n\r\nimport argparse\r\nimport shutil\r\nfrom pathlib import Path\r\n\r\nfrom tqdm import tqdm\r\n\r\nfrom transformers import AutoTokenizer\r\n\r\n\r\n\r\n\r\n\r\ndef _UpperCAmelCase ( UpperCAmelCase :\t\t\tAny\t\t\t\t\t\t,\tUpperCAmelCase :\t\t\tint\t\t\t\t\t\t,\tUpperCAmelCase :\t\t\tint\t\t\t\t\t\t,\tUpperCAmelCase :\t\t\tAny=1_024 ):\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n __lowerCamelCase\t\t\t\t\t\t, __lowerCamelCase : str 
\t\t\t\t\t\t=\t\t\t\t\t\t[], []\r\n\r\n __lowerCamelCase : Any \t\t\t\t\t\t=\t\t\t\t\t\tlist(zip(UpperCAmelCase\t\t\t\t\t\t,\tUpperCAmelCase ) )\r\n __lowerCamelCase\t\t\t\t\t\t, __lowerCamelCase : List[str] \t\t\t\t\t\t=\t\t\t\t\t\tsorted_examples[0]\r\n\r\n def is_too_big(UpperCAmelCase :\t\t\tOptional[Any] ):\r\n return tok(UpperCAmelCase\t\t\t\t\t\t,\treturn_tensors=\"\"\"pt\"\"\" ).input_ids.shape[1] > max_tokens\r\n\r\n for src, tgt in tqdm(sorted_examples[1:] ):\r\n __lowerCamelCase : Union[str, Any] \t\t\t\t\t\t=\t\t\t\t\t\tnew_src + \"\"\" \"\"\" + src\r\n __lowerCamelCase : str \t\t\t\t\t\t=\t\t\t\t\t\tnew_tgt + \"\"\" \"\"\" + tgt\r\n if is_too_big(UpperCAmelCase ) or is_too_big(UpperCAmelCase ): # cant fit, finalize example\r\n finished_src.append(UpperCAmelCase )\r\n finished_tgt.append(UpperCAmelCase )\r\n __lowerCamelCase\t\t\t\t\t\t, __lowerCamelCase : str \t\t\t\t\t\t=\t\t\t\t\t\tsrc, tgt\r\n else: # can fit, keep adding\r\n __lowerCamelCase\t\t\t\t\t\t, __lowerCamelCase : int \t\t\t\t\t\t=\t\t\t\t\t\tcand_src, cand_tgt\r\n\r\n # cleanup\r\n if new_src:\r\n assert new_tgt\r\n finished_src.append(UpperCAmelCase )\r\n finished_tgt.append(UpperCAmelCase )\r\n return finished_src, finished_tgt\r\n\r\n\r\n\r\n\r\n\r\ndef _UpperCAmelCase ( UpperCAmelCase :\t\t\tTuple\t\t\t\t\t\t,\tUpperCAmelCase :\t\t\tPath\t\t\t\t\t\t,\tUpperCAmelCase :\t\t\tDict\t\t\t\t\t\t,\tUpperCAmelCase :\t\t\tOptional[Any] ):\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n __lowerCamelCase : List[Any] \t\t\t\t\t\t=\t\t\t\t\t\tPath(UpperCAmelCase )\r\n save_path.mkdir(exist_ok=UpperCAmelCase )\r\n for split in [\"train\"]:\r\n __lowerCamelCase\t\t\t\t\t\t, __lowerCamelCase : List[Any] \t\t\t\t\t\t=\t\t\t\t\t\tdata_dir / f\"\"\"{split}.source\"\"\", data_dir / f\"\"\"{split}.target\"\"\"\r\n __lowerCamelCase : Tuple \t\t\t\t\t\t=\t\t\t\t\t\t[x.rstrip() for x in Path(UpperCAmelCase ).open().readlines()]\r\n __lowerCamelCase : Tuple \t\t\t\t\t\t=\t\t\t\t\t\t[x.rstrip() for x in Path(UpperCAmelCase ).open().readlines()]\r\n __lowerCamelCase\t\t\t\t\t\t, __lowerCamelCase : int \t\t\t\t\t\t=\t\t\t\t\t\tpack_examples(UpperCAmelCase\t\t\t\t\t\t,\tUpperCAmelCase\t\t\t\t\t\t,\tUpperCAmelCase\t\t\t\t\t\t,\tUpperCAmelCase )\r\n print(f\"\"\"packed {split} split from {len(UpperCAmelCase )} examples -> {len(UpperCAmelCase )}.\"\"\" )\r\n Path(save_path / f\"\"\"{split}.source\"\"\" ).open(\"\"\"w\"\"\" ).write(\"\"\"\\n\"\"\".join(UpperCAmelCase ) )\r\n Path(save_path / f\"\"\"{split}.target\"\"\" ).open(\"\"\"w\"\"\" ).write(\"\"\"\\n\"\"\".join(UpperCAmelCase ) )\r\n for split in [\"val\", \"test\"]:\r\n __lowerCamelCase\t\t\t\t\t\t, __lowerCamelCase : Optional[Any] \t\t\t\t\t\t=\t\t\t\t\t\tdata_dir / f\"\"\"{split}.source\"\"\", data_dir / f\"\"\"{split}.target\"\"\"\r\n shutil.copyfile(UpperCAmelCase\t\t\t\t\t\t,\tsave_path / f\"\"\"{split}.source\"\"\" )\r\n shutil.copyfile(UpperCAmelCase\t\t\t\t\t\t,\tsave_path / f\"\"\"{split}.target\"\"\" )\r\n\r\n\r\n\r\n\r\n\r\ndef _UpperCAmelCase ( ):\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n __lowerCamelCase : List[Any] \t\t\t\t\t\t=\t\t\t\t\t\targparse.ArgumentParser()\r\n parser.add_argument(\"\"\"--tok_name\"\"\"\t\t\t\t\t\t,\ttype=UpperCAmelCase\t\t\t\t\t\t,\thelp=\"\"\"like facebook/bart-large-cnn,t5-base, etc.\"\"\" )\r\n parser.add_argument(\"\"\"--max_seq_len\"\"\"\t\t\t\t\t\t,\ttype=UpperCAmelCase\t\t\t\t\t\t,\tdefault=128 )\r\n parser.add_argument(\"\"\"--data_dir\"\"\"\t\t\t\t\t\t,\ttype=UpperCAmelCase )\r\n 
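
For readability, here is a minimal sketch of the greedy packing idea that the surrounding escaped rows serialize: concatenate consecutive source/target pairs until the tokenized candidate would exceed a token budget, then start a new pack. The tokenizer checkpoint and the 1024-token budget are illustrative assumptions, not values fixed by the rows.

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")  # illustrative choice

def is_too_big(text, max_tokens=1024):
    # Token-length check used to decide when a pack is full.
    return tok(text, return_tensors="pt").input_ids.shape[1] > max_tokens

def pack_examples(src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    new_src, new_tgt = src_examples[0], tgt_examples[0]
    for src, tgt in zip(src_examples[1:], tgt_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src, max_tokens) or is_too_big(cand_tgt, max_tokens):
            # Candidate would overflow the budget: finalize the current pack.
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:
            # Still fits: keep growing the current pack.
            new_src, new_tgt = cand_src, cand_tgt
    # Flush whatever is left in the last pack.
    if new_src:
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
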
parser.add_argument(\"\"\"--save_path\"\"\"\t\t\t\t\t\t,\ttype=UpperCAmelCase )\r\n __lowerCamelCase : Union[str, Any] \t\t\t\t\t\t=\t\t\t\t\t\tparser.parse_args()\r\n __lowerCamelCase : Tuple \t\t\t\t\t\t=\t\t\t\t\t\tAutoTokenizer.from_pretrained(args.tok_name )\r\n return pack_data_dir(UpperCAmelCase\t\t\t\t\t\t,\tPath(args.data_dir )\t\t\t\t\t\t,\targs.max_seq_len\t\t\t\t\t\t,\targs.save_path )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n packer_cli()\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":458,"string":"458"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":540,"cells":{"code":{"kind":"string","value":"\r\r\rimport collections\rimport os\rfrom typing import List, Optional, Tuple\r\rfrom transformers.utils import is_jieba_available, requires_backends\r\r\rif is_jieba_available():\r import jieba\r\rfrom ...tokenization_utils import PreTrainedTokenizer\rfrom ...utils import logging\r\r\rUpperCamelCase\t\t\t\t\t: Union[str, Any]\t\t\t = logging.get_logger(__name__)\r\rUpperCamelCase\t\t\t\t\t: Tuple\t\t\t = {\"\"\"vocab_file\"\"\": \"\"\"vocab.txt\"\"\"}\r\rUpperCamelCase\t\t\t\t\t: Union[str, Any]\t\t\t = {\r \"\"\"vocab_file\"\"\": {\r \"\"\"openbmb/cpm-ant-10b\"\"\": \"\"\"https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt\"\"\",\r },\r}\r\rUpperCamelCase\t\t\t\t\t: int\t\t\t = {\r \"\"\"openbmb/cpm-ant-10b\"\"\": 1024,\r}\r\r\r\rdef UpperCamelCase_\t\t\t\t(\t\t\t\t__a )\t-> Tuple:\r a__\t: List[str] = collections.OrderedDict()\r with open(__a\t\t\t,\t\t\t\t\"r\"\t\t\t,\t\t\t\tencoding=\"utf-8\" ) as reader:\r a__\t: Optional[int] = reader.readlines()\r for index, token in enumerate(__a ):\r a__\t: List[str] = token.rstrip(\"\\n\" )\r a__\t: Tuple = index\r return vocab\r\r\r\rclass A__ ( A__ ):\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r def __init__( self\t\t\t: int\t\t\t\t,\t\t\t\tlowerCamelCase__\t\t\t: Union[str, Any]\t\t\t\t,\t\t\t\tlowerCamelCase__\t\t\t: Union[str, Any]=\"\"\t\t\t\t,\t\t\t\tlowerCamelCase__\t\t\t: Any=200 ):\r a__\t: Tuple = vocab\r a__\t: Tuple = unk_token\r a__\t: List[Any] = max_input_chars_per_word\r\r def _UpperCamelCase( self\t\t\t: int\t\t\t\t,\t\t\t\tlowerCamelCase__\t\t\t: List[str] ):\r a__\t: List[str] = list(lowerCamelCase__ )\r if len(lowerCamelCase__ ) > self.max_input_chars_per_word:\r return [self.unk_token]\r\r a__\t: Optional[int] = 0\r a__\t: Tuple = []\r while start < len(lowerCamelCase__ ):\r a__\t: Union[str, Any] = len(lowerCamelCase__ )\r a__\t: List[str] = None\r while start < end:\r a__\t: int = \"\".join(chars[start:end] )\r if substr in self.vocab:\r a__\t: Optional[Any] = substr\r break\r end -= 1\r if cur_substr is None:\r sub_tokens.append(self.unk_token )\r start += 1\r else:\r sub_tokens.append(lowerCamelCase__ )\r a__\t: List[Any] = end\r\r return sub_tokens\r\r\r\r\rclass A__ ( A__ ):\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r _lowercase\t\t\t\t\t\t\t\t\t=\t\tVOCAB_FILES_NAMES\r _lowercase\t\t\t\t\t\t\t\t\t=\t\tPRETRAINED_VOCAB_FILES_MAP\r _lowercase\t\t\t\t\t\t\t\t\t=\t\tPRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\r _lowercase\t\t\t\t\t\t\t\t\t=\t\t['input_ids', 'attention_mask']\r _lowercase\t\t\t\t\t\t\t\t\t=\t\tFalse\r\r def __init__( self\t\t\t: List[str]\t\t\t\t,\t\t\t\tlowerCamelCase__\t\t\t: str\t\t\t\t,\t\t\t\tlowerCamelCase__\t\t\t: Optional[int]=\"\"\t\t\t\t,\t\t\t\tlowerCamelCase__\t\t\t: List[Any]=\"\"\t\t\t\t,\t\t\t\tlowerCamelCase__\t\t\t: Union[str, Any]=\"\"\t\t\t\t,\t\t\t\tlowerCamelCase__\t\t\t: Union[str, Any]=\"\"\t\t\t\t,\t\t\t\tlowerCamelCase__\t\t\t: 
(End of the previous row, index 539: the script above is that row's style_context field, style_context_codestyle 458, label 0.)

Row 540 (label 0)
code (code_codestyle 37): a CPM-Ant-style tokenizer module. It declares vocab-file constants pointing at openbmb/cpm-ant-10b (vocab.txt, positional-embedding size 1024) and a load_vocab() helper that reads vocab.txt line by line into an OrderedDict of token -> index. A WordpieceTokenizer class greedily matches the longest substring of each word against the vocabulary in tokenize(), emits the unk token for unmatched characters, and bails out for words longer than max_input_chars_per_word (200). The CpmAntTokenizer(PreTrainedTokenizer) subclass requires the jieba backend, takes bod/eod/bos/eos/pad/unk/line/space special tokens (defaults <d>, </d>, <s>, </s>, <pad>, <unk>, </n>, </_>) with left-side padding, re-maps the space and newline tokens inside its encoder, segments input with jieba before wordpiece-tokenizing each segment, strips pad/eos/bos ids in _decode(), writes the vocabulary back out in save_vocabulary() (warning if the indices are not consecutive), and prepends the BOS id in build_inputs_with_special_tokens() and get_special_tokens_mask().
style_context (style_context_codestyle 575): a Flax Pegasus test module. It defines a FlaxPegasusModelTester holding the usual batch-size/sequence-length/model-dimension hyper-parameters and building a PegasusConfig plus an input dict in prepare_config_and_inputs(). Two helper methods decode step by step with an init_cache()/past_key_values cache and assert that the result stays within 1e-3 of uncached decoding, with and without an explicit decoder attention mask. A prepare_pegasus_inputs_dict() utility derives attention masks from the pad token. The FlaxPegasusModelTest class runs the common config tests, the two cache checks, jitted-vs-unjitted encode/decode shape comparisons, a slow from_pretrained smoke test for google/pegasus-large, and a slow google/pegasus-xsum generation test that summarizes two long news articles (a PG&E blackout story and an N-Dubz Mobo-nominations story) and compares the decoded output with reference summaries.

Row 541 (label 0)
code (code_codestyle 705): a token-classification data collator in the LUKE style. It provides a padding_tensor() helper that pads variable-length sequences (or lists of spans) to a fixed length on either side, an is_punctuation() test based on the ASCII punctuation ranges and the Unicode "P" category, and a DataCollatorMixin subclass that pads the tokenized features with tokenizer.pad(), pads the label sequences with label_pad_token_id (-100) on the tokenizer's padding side, pads the "ner_tags" and "original_entity_spans" fields with padding_tensor(), and converts everything to int64 torch tensors.
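A minimal sketch of that label-padding step, in pure Python and for right padding only; the -100 value is the usual ignore index and the names are placeholders rather than the collator's own.

def pad_labels(label_lists, sequence_length, pad_id=-100):
    # Right-pad every label list to `sequence_length` with the ignore index
    # so that the loss skips the padded positions.
    return [labels + [pad_id] * (sequence_length - len(labels)) for labels in label_lists]


print(pad_labels([[1, 2], [3, 4, 5]], 4))
# -> [[1, 2, -100, -100], [3, 4, 5, -100]]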
if \"label\" in features[0].keys() else \"labels\"\r _UpperCamelCase :\tOptional[Any]\t\t\t\t\t\t= [feature[label_name] for feature in features] if label_name in features[0].keys() else None\r _UpperCamelCase :\tDict\t\t\t\t\t\t= self.tokenizer.pad(\r snake_case__\t\t\t\t\t, padding=self.padding\t\t\t\t\t, max_length=self.max_length\t\t\t\t\t, pad_to_multiple_of=self.pad_to_multiple_of\t\t\t\t\t, return_tensors=\"pt\" if labels is None else None\t\t\t\t\t, )\r\r if labels is None:\r return batch\r\r _UpperCamelCase :\tAny\t\t\t\t\t\t= torch.tensor(batch[\"entity_ids\"]\t).shape[1]\r _UpperCamelCase :\tUnion[str, Any]\t\t\t\t\t\t= self.tokenizer.padding_side\r if padding_side == \"right\":\r _UpperCamelCase :\tDict\t\t\t\t\t\t= [\r list(snake_case__\t) + [self.label_pad_token_id] * (sequence_length - len(snake_case__\t)) for label in labels\r ]\r else:\r _UpperCamelCase :\tint\t\t\t\t\t\t= [\r [self.label_pad_token_id] * (sequence_length - len(snake_case__\t)) + list(snake_case__\t) for label in labels\r ]\r\r _UpperCamelCase :\tAny\t\t\t\t\t\t= [feature[\"ner_tags\"] for feature in features]\r _UpperCamelCase :\tList[Any]\t\t\t\t\t\t= padding_tensor(snake_case__\t\t\t\t\t, -1\t\t\t\t\t, snake_case__\t\t\t\t\t, snake_case__\t)\r _UpperCamelCase :\tstr\t\t\t\t\t\t= [feature[\"original_entity_spans\"] for feature in features]\r _UpperCamelCase :\tList[str]\t\t\t\t\t\t= padding_tensor(snake_case__\t\t\t\t\t, (-1, -1)\t\t\t\t\t, snake_case__\t\t\t\t\t, snake_case__\t)\r _UpperCamelCase :\tOptional[int]\t\t\t\t\t\t= {k: torch.tensor(snake_case__\t\t\t\t\t, dtype=torch.intaa\t) for k, v in batch.items()}\r\r return batch\r"},"code_codestyle":{"kind":"number","value":705,"string":"705"},"style_context":{"kind":"string","value":"\r\r\r\r\r\r\"\"\"simple docstring\"\"\"\r\r\rfrom __future__ import annotations\r\rimport unittest\r\rfrom transformers import LEDConfig, is_tf_available\rfrom transformers.testing_utils import require_tf, slow\r\rfrom ...test_configuration_common import ConfigTester\rfrom ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor\rfrom ...test_pipeline_mixin import PipelineTesterMixin\r\r\rif is_tf_available():\r import tensorflow as tf\r\r from transformers import TFLEDForConditionalGeneration, TFLEDModel\r\r\r\r@require_tf\rclass __SCREAMING_SNAKE_CASE :\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r SCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t:Any\t =\t\t\t\t\tLEDConfig\r SCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t:str\t =\t\t\t\t\t{}\r SCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t:List[str]\t =\t\t\t\t\t\"gelu\"\r\r def __init__(\t\tself : List[Any]\t\t\t\t\t, __a : Union[str, Any]\t\t\t\t\t, __a : List[Any]=13\t\t\t\t\t, __a : int=7\t\t\t\t\t, __a : str=True\t\t\t\t\t, __a : Any=False\t\t\t\t\t, __a : str=99\t\t\t\t\t, __a : str=32\t\t\t\t\t, __a : Union[str, Any]=2\t\t\t\t\t, __a : Optional[Any]=4\t\t\t\t\t, __a : List[Any]=37\t\t\t\t\t, __a : List[Any]=0.1\t\t\t\t\t, __a : Tuple=0.1\t\t\t\t\t, __a : Dict=20\t\t\t\t\t, __a : str=2\t\t\t\t\t, __a : Dict=1\t\t\t\t\t, __a : Any=0\t\t\t\t\t, __a : List[Any]=4\t\t\t\t\t, )\t\t->\t\t\t\tList[Any]:\r _UpperCamelCase :\tOptional[Any]\t\t\t\t\t\t= parent\r _UpperCamelCase :\tList[str]\t\t\t\t\t\t= batch_size\r _UpperCamelCase :\tstr\t\t\t\t\t\t= seq_length\r _UpperCamelCase :\tstr\t\t\t\t\t\t= is_training\r _UpperCamelCase :\tAny\t\t\t\t\t\t= use_labels\r _UpperCamelCase :\tAny\t\t\t\t\t\t= vocab_size\r _UpperCamelCase :\tList[str]\t\t\t\t\t\t= hidden_size\r _UpperCamelCase :\tOptional[Any]\t\t\t\t\t\t= num_hidden_layers\r _UpperCamelCase 
:\tDict\t\t\t\t\t\t= num_attention_heads\r _UpperCamelCase :\tOptional[Any]\t\t\t\t\t\t= intermediate_size\r\r _UpperCamelCase :\tint\t\t\t\t\t\t= hidden_dropout_prob\r _UpperCamelCase :\tDict\t\t\t\t\t\t= attention_probs_dropout_prob\r _UpperCamelCase :\tstr\t\t\t\t\t\t= max_position_embeddings\r _UpperCamelCase :\tint\t\t\t\t\t\t= eos_token_id\r _UpperCamelCase :\tDict\t\t\t\t\t\t= pad_token_id\r _UpperCamelCase :\tOptional[Any]\t\t\t\t\t\t= bos_token_id\r _UpperCamelCase :\tstr\t\t\t\t\t\t= attention_window\r\r # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size\r # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention\r # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]\r # because its local attention only attends to `self.attention_window` and one before and one after\r _UpperCamelCase :\tList[str]\t\t\t\t\t\t= self.attention_window + 2\r\r # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for\r # the `test_attention_outputs` and `test_hidden_states_output` tests\r _UpperCamelCase :\tint\t\t\t\t\t\t= (\r self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window\r )\r\r def __SCREAMING_SNAKE_CASE\t\t\t\t(\t\tself : int\t)\t\t->\t\t\t\tstr:\r _UpperCamelCase :\tTuple\t\t\t\t\t\t= ids_tensor([self.batch_size, self.seq_length - 1]\t\t\t\t\t, self.vocab_size\t)\r _UpperCamelCase :\tOptional[Any]\t\t\t\t\t\t= tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size\t)\t\t\t\t\t, 1\t)\r _UpperCamelCase :\tTuple\t\t\t\t\t\t= tf.concat([input_ids, eos_tensor]\t\t\t\t\t, axis=1\t)\r\r _UpperCamelCase :\tTuple\t\t\t\t\t\t= ids_tensor([self.batch_size, self.seq_length]\t\t\t\t\t, self.vocab_size\t)\r\r _UpperCamelCase :\tList[Any]\t\t\t\t\t\t= self.config_cls(\r vocab_size=self.vocab_size\t\t\t\t\t, d_model=self.hidden_size\t\t\t\t\t, encoder_layers=self.num_hidden_layers\t\t\t\t\t, decoder_layers=self.num_hidden_layers\t\t\t\t\t, encoder_attention_heads=self.num_attention_heads\t\t\t\t\t, decoder_attention_heads=self.num_attention_heads\t\t\t\t\t, encoder_ffn_dim=self.intermediate_size\t\t\t\t\t, decoder_ffn_dim=self.intermediate_size\t\t\t\t\t, dropout=self.hidden_dropout_prob\t\t\t\t\t, attention_dropout=self.attention_probs_dropout_prob\t\t\t\t\t, max_position_embeddings=self.max_position_embeddings\t\t\t\t\t, eos_token_ids=[2]\t\t\t\t\t, bos_token_id=self.bos_token_id\t\t\t\t\t, pad_token_id=self.pad_token_id\t\t\t\t\t, decoder_start_token_id=self.pad_token_id\t\t\t\t\t, attention_window=self.attention_window\t\t\t\t\t, **self.config_updates\t\t\t\t\t, )\r _UpperCamelCase :\tDict\t\t\t\t\t\t= prepare_led_inputs_dict(__a\t\t\t\t\t, __a\t\t\t\t\t, __a\t)\r _UpperCamelCase :\tUnion[str, Any]\t\t\t\t\t\t= tf.concat(\r [tf.zeros_like(__a\t)[:, :-1], tf.ones_like(__a\t)[:, -1:]]\t\t\t\t\t, axis=-1\t\t\t\t\t, )\r _UpperCamelCase :\tUnion[str, Any]\t\t\t\t\t\t= global_attention_mask\r return config, inputs_dict\r\r\r def __SCREAMING_SNAKE_CASE\t\t\t\t(\t\tself : List[str]\t\t\t\t\t, __a : List[Any]\t\t\t\t\t, __a : int\t)\t\t->\t\t\t\tTuple:\r _UpperCamelCase :\tTuple\t\t\t\t\t\t= TFLEDModel(config=__a\t).get_decoder()\r _UpperCamelCase :\tTuple\t\t\t\t\t\t= inputs_dict[\"input_ids\"]\r\r _UpperCamelCase :\tint\t\t\t\t\t\t= input_ids[:1, :]\r _UpperCamelCase :\tList[str]\t\t\t\t\t\t= inputs_dict[\"attention_mask\"][:1, :]\r _UpperCamelCase :\tList[Any]\t\t\t\t\t\t= 1\r\r # first forward 
pass\r _UpperCamelCase :\tAny\t\t\t\t\t\t= model(__a\t\t\t\t\t, attention_mask=__a\t\t\t\t\t, use_cache=__a\t)\r\r _UpperCamelCase, _UpperCamelCase :\tUnion[str, Any]\t\t\t\t\t\t= outputs.to_tuple()\r\r # create hypothetical next token and extent to next_input_ids\r _UpperCamelCase :\tAny\t\t\t\t\t\t= ids_tensor((self.batch_size, 3)\t\t\t\t\t, config.vocab_size\t)\r _UpperCamelCase :\tAny\t\t\t\t\t\t= tf.cast(ids_tensor((self.batch_size, 3)\t\t\t\t\t, 2\t)\t\t\t\t\t, tf.inta\t)\r\r # append to next input_ids and\r _UpperCamelCase :\tList[str]\t\t\t\t\t\t= tf.concat([input_ids, next_tokens]\t\t\t\t\t, axis=-1\t)\r _UpperCamelCase :\tUnion[str, Any]\t\t\t\t\t\t= tf.concat([attention_mask, next_attn_mask]\t\t\t\t\t, axis=-1\t)\r\r _UpperCamelCase :\tTuple\t\t\t\t\t\t= model(__a\t\t\t\t\t, attention_mask=__a\t)[0]\r _UpperCamelCase :\tint\t\t\t\t\t\t= model(__a\t\t\t\t\t, attention_mask=__a\t\t\t\t\t, past_key_values=__a\t)[0]\r\r self.parent.assertEqual(next_tokens.shape[1]\t\t\t\t\t, output_from_past.shape[1]\t)\r\r # select random slice\r _UpperCamelCase :\tList[Any]\t\t\t\t\t\t= int(ids_tensor((1,)\t\t\t\t\t, output_from_past.shape[-1]\t)\t)\r _UpperCamelCase :\tList[str]\t\t\t\t\t\t= output_from_no_past[:, -3:, random_slice_idx]\r _UpperCamelCase :\tOptional[int]\t\t\t\t\t\t= output_from_past[:, :, random_slice_idx]\r\r # test that outputs are equal for slice\r tf.debugging.assert_near(__a\t\t\t\t\t, __a\t\t\t\t\t, rtol=1e-3\t)\r\r\r\r\r\rdef \t\t\t\t\tlowercase__ (\t\t\t\t\tlowercase_\t\t,lowercase_\t\t,lowercase_\t\t,lowercase_=None\t\t,lowercase_=None\t\t,lowercase_=None\t\t,lowercase_=None\t\t,)\t\t\t\t\t->\t\tDict:\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r if attention_mask is None:\r _UpperCamelCase :\tstr\t\t\t\t\t\t= tf.cast(tf.math.not_equal(lowercase_\t\t,config.pad_token_id\t\t\t)\t\t,tf.inta\t\t\t)\r if decoder_attention_mask is None:\r _UpperCamelCase :\tstr\t\t\t\t\t\t= tf.concat(\r [\r tf.ones(decoder_input_ids[:, :1].shape\t\t,dtype=tf.inta\t\t\t),\r tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:]\t\t,config.pad_token_id\t\t\t)\t\t,tf.inta\t\t\t),\r ]\t\t,axis=-1\t\t,)\r if head_mask is None:\r _UpperCamelCase :\tList[str]\t\t\t\t\t\t= tf.ones((config.encoder_layers, config.encoder_attention_heads)\t\t\t)\r if decoder_head_mask is None:\r _UpperCamelCase :\tList[str]\t\t\t\t\t\t= tf.ones((config.decoder_layers, config.decoder_attention_heads)\t\t\t)\r return {\r \"input_ids\": input_ids,\r \"attention_mask\": attention_mask,\r \"decoder_input_ids\": decoder_input_ids,\r \"decoder_attention_mask\": decoder_attention_mask,\r \"head_mask\": head_mask,\r \"decoder_head_mask\": decoder_head_mask,\r }\r\r\r\r@require_tf\rclass __SCREAMING_SNAKE_CASE (\t\t\t\t\t\t_UpperCamelCase\t\t, _UpperCamelCase\t\t, unittest.TestCase ):\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r SCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t:Any\t =\t\t\t\t\t(TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()\r SCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t:List[str]\t =\t\t\t\t\t(TFLEDForConditionalGeneration,) if is_tf_available() else ()\r SCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t:List[str]\t =\t\t\t\t\t(\r {\r \"conversational\": TFLEDForConditionalGeneration,\r \"feature-extraction\": TFLEDModel,\r \"summarization\": TFLEDForConditionalGeneration,\r \"text2text-generation\": TFLEDForConditionalGeneration,\r \"translation\": TFLEDForConditionalGeneration,\r }\r if is_tf_available()\r else {}\r )\r SCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t:Tuple\t =\t\t\t\t\tTrue\r SCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t:str\t 
=\t\t\t\t\tFalse\r SCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t:Optional[Any]\t =\t\t\t\t\tFalse\r SCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t:int\t =\t\t\t\t\tFalse\r\r def __SCREAMING_SNAKE_CASE\t\t\t\t(\t\tself : str\t)\t\t->\t\t\t\tList[Any]:\r _UpperCamelCase :\tint\t\t\t\t\t\t= TFLEDModelTester(self\t)\r _UpperCamelCase :\tAny\t\t\t\t\t\t= ConfigTester(self\t\t\t\t\t, config_class=__a\t)\r\r def __SCREAMING_SNAKE_CASE\t\t\t\t(\t\tself : Optional[Any]\t)\t\t->\t\t\t\tOptional[Any]:\r self.config_tester.run_common_tests()\r\r def __SCREAMING_SNAKE_CASE\t\t\t\t(\t\tself : List[str]\t)\t\t->\t\t\t\tint:\r _UpperCamelCase :\tTuple\t\t\t\t\t\t= self.model_tester.prepare_config_and_inputs_for_common()\r self.model_tester.check_decoder_model_past_large_inputs(*__a\t)\r\r def __SCREAMING_SNAKE_CASE\t\t\t\t(\t\tself : List[str]\t)\t\t->\t\t\t\tList[str]:\r _UpperCamelCase, _UpperCamelCase :\tList[Any]\t\t\t\t\t\t= self.model_tester.prepare_config_and_inputs_for_common()\r _UpperCamelCase :\tOptional[int]\t\t\t\t\t\t= tf.zeros_like(inputs_dict[\"attention_mask\"]\t)\r _UpperCamelCase :\tUnion[str, Any]\t\t\t\t\t\t= 2\r _UpperCamelCase :\tstr\t\t\t\t\t\t= tf.where(\r tf.range(self.model_tester.seq_length\t)[None, :] < num_global_attn_indices\t\t\t\t\t, 1\t\t\t\t\t, inputs_dict[\"global_attention_mask\"]\t\t\t\t\t, )\r\r _UpperCamelCase :\tDict\t\t\t\t\t\t= True\r _UpperCamelCase :\tstr\t\t\t\t\t\t= self.model_tester.seq_length\r _UpperCamelCase :\tUnion[str, Any]\t\t\t\t\t\t= self.model_tester.encoder_seq_length\r\r def check_decoder_attentions_output(__a : Optional[int]\t):\r _UpperCamelCase :\tOptional[int]\t\t\t\t\t\t= outputs.decoder_attentions\r self.assertEqual(len(__a\t)\t\t\t\t\t, self.model_tester.num_hidden_layers\t)\r self.assertListEqual(\r list(decoder_attentions[0].shape[-3:]\t)\t\t\t\t\t, [self.model_tester.num_attention_heads, seq_length, seq_length]\t\t\t\t\t, )\r\r def check_encoder_attentions_output(__a : Optional[Any]\t):\r _UpperCamelCase :\tUnion[str, Any]\t\t\t\t\t\t= [t.numpy() for t in outputs.encoder_attentions]\r _UpperCamelCase :\tList[Any]\t\t\t\t\t\t= [t.numpy() for t in outputs.encoder_global_attentions]\r self.assertEqual(len(__a\t)\t\t\t\t\t, self.model_tester.num_hidden_layers\t)\r self.assertEqual(len(__a\t)\t\t\t\t\t, self.model_tester.num_hidden_layers\t)\r self.assertListEqual(\r list(attentions[0].shape[-3:]\t)\t\t\t\t\t, [self.model_tester.num_attention_heads, seq_length, seq_length]\t\t\t\t\t, )\r self.assertListEqual(\r list(global_attentions[0].shape[-3:]\t)\t\t\t\t\t, [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices]\t\t\t\t\t, )\r\r for model_class in self.all_model_classes:\r _UpperCamelCase :\tDict\t\t\t\t\t\t= True\r _UpperCamelCase :\tOptional[Any]\t\t\t\t\t\t= False\r _UpperCamelCase :\tint\t\t\t\t\t\t= False\r _UpperCamelCase :\tOptional[int]\t\t\t\t\t\t= model_class(__a\t)\r _UpperCamelCase :\tint\t\t\t\t\t\t= model(self._prepare_for_class(__a\t\t\t\t\t, __a\t)\t)\r _UpperCamelCase :\tAny\t\t\t\t\t\t= len(__a\t)\r self.assertEqual(config.output_hidden_states\t\t\t\t\t, __a\t)\r check_encoder_attentions_output(__a\t)\r\r if self.is_encoder_decoder:\r _UpperCamelCase :\tOptional[Any]\t\t\t\t\t\t= model_class(__a\t)\r _UpperCamelCase :\tList[Any]\t\t\t\t\t\t= model(self._prepare_for_class(__a\t\t\t\t\t, __a\t)\t)\r self.assertEqual(config.output_hidden_states\t\t\t\t\t, __a\t)\r check_decoder_attentions_output(__a\t)\r\r # Check that output attentions can also be changed via the config\r del inputs_dict[\"output_attentions\"]\r 
_UpperCamelCase :\tint\t\t\t\t\t\t= True\r _UpperCamelCase :\tTuple\t\t\t\t\t\t= model_class(__a\t)\r _UpperCamelCase :\tstr\t\t\t\t\t\t= model(self._prepare_for_class(__a\t\t\t\t\t, __a\t)\t)\r self.assertEqual(config.output_hidden_states\t\t\t\t\t, __a\t)\r check_encoder_attentions_output(__a\t)\r\r # Check attention is always last and order is fine\r _UpperCamelCase :\tAny\t\t\t\t\t\t= True\r _UpperCamelCase :\tList[str]\t\t\t\t\t\t= True\r _UpperCamelCase :\tTuple\t\t\t\t\t\t= model_class(__a\t)\r _UpperCamelCase :\tint\t\t\t\t\t\t= model(self._prepare_for_class(__a\t\t\t\t\t, __a\t)\t)\r\r self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1)\t\t\t\t\t, len(__a\t)\t)\r self.assertEqual(model.config.output_hidden_states\t\t\t\t\t, __a\t)\r check_encoder_attentions_output(__a\t)\r\r @unittest.skip(\"LED keeps using potentially symbolic tensors in conditionals and breaks tracing.\"\t)\r def __SCREAMING_SNAKE_CASE\t\t\t\t(\t\tself : str\t)\t\t->\t\t\t\tDict:\r pass\r\r\r def __SCREAMING_SNAKE_CASE\t\t\t\t(\t\tself : Optional[int]\t)\t\t->\t\t\t\tTuple:\r # TODO: Head-masking not yet implement\r pass\r\r\r\r\r\rdef \t\t\t\t\tlowercase__ (\t\t\t\t\tlowercase_\t\t\t)\t\t\t\t\t->\t\tUnion[str, Any]:\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r return tf.constant(lowercase_\t\t,dtype=tf.intaa\t\t\t)\r\r\rlowerCamelCase__\t\t\t\t = 1E-4\r\r\r\r@slow\r@require_tf\rclass __SCREAMING_SNAKE_CASE (\t\t\t\t\t\tunittest.TestCase ):\r\r\r\r\r\r\r\r '''simple docstring'''\r\r\r def __SCREAMING_SNAKE_CASE\t\t\t\t(\t\tself : Tuple\t)\t\t->\t\t\t\tUnion[str, Any]:\r _UpperCamelCase :\tAny\t\t\t\t\t\t= TFLEDForConditionalGeneration.from_pretrained(\"allenai/led-base-16384\"\t).led\r\r # change to intended input here\r _UpperCamelCase :\tint\t\t\t\t\t\t= _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]]\t)\r _UpperCamelCase :\tTuple\t\t\t\t\t\t= _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]]\t)\r _UpperCamelCase :\tOptional[Any]\t\t\t\t\t\t= prepare_led_inputs_dict(model.config\t\t\t\t\t, __a\t\t\t\t\t, __a\t)\r _UpperCamelCase :\tOptional[int]\t\t\t\t\t\t= model(**__a\t)[0]\r _UpperCamelCase :\tOptional[int]\t\t\t\t\t\t= (1, 1024, 768)\r self.assertEqual(output.shape\t\t\t\t\t, __a\t)\r # change to expected output here\r _UpperCamelCase :\tTuple\t\t\t\t\t\t= tf.convert_to_tensor(\r [[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]]\t\t\t\t\t, )\r tf.debugging.assert_near(output[:, :3, :3]\t\t\t\t\t, __a\t\t\t\t\t, atol=1e-3\t)\r\r\r def __SCREAMING_SNAKE_CASE\t\t\t\t(\t\tself : Dict\t)\t\t->\t\t\t\tstr:\r _UpperCamelCase :\tOptional[int]\t\t\t\t\t\t= TFLEDForConditionalGeneration.from_pretrained(\"allenai/led-base-16384\"\t)\r\r # change to intended input here\r _UpperCamelCase :\tOptional[int]\t\t\t\t\t\t= _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]]\t)\r _UpperCamelCase :\tList[str]\t\t\t\t\t\t= _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]]\t)\r _UpperCamelCase :\tOptional[Any]\t\t\t\t\t\t= prepare_led_inputs_dict(model.config\t\t\t\t\t, __a\t\t\t\t\t, __a\t)\r _UpperCamelCase :\tUnion[str, Any]\t\t\t\t\t\t= model(**__a\t)[0]\r _UpperCamelCase :\tint\t\t\t\t\t\t= (1, 1024, model.config.vocab_size)\r self.assertEqual(output.shape\t\t\t\t\t, __a\t)\r # change to expected output here\r _UpperCamelCase :\tOptional[int]\t\t\t\t\t\t= tf.convert_to_tensor(\r [[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]]\t\t\t\t\t, )\r 
tf.debugging.assert_near(output[:, :3, :3]\t\t\t\t\t, __a\t\t\t\t\t, atol=1e-3\t\t\t\t\t, rtol=1e-3\t)\r"},"style_context_codestyle":{"kind":"number","value":51,"string":"51"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":542,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\n'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport unittest\r\n\r\nimport numpy as np\r\n\r\nfrom transformers.testing_utils import require_pytesseract, require_torch\r\nfrom transformers.utils import is_pytesseract_available, is_torch_available\r\n\r\nfrom ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs\r\n\r\n\r\nif is_torch_available():\r\n import torch\r\n\r\nif is_pytesseract_available():\r\n from PIL import Image\r\n\r\n from transformers import LayoutLMvaImageProcessor\r\nclass _lowercase (\t\t\t\t\t\t\tunittest.TestCase ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __init__(\t\t\tself , A__ , A__=7 , A__=3 , A__=18 , A__=30 , A__=4_00 , A__=True , A__=None , A__=True , )\t\t\t->\t\t\t\t\t\tTuple:\r\n snake_case\t\t\t\t\t= size if size is not None else {'''height''': 18, '''width''': 18}\r\n snake_case\t\t\t\t\t= parent\r\n snake_case\t\t\t\t\t= batch_size\r\n snake_case\t\t\t\t\t= num_channels\r\n snake_case\t\t\t\t\t= image_size\r\n snake_case\t\t\t\t\t= min_resolution\r\n snake_case\t\t\t\t\t= max_resolution\r\n snake_case\t\t\t\t\t= do_resize\r\n snake_case\t\t\t\t\t= size\r\n snake_case\t\t\t\t\t= apply_ocr\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n def UpperCamelCase (\t\t\tself )\t\t\t->\t\t\t\t\t\tint:\r\n return {\"do_resize\": self.do_resize, \"size\": self.size, \"apply_ocr\": self.apply_ocr}\r\n\r\n\r\n\r\n\r\n@require_torch\r\n@require_pytesseract\r\nclass _lowercase (\t\t\t\t\t\t\tsnake_case_\t\t\t\t\t,\t\tunittest.TestCase ):\r\n _UpperCAmelCase\t\t\t\t\t = LayoutLMvaImageProcessor if is_pytesseract_available() else None\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n def UpperCamelCase (\t\t\tself )\t\t\t->\t\t\t\t\t\tstr:\r\n snake_case\t\t\t\t\t= LayoutLMvaImageProcessingTester(self )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n @property\r\n def UpperCamelCase (\t\t\tself )\t\t\t->\t\t\t\t\t\tint:\r\n return self.image_processor_tester.prepare_image_processor_dict()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n def UpperCamelCase (\t\t\tself )\t\t\t->\t\t\t\t\t\tList[Any]:\r\n snake_case\t\t\t\t\t= self.image_processing_class(**self.image_processor_dict )\r\n self.assertTrue(hasattr(A__ , '''do_resize''' ) )\r\n self.assertTrue(hasattr(A__ , '''size''' ) )\r\n self.assertTrue(hasattr(A__ , '''apply_ocr''' ) )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n def UpperCamelCase (\t\t\tself )\t\t\t->\t\t\t\t\t\tstr:\r\n snake_case\t\t\t\t\t= self.image_processing_class.from_dict(self.image_processor_dict )\r\n self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )\r\n\r\n snake_case\t\t\t\t\t= self.image_processing_class.from_dict(self.image_processor_dict , size=42 )\r\n self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n def UpperCamelCase (\t\t\tself )\t\t\t->\t\t\t\t\t\tAny:\r\n pass\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n def UpperCamelCase (\t\t\tself )\t\t\t->\t\t\t\t\t\tOptional[int]:\r\n # Initialize image_processing\r\n snake_case\t\t\t\t\t= self.image_processing_class(**self.image_processor_dict )\r\n # create random PIL images\r\n snake_case\t\t\t\t\t= prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )\r\n for image in image_inputs:\r\n self.assertIsInstance(A__ , Image.Image 
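The "first k positions attend globally" mask used in that attention check is easy to reproduce. A small NumPy sketch with arbitrarily chosen sizes, not the test's own helper:

import numpy as np


def first_k_global(batch_size, seq_length, k):
    # 1 marks a position that attends globally (LED/Longformer convention), 0 is local-only.
    positions = np.arange(seq_length)[None, :]  # shape (1, seq_length)
    return (positions < k).astype(np.int32).repeat(batch_size, axis=0)


print(first_k_global(2, 6, 2))
# [[1 1 0 0 0 0]
#  [1 1 0 0 0 0]]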
Row 542 (label 0)
code (code_codestyle 342): an image-processor test for LayoutLMv3 (spelled LayoutLMvaImageProcessor in the row). A small tester class fixes batch size, channel count and a default 18x18 target size; the test class checks the do_resize / size / apply_ocr attributes, the from_dict size override (18 -> 42), and the (batch, channels, height, width) shape of pixel_values for unbatched and batched PIL, NumPy and torch inputs. An integration test then runs the processor with apply_ocr=True on a document image from hf-internal-testing/fixtures_docvqa, asserts a (1, 3, 224, 224) pixel tensor, and compares encoding.words and encoding.boxes against long hard-coded lists of words and bounding boxes obtained with Tesseract 4.1.1 (several hundred literal coordinates in the original row).
self.assertListEqual(encoding.words , A__ )\r\n self.assertListEqual(encoding.boxes , A__ )\r\n\r\n # with apply_OCR = False\r\n snake_case\t\t\t\t\t= LayoutLMvaImageProcessor(apply_ocr=A__ )\r\n\r\n snake_case\t\t\t\t\t= image_processing(A__ , return_tensors='''pt''' )\r\n\r\n self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )\r\n\r\n"},"code_codestyle":{"kind":"number","value":342,"string":"342"},"style_context":{"kind":"string","value":"\r\n\r\n__a :Optional[int] =\t\t\t[sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]\r\n\r\ndef __snake_case\t\t\t\t\t\t( __UpperCamelCase\t: int ):\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n A_\t\t\t\t\t\t =\t\t\t\t0\r\n while number:\r\n # Increased Speed Slightly by checking every 5 digits together.\r\n sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]\r\n number //= 10_0000\r\n\r\n return sum_of_digits_squared\r\n\r\n\r\n# There are 2 Chains made,\r\n# One ends with 89 with the chain member 58 being the one which when declared first,\r\n# there will be the least number of iterations for all the members to be checked.\r\n\r\n# The other one ends with 1 and has only one element 1.\r\n\r\n# So 58 and 1 are chosen to be declared at the starting.\r\n\r\n# Changed dictionary to an array to quicken the solution\r\n__a :list[bool | None] =\t\t\t[None] * 1000_0000\r\n__a :Optional[Any] =\t\t\tTrue\r\n__a :List[Any] =\t\t\tFalse\r\n\r\ndef __snake_case\t\t\t\t\t\t( __UpperCamelCase\t: int ):\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n if CHAINS[number - 1] is not None:\r\n return CHAINS[number - 1] # type: ignore\r\n\r\n A_\t\t\t\t\t\t =\t\t\t\tchain(next_number(__UpperCamelCase ) )\r\n A_\t\t\t\t\t\t =\t\t\t\tnumber_chain\r\n\r\n while number < 1000_0000:\r\n A_\t\t\t\t\t\t =\t\t\t\tnumber_chain\r\n number *= 10\r\n\r\n return number_chain\r\n\r\ndef __snake_case\t\t\t\t\t\t( __UpperCamelCase\t: int = 1000_0000 ):\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n for i in range(1\t\t\t\t,__UpperCamelCase ):\r\n if CHAINS[i] is None:\r\n chain(i + 1 )\r\n\r\n return CHAINS[:number].count(__UpperCamelCase )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import doctest\r\n\r\n doctest.testmod()\r\n print(F\"{solution() = }\")"},"style_context_codestyle":{"kind":"number","value":86,"string":"86"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":543,"cells":{"code":{"kind":"string","value":"\r\r\r\r\r\rdef \t\t\t\t\t\tsnake_case (UpperCamelCase : List[Any] ):\r\r\r\r\r\t'''simple docstring'''\r\r\r\r\r\r\r\r\tlowerCamelCase__ = [0] * len(UpperCamelCase )\r\tlowerCamelCase__ = []\r\tlowerCamelCase__ = [1] * len(UpperCamelCase )\r\r\tfor values in graph.values():\r\t\tfor i in values:\r\t\t\tindegree[i] += 1\r\r\tfor i in range(len(UpperCamelCase ) ):\r\t\tif indegree[i] == 0:\r\t\t\tqueue.append(UpperCamelCase )\r\r\twhile queue:\r\t\tlowerCamelCase__ = queue.pop(0 )\r\t\tfor x in graph[vertex]:\r\t\t\tindegree[x] -= 1\r\r\t\t\tif long_dist[vertex] + 1 > long_dist[x]:\r\t\t\t\tlowerCamelCase__ = long_dist[vertex] + 1\r\r\t\t\tif indegree[x] == 0:\r\t\t\t\tqueue.append(UpperCamelCase )\r\r\tprint(max(UpperCamelCase ) )\r\r\r# Adjacency list of Graph\ra__\t\t\t\t\t\t:\t\tint\t\t\t\t= {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}\r\r\rlongest_distance(graph)\r"},"code_codestyle":{"kind":"number","value":235,"string":"235"},"style_context":{"kind":"string","value":"\r\r\r\r\r\rdef \t\t\t\t\t\tsnake_case (UpperCamelCase : int = 50 ):\r\r\r\r\r\t'''simple 
docstring'''\r\r\r\r\r\r\r\r\tlowerCamelCase__ = [1] * (length + 1)\r\r\tfor row_length in range(3 ,\t\t\t\t\t\tlength + 1 ):\r\t\tfor block_length in range(3 ,\t\t\t\t\t\trow_length + 1 ):\r\t\t\tfor block_start in range(row_length - block_length ):\r\t\t\t\tways_number[row_length] += ways_number[\r\t\t\t\t row_length - block_start - block_length - 1\r\t\t\t\t]\r\r\t\t\tways_number[row_length] += 1\r\r\treturn ways_number[length]\r\r\rif __name__ == \"__main__\":\r\t\t\t\t\tprint(f'''{solution() = }''')\r"},"style_context_codestyle":{"kind":"number","value":235,"string":"235"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":544,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\nimport os\r\n\r\nimport pytest\r\n\r\nfrom datasets import (\r\n get_dataset_config_info,\r\n get_dataset_config_names,\r\n get_dataset_infos,\r\n get_dataset_split_names,\r\n inspect_dataset,\r\n inspect_metric,\r\n)\r\n\r\n\r\nSCREAMING_SNAKE_CASE__ = pytest.mark.integration\r\n\r\n\r\n\r\n\r\n@pytest.mark.parametrize(\"path\" ,[\"paws\", \"csv\"]\t\t\t\t\t\t\t)\r\ndef \t\t\t\t\t\t\tlowerCamelCase\t(\t\t\t\t\t_snake_case\t: Union[str, Any] ,_snake_case\t: str\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n inspect_dataset(_snake_case ,_snake_case\t\t\t\t\t\t\t)\r\n lowercase__\t\t\t\t\t\t\t\t\t= path + \".py\"\r\n assert script_name in os.listdir(_snake_case\t\t\t\t\t\t\t)\r\n assert \"__pycache__\" not in os.listdir(_snake_case\t\t\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n@pytest.mark.filterwarnings(\"ignore:inspect_metric is deprecated:FutureWarning\"\t\t\t\t\t\t\t)\r\n@pytest.mark.filterwarnings(\"ignore:metric_module_factory is deprecated:FutureWarning\"\t\t\t\t\t\t\t)\r\n@pytest.mark.parametrize(\"path\" ,[\"accuracy\"]\t\t\t\t\t\t\t)\r\ndef \t\t\t\t\t\t\tlowerCamelCase\t(\t\t\t\t\t_snake_case\t: List[Any] ,_snake_case\t: Dict\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n inspect_metric(_snake_case ,_snake_case\t\t\t\t\t\t\t)\r\n lowercase__\t\t\t\t\t\t\t\t\t= path + \".py\"\r\n assert script_name in os.listdir(_snake_case\t\t\t\t\t\t\t)\r\n assert \"__pycache__\" not in os.listdir(_snake_case\t\t\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"path, config_name, expected_splits\" ,[\r\n (\"squad\", \"plain_text\", [\"train\", \"validation\"]),\r\n (\"dalle-mini/wit\", \"dalle-mini--wit\", [\"train\"]),\r\n (\"paws\", \"labeled_final\", [\"train\", \"test\", \"validation\"]),\r\n ] ,)\r\ndef \t\t\t\t\t\t\tlowerCamelCase\t(\t\t\t\t\t_snake_case\t: List[str] ,_snake_case\t: Dict ,_snake_case\t: Union[str, Any]\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n lowercase__\t\t\t\t\t\t\t\t\t= get_dataset_config_info(_snake_case ,config_name=_snake_case\t\t\t\t\t\t\t)\r\n assert info.config_name == config_name\r\n assert list(info.splits.keys()\t\t\t\t\t\t\t) == expected_splits\r\n\r\n\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"path, config_name, expected_exception\" ,[\r\n (\"paws\", None, ValueError),\r\n ] ,)\r\ndef \t\t\t\t\t\t\tlowerCamelCase\t(\t\t\t\t\t_snake_case\t: Any ,_snake_case\t: List[str] ,_snake_case\t: Any\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n with pytest.raises(_snake_case\t\t\t\t\t\t\t):\r\n get_dataset_config_info(_snake_case ,config_name=_snake_case\t\t\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"path, expected\" ,[\r\n (\"squad\", \"plain_text\"),\r\n (\"acronym_identification\", 
\"default\"),\r\n (\"lhoestq/squad\", \"plain_text\"),\r\n (\"lhoestq/test\", \"default\"),\r\n (\"lhoestq/demo1\", \"lhoestq--demo1\"),\r\n (\"dalle-mini/wit\", \"dalle-mini--wit\"),\r\n ] ,)\r\ndef \t\t\t\t\t\t\tlowerCamelCase\t(\t\t\t\t\t_snake_case\t: int ,_snake_case\t: List[str]\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n lowercase__\t\t\t\t\t\t\t\t\t= get_dataset_config_names(_snake_case\t\t\t\t\t\t\t)\r\n assert expected in config_names\r\n\r\n\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"path, expected_configs, expected_splits_in_first_config\" ,[\r\n (\"squad\", [\"plain_text\"], [\"train\", \"validation\"]),\r\n (\"dalle-mini/wit\", [\"dalle-mini--wit\"], [\"train\"]),\r\n (\"paws\", [\"labeled_final\", \"labeled_swap\", \"unlabeled_final\"], [\"train\", \"test\", \"validation\"]),\r\n ] ,)\r\ndef \t\t\t\t\t\t\tlowerCamelCase\t(\t\t\t\t\t_snake_case\t: Union[str, Any] ,_snake_case\t: Any ,_snake_case\t: int\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n lowercase__\t\t\t\t\t\t\t\t\t= get_dataset_infos(_snake_case\t\t\t\t\t\t\t)\r\n assert list(infos.keys()\t\t\t\t\t\t\t) == expected_configs\r\n lowercase__\t\t\t\t\t\t\t\t\t= expected_configs[0]\r\n assert expected_config in infos\r\n lowercase__\t\t\t\t\t\t\t\t\t= infos[expected_config]\r\n assert info.config_name == expected_config\r\n assert list(info.splits.keys()\t\t\t\t\t\t\t) == expected_splits_in_first_config\r\n\r\n\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"path, expected_config, expected_splits\" ,[\r\n (\"squad\", \"plain_text\", [\"train\", \"validation\"]),\r\n (\"dalle-mini/wit\", \"dalle-mini--wit\", [\"train\"]),\r\n (\"paws\", \"labeled_final\", [\"train\", \"test\", \"validation\"]),\r\n ] ,)\r\ndef \t\t\t\t\t\t\tlowerCamelCase\t(\t\t\t\t\t_snake_case\t: Optional[int] ,_snake_case\t: Any ,_snake_case\t: List[Any]\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n lowercase__\t\t\t\t\t\t\t\t\t= get_dataset_infos(_snake_case\t\t\t\t\t\t\t)\r\n assert expected_config in infos\r\n lowercase__\t\t\t\t\t\t\t\t\t= infos[expected_config]\r\n assert info.config_name == expected_config\r\n assert list(info.splits.keys()\t\t\t\t\t\t\t) == expected_splits\r\n\r\n\r\n\r\n\r\n@pytest.mark.parametrize(\r\n \"path, config_name, expected_exception\" ,[\r\n (\"paws\", None, ValueError),\r\n ] ,)\r\ndef \t\t\t\t\t\t\tlowerCamelCase\t(\t\t\t\t\t_snake_case\t: str ,_snake_case\t: Tuple ,_snake_case\t: Optional[Any]\t\t\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n with pytest.raises(_snake_case\t\t\t\t\t\t\t):\r\n get_dataset_split_names(_snake_case ,config_name=_snake_case\t\t\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":267,"string":"267"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\n'''simple docstring'''\r\n\r\n\r\n\r\n\r\n\r\nfrom typing import Optional, Union\r\n\r\nimport torch\r\nfrom torch import nn\r\n\r\nfrom ...configuration_utils import ConfigMixin, register_to_config\r\nfrom ...models.modeling_utils import ModelMixin\r\n\r\n\r\nclass \t\t\t\t\tsnake_case\t\t\t(UpperCamelCase\t\t, UpperCamelCase ):\r\n\r\n\r\n\r\n @register_to_config\r\n def __init__(\t\t\t\t\tself ,UpperCAmelCase_ = 768 ,)\t->\t\t\t\tList[Any]:\r\n super().__init__()\r\n\r\n lowercase__\t\t\t\t\t\t\t\t\t= nn.Parameter(torch.zeros(1 ,UpperCAmelCase_ ) )\r\n lowercase__\t\t\t\t\t\t\t\t\t= nn.Parameter(torch.ones(1 ,UpperCAmelCase_ ) )\r\n\r\n\r\n\r\n def \t\t\t\t_a (\t\t\t\t\tself ,UpperCAmelCase_ = None 
,UpperCAmelCase_ = None ,)\t->\t\t\t\tAny:\r\n lowercase__\t\t\t\t\t\t\t\t\t= nn.Parameter(self.mean.to(UpperCAmelCase_ ).to(UpperCAmelCase_ ) )\r\n lowercase__\t\t\t\t\t\t\t\t\t= nn.Parameter(self.std.to(UpperCAmelCase_ ).to(UpperCAmelCase_ ) )\r\n return self\r\n\r\n\r\n\r\n def \t\t\t\t_a (\t\t\t\t\tself ,UpperCAmelCase_ )\t->\t\t\t\tTuple:\r\n lowercase__\t\t\t\t\t\t\t\t\t= (embeds - self.mean) * 1.0 / self.std\r\n return embeds\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\t\t_a (\t\t\t\t\tself ,UpperCAmelCase_ )\t->\t\t\t\tList[str]:\r\n lowercase__\t\t\t\t\t\t\t\t\t= (embeds * self.std) + self.mean\r\n return embeds\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":267,"string":"267"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":545,"cells":{"code":{"kind":"string","value":"\"\"\"simple docstring\"\"\"\r\n\r\nimport argparse\r\n\r\nimport torch\r\n\r\nfrom transformers import (\r\n EncodecConfig,\r\n EncodecFeatureExtractor,\r\n EncodecModel,\r\n logging,\r\n)\r\n\r\n\r\n# checkpoints downloaded from:\r\n# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th\r\n# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin\r\n# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th\r\n\r\n\r\nlogging.set_verbosity_info()\r\n__lowercase =\t\t\t\t\t\t\tlogging.get_logger('''transformers.models.encodec''')\r\n\r\n__lowercase =\t\t\t\t\t\t\t{\r\n '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',\r\n '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',\r\n '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',\r\n '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',\r\n}\r\n__lowercase =\t\t\t\t\t\t\t{\r\n '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',\r\n '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',\r\n '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',\r\n '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',\r\n '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',\r\n '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',\r\n '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',\r\n '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',\r\n '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',\r\n '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',\r\n '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',\r\n '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',\r\n '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',\r\n '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',\r\n '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',\r\n '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',\r\n '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',\r\n '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',\r\n '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',\r\n}\r\n__lowercase =\t\t\t\t\t\t\t{\r\n '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',\r\n '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',\r\n 
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',\r\n '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',\r\n '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',\r\n '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',\r\n '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',\r\n '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',\r\n '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',\r\n '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',\r\n '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',\r\n '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',\r\n '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',\r\n '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',\r\n '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',\r\n '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',\r\n '''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',\r\n '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',\r\n}\r\n__lowercase =\t\t\t\t\t\t\t{\r\n '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',\r\n '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',\r\n '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',\r\n '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',\r\n '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',\r\n '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',\r\n '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',\r\n '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',\r\n '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',\r\n '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',\r\n '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',\r\n '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',\r\n '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',\r\n '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',\r\n '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',\r\n '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',\r\n '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',\r\n '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',\r\n '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',\r\n}\r\n__lowercase =\t\t\t\t\t\t\t{\r\n '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',\r\n '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',\r\n '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',\r\n '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',\r\n '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',\r\n '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',\r\n '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',\r\n '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',\r\n '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',\r\n '''decoder.model.9.convtr.norm''': 
'''decoder.layers.9.norm''',\r\n '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',\r\n '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',\r\n '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',\r\n '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',\r\n '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',\r\n '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',\r\n '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',\r\n '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',\r\n}\r\n__lowercase =\t\t\t\t\t\t\t{\r\n **MAPPING_QUANTIZER,\r\n **MAPPING_ENCODER,\r\n **MAPPING_DECODER,\r\n}\r\n__lowercase =\t\t\t\t\t\t\t{\r\n **MAPPING_QUANTIZER,\r\n **MAPPING_ENCODER,\r\n **MAPPING_ENCODER_48K,\r\n **MAPPING_DECODER,\r\n **MAPPING_DECODER_48K,\r\n}\r\n__lowercase =\t\t\t\t\t\t\t[]\r\n__lowercase =\t\t\t\t\t\t\t[]\r\n\r\n\r\ndef lowerCAmelCase (__UpperCamelCase :\t\t\t\t\t\tTuple ,\t\t\t\t__UpperCamelCase :\t\t\t\t\t\tDict ,\t\t\t\t__UpperCamelCase :\t\t\t\t\t\tOptional[int] ,\t\t\t\t__UpperCamelCase :\t\t\t\t\t\tint ,\t\t\t\t__UpperCamelCase :\t\t\t\t\t\tint ):\r\n\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\t\t\t\t\t\t\tfor attribute in key.split('''.''' ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =getattr(__UpperCamelCase ,\t\t\t\t__UpperCamelCase )\r\n\r\n\t\t\t\t\t\t\tif weight_type is not None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =getattr(__UpperCamelCase ,\t\t\t\t__UpperCamelCase ).shape\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =hf_pointer.shape\r\n\r\n\t\t\t\t\t\t\tif hf_shape != value.shape:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t F\"\"\"Shape of hf {key + \".\" + weight_type if weight_type is not None else \"\"} is {hf_shape}, but should be\"\"\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t F\"\"\" {value.shape} for {full_name}\"\"\" )\r\n\r\n\t\t\t\t\t\t\tif weight_type == \"weight\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =value\r\n\t\t\t\t\t\t\telif weight_type == \"weight_g\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =value\r\n\t\t\t\t\t\t\telif weight_type == \"weight_v\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =value\r\n\t\t\t\t\t\t\telif weight_type == \"bias\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =value\r\n\t\t\t\t\t\t\telif weight_type == \"running_mean\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =value\r\n\t\t\t\t\t\t\telif weight_type == \"running_var\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =value\r\n\t\t\t\t\t\t\telif weight_type == \"num_batches_tracked\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =value\r\n\t\t\t\t\t\t\telif weight_type == \"weight_ih_l0\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =value\r\n\t\t\t\t\t\t\telif weight_type == \"weight_hh_l0\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =value\r\n\t\t\t\t\t\t\telif weight_type == \"bias_ih_l0\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =value\r\n\t\t\t\t\t\t\telif weight_type == \"bias_hh_l0\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =value\r\n\t\t\t\t\t\t\telif weight_type == \"weight_ih_l1\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =value\r\n\t\t\t\t\t\t\telif weight_type == \"weight_hh_l1\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =value\r\n\t\t\t\t\t\t\telif weight_type == 
\"bias_ih_l1\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =value\r\n\t\t\t\t\t\t\telif weight_type == \"bias_hh_l1\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =value\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =value\r\n\r\n\t\t\t\t\t\t\tlogger.info(F\"\"\"{key + (\".\" + weight_type if weight_type is not None else \"\")} was initialized from {full_name}.\"\"\" )\r\n\r\n\r\ndef lowerCAmelCase (__UpperCamelCase :\t\t\t\t\t\tList[Any] ,\t\t\t\t__UpperCamelCase :\t\t\t\t\t\tOptional[int] ):\r\n\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\t\t\t\t\t\t\tfor key in ignore_keys:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif key.endswith('''.*''' ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif name.startswith(key[:-1] ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn True\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\telif \".*.\" in key:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase ,\t\t\t__UpperCamelCase\t =key.split('''.*.''' )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif prefix in name and suffix in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn True\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\telif key in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn True\r\n\t\t\t\t\t\t\treturn False\r\n\r\n\r\ndef lowerCAmelCase (__UpperCamelCase :\t\t\t\t\t\tList[Any] ,\t\t\t\t__UpperCamelCase :\t\t\t\t\t\tstr ,\t\t\t\t__UpperCamelCase :\t\t\t\t\t\tUnion[str, Any] ):\r\n\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\t\t\t\t\t\t\t__UpperCamelCase\t =[]\r\n\r\n\t\t\t\t\t\t\tif model_name == \"encodec_24khz\" or \"encodec_32khz\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =MAPPING_24K\r\n\t\t\t\t\t\t\telif model_name == \"encodec_48khz\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =MAPPING_48K\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(F\"\"\"Unsupported model: {model_name}\"\"\" )\r\n\r\n\t\t\t\t\t\t\tfor name, value in orig_dict.items():\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif should_ignore(__UpperCamelCase ,\t\t\t\t__UpperCamelCase ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlogger.info(F\"\"\"{name} was ignored\"\"\" )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcontinue\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =False\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tfor key, mapped_key in MAPPING.items():\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif \"*\" in key:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase ,\t\t\t__UpperCamelCase\t =key.split('''.*.''' )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif prefix in name and suffix in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =suffix\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif key in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# HACK otherwise .embed gets initialized with .embed_avg too\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcontinue\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =True\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif \"*\" in mapped_key:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =name.split(__UpperCamelCase )[0].split('''.''' )[-2]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t 
=mapped_key.replace('''*''' ,\t\t\t\t__UpperCamelCase )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif \"weight_g\" in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t ='''weight_g'''\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif \"weight_v\" in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t ='''weight_v'''\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif \"weight_ih_l0\" in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t ='''weight_ih_l0'''\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif \"weight_hh_l0\" in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t ='''weight_hh_l0'''\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif \"bias_ih_l0\" in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t ='''bias_ih_l0'''\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif \"bias_hh_l0\" in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t ='''bias_hh_l0'''\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif \"weight_ih_l1\" in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t ='''weight_ih_l1'''\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif \"weight_hh_l1\" in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t ='''weight_hh_l1'''\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif \"bias_ih_l1\" in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t ='''bias_ih_l1'''\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif \"bias_hh_l1\" in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t ='''bias_hh_l1'''\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif \"bias\" in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t ='''bias'''\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif \"weight\" in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t ='''weight'''\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif \"running_mean\" in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t ='''running_mean'''\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif \"running_var\" in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t ='''running_var'''\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif \"num_batches_tracked\" in name:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t ='''num_batches_tracked'''\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =None\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tset_recursively(__UpperCamelCase ,\t\t\t\t__UpperCamelCase ,\t\t\t\t__UpperCamelCase ,\t\t\t\t__UpperCamelCase ,\t\t\t\t__UpperCamelCase )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcontinue\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif not 
is_used:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tunused_weights.append(__UpperCamelCase )\r\n\r\n\t\t\t\t\t\t\tlogger.warning(F\"\"\"Unused weights: {unused_weights}\"\"\" )\r\n\r\n\r\n@torch.no_grad()\r\ndef lowerCAmelCase (__UpperCamelCase :\t\t\t\t\t\tOptional[Any] ,\t\t\t\t__UpperCamelCase :\t\t\t\t\t\tDict ,\t\t\t\t__UpperCamelCase :\t\t\t\t\t\tint ,\t\t\t\t__UpperCamelCase :\t\t\t\t\t\tList[str]=None ,\t\t\t\t__UpperCamelCase :\t\t\t\t\t\tDict=None ,\t\t\t\t):\r\n\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\t\t\t\t\t\t\tif config_path is not None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =EncodecConfig.from_pretrained(__UpperCamelCase )\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =EncodecConfig()\r\n\r\n\t\t\t\t\t\t\tif model_name == \"encodec_24khz\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tpass # config is already correct\r\n\t\t\t\t\t\t\telif model_name == \"encodec_32khz\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =[8, 5, 4, 4]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =[2.2]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =6_4\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =3_2_0_0_0\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =2_0_4_8\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =False\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =False\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =False\r\n\t\t\t\t\t\t\telif model_name == \"encodec_48khz\":\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =[8, 5, 4, 2]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =[3.0, 6.0, 1_2.0, 2_4.0]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =4_8_0_0_0\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =2\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =False\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t ='''time_group_norm'''\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =True\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =1.0\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =0.0_1\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(F\"\"\"Unknown model name: {model_name}\"\"\" )\r\n\r\n\t\t\t\t\t\t\t__UpperCamelCase\t =EncodecModel(__UpperCamelCase )\r\n\r\n\t\t\t\t\t\t\t__UpperCamelCase\t =EncodecFeatureExtractor(\r\n\t\t\t\t\t\t\t feature_size=config.audio_channels ,\t\t\t\tsampling_rate=config.sampling_rate ,\t\t\t\tchunk_length_s=config.chunk_length_s ,\t\t\t\toverlap=config.overlap ,\t\t\t\t)\r\n\t\t\t\t\t\t\tfeature_extractor.save_pretrained(__UpperCamelCase )\r\n\r\n\t\t\t\t\t\t\t__UpperCamelCase\t =torch.load(__UpperCamelCase )\r\n\t\t\t\t\t\t\tif \"best_state\" in original_checkpoint:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# we might have a training state saved, in which case discard the yaml results and just retain the weights\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =original_checkpoint['''best_state''']\r\n\t\t\t\t\t\t\trecursively_load_weights(__UpperCamelCase ,\t\t\t\t__UpperCamelCase ,\t\t\t\t__UpperCamelCase )\r\n\t\t\t\t\t\t\tmodel.save_pretrained(__UpperCamelCase )\r\n\r\n\t\t\t\t\t\t\tif repo_id:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tprint('''Pushing to the hub...''' )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeature_extractor.push_to_hub(__UpperCamelCase )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tmodel.push_to_hub(__UpperCamelCase )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\t__lowercase =\t\t\t\t\t\t\targparse.ArgumentParser()\r\n\t\t\t\tparser.add_argument(\r\n\t\t\t\t '''--model''',\r\n\t\t\t\t default='''encodec_24khz''',\r\n\t\t\t\t type=str,\r\n\t\t\t\t help='''The 
model to convert. Should be one of \\'encodec_24khz\\', \\'encodec_32khz\\', \\'encodec_48khz\\'.''',\r\n\t\t\t\t)\r\n\t\t\t\tparser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')\r\n\t\t\t\tparser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')\r\n\t\t\t\tparser.add_argument(\r\n\t\t\t\t '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''\r\n\t\t\t\t)\r\n\t\t\t\tparser.add_argument(\r\n\t\t\t\t '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''\r\n\t\t\t\t)\r\n\r\n\t\t\t\t__lowercase =\t\t\t\t\t\t\tparser.parse_args()\r\n\t\t\t\tconvert_checkpoint(\r\n\t\t\t\t args.model,\r\n\t\t\t\t args.checkpoint_path,\r\n\t\t\t\t args.pytorch_dump_folder_path,\r\n\t\t\t\t args.config_path,\r\n\t\t\t\t args.push_to_hub,\r\n\t\t\t\t)\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":296,"string":"296"},"style_context":{"kind":"string","value":"\"\"\"simple docstring\"\"\"\r\n\r\nimport heapq as hq\r\nimport math\r\nfrom collections.abc import Iterator\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass \t\t_lowercase :\r\n\r\n\r\n\r\n\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\tdef __init__(\t\t\t\t\t\t\tself :\t\t\t\t\t\tTuple , UpperCamelCase__ :\t\t\t\t\t\tint\t\t\t\t\t\t\t)\t\t\t\t\t\t\t->\t\t\t\t\t\t\tList[str]:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =str(id_\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =None\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =None\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =[]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t ={} # {vertex:distance}\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\tdef __lt__(\t\t\t\t\t\t\tself :\t\t\t\t\t\tDict , UpperCamelCase__ :\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t\t)\t\t\t\t\t\t\t->\t\t\t\t\t\t\tTuple:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\treturn self.key < other.key\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\tdef __repr__(\t\t\t\t\t\t\tself :\t\t\t\t\t\tint\t\t\t\t\t\t\t)\t\t\t\t\t\t\t->\t\t\t\t\t\t\tTuple:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\treturn self.id\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\tdef UpperCAmelCase_ (\t\t\t\t\t\t\tself :\t\t\t\t\t\tList[Any] , UpperCamelCase__ :\t\t\t\t\t\tint\t\t\t\t\t\t\t)\t\t\t\t\t\t\t->\t\t\t\t\t\t\tOptional[Any]:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.neighbors.append(UpperCamelCase__\t\t\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\tdef UpperCAmelCase_ (\t\t\t\t\t\t\tself :\t\t\t\t\t\tOptional[int] , UpperCamelCase__ :\t\t\t\t\t\tTuple , UpperCamelCase__ :\t\t\t\t\t\tList[Any]\t\t\t\t\t\t\t)\t\t\t\t\t\t\t->\t\t\t\t\t\t\tTuple:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =weight\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef lowerCAmelCase (__UpperCamelCase :\t\t\t\t\t\tDict ,\t\t\t\t__UpperCamelCase :\t\t\t\t\t\tList[str] ,\t\t\t\t__UpperCamelCase :\t\t\t\t\t\tList[str] ,\t\t\t\t__UpperCamelCase :\t\t\t\t\t\tint ):\r\n\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\t\t\t\t\t\t\tgraph[a - 1].add_neighbor(graph[b - 1] )\r\n\t\t\t\t\t\t\tgraph[b - 1].add_neighbor(graph[a - 1] )\r\n\t\t\t\t\t\t\t# add the 
edges:\r\n\t\t\t\t\t\t\tgraph[a - 1].add_edge(graph[b - 1] ,\t\t\t\t__UpperCamelCase )\r\n\t\t\t\t\t\t\tgraph[b - 1].add_edge(graph[a - 1] ,\t\t\t\t__UpperCamelCase )\r\n\r\n\r\ndef lowerCAmelCase (__UpperCamelCase :\t\t\t\t\t\tlist ,\t\t\t\t__UpperCamelCase :\t\t\t\t\t\tVertex ):\r\n\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\t\t\t\t\t\t\t__UpperCamelCase\t =[]\r\n\t\t\t\t\t\t\tfor u in graph:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =math.inf\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =None\r\n\t\t\t\t\t\t\t__UpperCamelCase\t =0\r\n\t\t\t\t\t\t\t__UpperCamelCase\t =graph[:]\r\n\t\t\t\t\t\t\twhile q:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =min(__UpperCamelCase )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tq.remove(__UpperCamelCase )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tfor v in u.neighbors:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif (v in q) and (u.edges[v.id] < v.key):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =u\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =u.edges[v.id]\r\n\t\t\t\t\t\t\tfor i in range(1 ,\t\t\t\tlen(__UpperCamelCase ) ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\ta.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )\r\n\t\t\t\t\t\t\treturn a\r\n\r\n\r\ndef lowerCAmelCase (__UpperCamelCase :\t\t\t\t\t\tlist ,\t\t\t\t__UpperCamelCase :\t\t\t\t\t\tVertex ):\r\n\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\t\t\t\t\t\t\tfor u in graph:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =math.inf\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =None\r\n\t\t\t\t\t\t\t__UpperCamelCase\t =0\r\n\r\n\t\t\t\t\t\t\t__UpperCamelCase\t =list(__UpperCamelCase )\r\n\t\t\t\t\t\t\thq.heapify(__UpperCamelCase )\r\n\r\n\t\t\t\t\t\t\twhile h:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =hq.heappop(__UpperCamelCase )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tfor v in u.neighbors:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif (v in h) and (u.edges[v.id] < v.key):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =u\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__UpperCamelCase\t =u.edges[v.id]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\thq.heapify(__UpperCamelCase )\r\n\r\n\t\t\t\t\t\t\tfor i in range(1 ,\t\t\t\tlen(__UpperCamelCase ) ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tyield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)\r\n\r\n\r\ndef lowerCAmelCase ():\r\n\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\timport doctest\r\n\r\n\t\t\t\tdoctest.testmod()\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":296,"string":"296"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":546,"cells":{"code":{"kind":"string","value":"import warnings\rfrom typing import List\r\rimport numpy as np\r\rfrom ...processing_utils import ProcessorMixin\rfrom ...tokenization_utils_base import BatchEncoding\rfrom ...utils import is_flax_available, is_tf_available, is_torch_available\rclass \t\t\t\t\tsnake_case\t( __snake_case ):\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r\r\r\r __lowerCAmelCase \t\t\t\t\t\t= [\"\"\"image_processor\"\"\", \"\"\"tokenizer\"\"\"]\r __lowerCAmelCase \t\t\t\t\t\t= \"\"\"OwlViTImageProcessor\"\"\"\r __lowerCAmelCase \t\t\t\t\t\t= (\"\"\"CLIPTokenizer\"\"\", \"\"\"CLIPTokenizerFast\"\"\")\r\r\r\r\r\r def __init__( self\t\t\t\t\t,\tlowerCAmelCase_=None\t\t\t\t\t,\tlowerCAmelCase_=None\t\t\t\t\t,\t**lowerCAmelCase_\t):\r __lowercase\t\t\t\t\t=\t\tNone\r if 
\"feature_extractor\" in kwargs:\r warnings.warn(\r \"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`\"\r \" instead.\"\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\t)\r __lowercase\t\t\t\t\t=\t\tkwargs.pop(\"feature_extractor\"\t)\r\r __lowercase\t\t\t\t\t=\t\timage_processor if image_processor is not None else feature_extractor\r if image_processor is None:\r raise ValueError(\"You need to specify an `image_processor`.\"\t)\r if tokenizer is None:\r raise ValueError(\"You need to specify a `tokenizer`.\"\t)\r\r super().__init__(lowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t)\r\r\r\r\r\r def __call__( self\t\t\t\t\t,\tlowerCAmelCase_=None\t\t\t\t\t,\tlowerCAmelCase_=None\t\t\t\t\t,\tlowerCAmelCase_=None\t\t\t\t\t,\tlowerCAmelCase_=\"max_length\"\t\t\t\t\t,\tlowerCAmelCase_=\"np\"\t\t\t\t\t,\t**lowerCAmelCase_\t):\r\r if text is None and query_images is None and images is None:\r raise ValueError(\r \"You have to specify at least one text or query image or image. All three cannot be none.\"\t)\r\r if text is not None:\r if isinstance(lowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t) or (isinstance(lowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t) and not isinstance(text[0]\t\t\t\t\t,\tlowerCAmelCase_\t)):\r __lowercase\t\t\t\t\t=\t\t[self.tokenizer(lowerCAmelCase_\t\t\t\t\t,\tpadding=lowerCAmelCase_\t\t\t\t\t,\treturn_tensors=lowerCAmelCase_\t\t\t\t\t,\t**lowerCAmelCase_\t)]\r\r elif isinstance(lowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t) and isinstance(text[0]\t\t\t\t\t,\tlowerCAmelCase_\t):\r __lowercase\t\t\t\t\t=\t\t[]\r\r # Maximum number of queries across batch\r __lowercase\t\t\t\t\t=\t\tmax([len(lowerCAmelCase_\t) for t in text]\t)\r\r # Pad all batch samples to max number of text queries\r for t in text:\r if len(lowerCAmelCase_\t) != max_num_queries:\r __lowercase\t\t\t\t\t=\t\tt + [\" \"] * (max_num_queries - len(lowerCAmelCase_\t))\r\r __lowercase\t\t\t\t\t=\t\tself.tokenizer(lowerCAmelCase_\t\t\t\t\t,\tpadding=lowerCAmelCase_\t\t\t\t\t,\treturn_tensors=lowerCAmelCase_\t\t\t\t\t,\t**lowerCAmelCase_\t)\r encodings.append(lowerCAmelCase_\t)\r else:\r raise TypeError(\"Input text should be a string, a list of strings or a nested list of strings\"\t)\r\r if return_tensors == \"np\":\r __lowercase\t\t\t\t\t=\t\tnp.concatenate([encoding[\"input_ids\"] for encoding in encodings]\t\t\t\t\t,\taxis=0\t)\r __lowercase\t\t\t\t\t=\t\tnp.concatenate([encoding[\"attention_mask\"] for encoding in encodings]\t\t\t\t\t,\taxis=0\t)\r\r elif return_tensors == \"jax\" and is_flax_available():\r import jax.numpy as jnp\r\r __lowercase\t\t\t\t\t=\t\tjnp.concatenate([encoding[\"input_ids\"] for encoding in encodings]\t\t\t\t\t,\taxis=0\t)\r __lowercase\t\t\t\t\t=\t\tjnp.concatenate([encoding[\"attention_mask\"] for encoding in encodings]\t\t\t\t\t,\taxis=0\t)\r\r elif return_tensors == \"pt\" and is_torch_available():\r import torch\r\r __lowercase\t\t\t\t\t=\t\ttorch.cat([encoding[\"input_ids\"] for encoding in encodings]\t\t\t\t\t,\tdim=0\t)\r __lowercase\t\t\t\t\t=\t\ttorch.cat([encoding[\"attention_mask\"] for encoding in encodings]\t\t\t\t\t,\tdim=0\t)\r\r elif return_tensors == \"tf\" and is_tf_available():\r import tensorflow as tf\r\r __lowercase\t\t\t\t\t=\t\ttf.stack([encoding[\"input_ids\"] for encoding in encodings]\t\t\t\t\t,\taxis=0\t)\r __lowercase\t\t\t\t\t=\t\ttf.stack([encoding[\"attention_mask\"] for encoding in encodings]\t\t\t\t\t,\taxis=0\t)\r\r else:\r raise ValueError(\"Target return tensor type could not be returned\"\t)\r\r 
__lowercase\t\t\t\t\t=\t\tBatchEncoding()\r __lowercase\t\t\t\t\t=\t\tinput_ids\r __lowercase\t\t\t\t\t=\t\tattention_mask\r\r if query_images is not None:\r __lowercase\t\t\t\t\t=\t\tBatchEncoding()\r __lowercase\t\t\t\t\t=\t\tself.image_processor(\r lowerCAmelCase_\t\t\t\t\t,\treturn_tensors=lowerCAmelCase_\t\t\t\t\t,\t**lowerCAmelCase_\t).pixel_values\r __lowercase\t\t\t\t\t=\t\tquery_pixel_values\r\r if images is not None:\r __lowercase\t\t\t\t\t=\t\tself.image_processor(lowerCAmelCase_\t\t\t\t\t,\treturn_tensors=lowerCAmelCase_\t\t\t\t\t,\t**lowerCAmelCase_\t)\r\r if text is not None and images is not None:\r __lowercase\t\t\t\t\t=\t\timage_features.pixel_values\r return encoding\r elif query_images is not None and images is not None:\r __lowercase\t\t\t\t\t=\t\timage_features.pixel_values\r return encoding\r elif text is not None or query_images is not None:\r return encoding\r else:\r return BatchEncoding(data=dict(**lowerCAmelCase_\t)\t\t\t\t\t,\ttensor_type=lowerCAmelCase_\t)\r\r\r\r\r\r def \t\t\t\tsnake_case__\t\t\t\t\t\t\t( self\t\t\t\t\t,\t*lowerCAmelCase_\t\t\t\t\t,\t**lowerCAmelCase_\t):\r return self.image_processor.post_process(*lowerCAmelCase_\t\t\t\t\t,\t**lowerCAmelCase_\t)\r\r\r\r\r\r def \t\t\t\tsnake_case__\t\t\t\t\t\t\t( self\t\t\t\t\t,\t*lowerCAmelCase_\t\t\t\t\t,\t**lowerCAmelCase_\t):\r return self.image_processor.post_process_object_detection(*lowerCAmelCase_\t\t\t\t\t,\t**lowerCAmelCase_\t)\r\r\r\r\r\r def \t\t\t\tsnake_case__\t\t\t\t\t\t\t( self\t\t\t\t\t,\t*lowerCAmelCase_\t\t\t\t\t,\t**lowerCAmelCase_\t):\r return self.image_processor.post_process_image_guided_detection(*lowerCAmelCase_\t\t\t\t\t,\t**lowerCAmelCase_\t)\r\r\r\r\r\r def \t\t\t\tsnake_case__\t\t\t\t\t\t\t( self\t\t\t\t\t,\t*lowerCAmelCase_\t\t\t\t\t,\t**lowerCAmelCase_\t):\r return self.tokenizer.batch_decode(*lowerCAmelCase_\t\t\t\t\t,\t**lowerCAmelCase_\t)\r\r\r\r\r\r def \t\t\t\tsnake_case__\t\t\t\t\t\t\t( self\t\t\t\t\t,\t*lowerCAmelCase_\t\t\t\t\t,\t**lowerCAmelCase_\t):\r return self.tokenizer.decode(*lowerCAmelCase_\t\t\t\t\t,\t**lowerCAmelCase_\t)\r\r\r\r\r\r @property\r def \t\t\t\tsnake_case__\t\t\t\t\t\t\t( self\t):\r warnings.warn(\r \"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.\"\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\t)\r return self.image_processor_class\r\r\r\r\r\r @property\r def \t\t\t\tsnake_case__\t\t\t\t\t\t\t( self\t):\r warnings.warn(\r \"`feature_extractor` is deprecated and will be removed in v5. 
Use `image_processor` instead.\"\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\t)\r return self.image_processor\r\r"},"code_codestyle":{"kind":"number","value":321,"string":"321"},"style_context":{"kind":"string","value":"import unittest\r\rfrom transformers import BigBirdConfig, is_flax_available\rfrom transformers.testing_utils import require_flax, slow\r\rfrom ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask\r\r\rif is_flax_available():\r import jax\r\r from transformers.models.big_bird.modeling_flax_big_bird import (\r FlaxBigBirdForCausalLM,\r FlaxBigBirdForMaskedLM,\r FlaxBigBirdForMultipleChoice,\r FlaxBigBirdForPreTraining,\r FlaxBigBirdForQuestionAnswering,\r FlaxBigBirdForSequenceClassification,\r FlaxBigBirdForTokenClassification,\r FlaxBigBirdModel,\r )\rclass \t\t\t\t\tsnake_case\t( unittest.TestCase ):\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r\r\r\r def __init__( self\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_=2\t\t\t\t\t,\tlowerCAmelCase_=56\t\t\t\t\t,\tlowerCAmelCase_=True\t\t\t\t\t,\tlowerCAmelCase_=True\t\t\t\t\t,\tlowerCAmelCase_=True\t\t\t\t\t,\tlowerCAmelCase_=True\t\t\t\t\t,\tlowerCAmelCase_=99\t\t\t\t\t,\tlowerCAmelCase_=32\t\t\t\t\t,\tlowerCAmelCase_=2\t\t\t\t\t,\tlowerCAmelCase_=2\t\t\t\t\t,\tlowerCAmelCase_=7\t\t\t\t\t,\tlowerCAmelCase_=\"gelu_new\"\t\t\t\t\t,\tlowerCAmelCase_=0.1\t\t\t\t\t,\tlowerCAmelCase_=0.1\t\t\t\t\t,\tlowerCAmelCase_=512\t\t\t\t\t,\tlowerCAmelCase_=16\t\t\t\t\t,\tlowerCAmelCase_=2\t\t\t\t\t,\tlowerCAmelCase_=0.02\t\t\t\t\t,\tlowerCAmelCase_=4\t\t\t\t\t,\tlowerCAmelCase_=\"block_sparse\"\t\t\t\t\t,\tlowerCAmelCase_=True\t\t\t\t\t,\tlowerCAmelCase_=False\t\t\t\t\t,\tlowerCAmelCase_=2\t\t\t\t\t,\tlowerCAmelCase_=3\t\t\t\t\t,\t):\r __lowercase\t\t\t\t\t=\t\tparent\r __lowercase\t\t\t\t\t=\t\tbatch_size\r __lowercase\t\t\t\t\t=\t\tseq_length\r __lowercase\t\t\t\t\t=\t\tis_training\r __lowercase\t\t\t\t\t=\t\tuse_attention_mask\r __lowercase\t\t\t\t\t=\t\tuse_token_type_ids\r __lowercase\t\t\t\t\t=\t\tuse_labels\r __lowercase\t\t\t\t\t=\t\tvocab_size\r __lowercase\t\t\t\t\t=\t\thidden_size\r __lowercase\t\t\t\t\t=\t\tnum_hidden_layers\r __lowercase\t\t\t\t\t=\t\tnum_attention_heads\r __lowercase\t\t\t\t\t=\t\tintermediate_size\r __lowercase\t\t\t\t\t=\t\thidden_act\r __lowercase\t\t\t\t\t=\t\thidden_dropout_prob\r __lowercase\t\t\t\t\t=\t\tattention_probs_dropout_prob\r __lowercase\t\t\t\t\t=\t\tmax_position_embeddings\r __lowercase\t\t\t\t\t=\t\ttype_vocab_size\r __lowercase\t\t\t\t\t=\t\ttype_sequence_label_size\r __lowercase\t\t\t\t\t=\t\tinitializer_range\r __lowercase\t\t\t\t\t=\t\tnum_choices\r\r __lowercase\t\t\t\t\t=\t\trescale_embeddings\r __lowercase\t\t\t\t\t=\t\tattention_type\r __lowercase\t\t\t\t\t=\t\tuse_bias\r __lowercase\t\t\t\t\t=\t\tblock_size\r __lowercase\t\t\t\t\t=\t\tnum_random_blocks\r\r\r\r\r\r def \t\t\t\tsnake_case__\t\t\t\t\t\t\t( self\t):\r __lowercase\t\t\t\t\t=\t\tids_tensor([self.batch_size, self.seq_length]\t\t\t\t\t,\tself.vocab_size\t)\r\r __lowercase\t\t\t\t\t=\t\tNone\r if self.use_attention_mask:\r __lowercase\t\t\t\t\t=\t\trandom_attention_mask([self.batch_size, self.seq_length]\t)\r\r __lowercase\t\t\t\t\t=\t\tNone\r if self.use_token_type_ids:\r __lowercase\t\t\t\t\t=\t\tids_tensor([self.batch_size, self.seq_length]\t\t\t\t\t,\tself.type_vocab_size\t)\r\r __lowercase\t\t\t\t\t=\t\tBigBirdConfig(\r 
vocab_size=self.vocab_size\t\t\t\t\t,\thidden_size=self.hidden_size\t\t\t\t\t,\tnum_hidden_layers=self.num_hidden_layers\t\t\t\t\t,\tnum_attention_heads=self.num_attention_heads\t\t\t\t\t,\tintermediate_size=self.intermediate_size\t\t\t\t\t,\thidden_act=self.hidden_act\t\t\t\t\t,\thidden_dropout_prob=self.hidden_dropout_prob\t\t\t\t\t,\tattention_probs_dropout_prob=self.attention_probs_dropout_prob\t\t\t\t\t,\tmax_position_embeddings=self.max_position_embeddings\t\t\t\t\t,\ttype_vocab_size=self.type_vocab_size\t\t\t\t\t,\tis_decoder=lowerCAmelCase_\t\t\t\t\t,\tinitializer_range=self.initializer_range\t\t\t\t\t,\tattention_type=self.attention_type\t\t\t\t\t,\tblock_size=self.block_size\t\t\t\t\t,\tnum_random_blocks=self.num_random_blocks\t\t\t\t\t,\tuse_bias=self.use_bias\t\t\t\t\t,\trescale_embeddings=self.rescale_embeddings\t\t\t\t\t,\t)\r\r return config, input_ids, token_type_ids, attention_mask\r\r\r\r\r\r def \t\t\t\tsnake_case__\t\t\t\t\t\t\t( self\t):\r __lowercase\t\t\t\t\t=\t\tself.prepare_config_and_inputs()\r __lowercase\t\t\t\t\t, __lowercase\t\t\t\t\t, __lowercase\t\t\t\t\t, __lowercase\t\t\t\t\t=\t\tconfig_and_inputs\r __lowercase\t\t\t\t\t=\t\t{\r \"input_ids\": input_ids,\r \"token_type_ids\": token_type_ids,\r \"attention_mask\": attention_mask,\r }\r return config, inputs_dict\r\r\r\r\r\r\r\r@require_flax\rclass \t\t\t\t\tsnake_case\t( __snake_case\t\t\t\t\t\t\t,unittest.TestCase ):\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r\r\r\r __lowerCAmelCase \t\t\t\t\t\t= (\r (\r FlaxBigBirdForCausalLM,\r FlaxBigBirdModel,\r FlaxBigBirdForPreTraining,\r FlaxBigBirdForMaskedLM,\r FlaxBigBirdForMultipleChoice,\r FlaxBigBirdForQuestionAnswering,\r FlaxBigBirdForSequenceClassification,\r FlaxBigBirdForTokenClassification,\r )\r if is_flax_available()\r else ()\r )\r\r __lowerCAmelCase \t\t\t\t\t\t= False\r __lowerCAmelCase \t\t\t\t\t\t= False\r\r\r\r\r\r def \t\t\t\tsnake_case__\t\t\t\t\t\t\t( self\t):\r __lowercase\t\t\t\t\t=\t\tFlaxBigBirdModelTester(self\t)\r\r\r\r\r\r @slow\r # copied from `test_modeling_flax_common` because it takes much longer than other models\r def \t\t\t\tsnake_case__\t\t\t\t\t\t\t( self\t):\r super().test_from_pretrained_save_pretrained()\r\r\r\r\r\r @slow\r # copied from `test_modeling_flax_common` because it takes much longer than other models\r def \t\t\t\tsnake_case__\t\t\t\t\t\t\t( self\t):\r super().test_from_pretrained_with_no_automatic_init()\r\r\r\r\r\r @slow\r # copied from `test_modeling_flax_common` because it takes much longer than other models\r def \t\t\t\tsnake_case__\t\t\t\t\t\t\t( self\t):\r super().test_no_automatic_init()\r\r\r\r\r\r @slow\r # copied from `test_modeling_flax_common` because it takes much longer than other models\r def \t\t\t\tsnake_case__\t\t\t\t\t\t\t( self\t):\r super().test_hidden_states_output()\r\r\r\r\r\r @slow\r def \t\t\t\tsnake_case__\t\t\t\t\t\t\t( self\t):\r for model_class_name in self.all_model_classes:\r __lowercase\t\t\t\t\t=\t\tmodel_class_name.from_pretrained(\"google/bigbird-roberta-base\"\t)\r self.assertIsNotNone(lowerCAmelCase_\t)\r\r\r\r\r\r def \t\t\t\tsnake_case__\t\t\t\t\t\t\t( self\t):\r if self.test_attn_probs:\r super().test_attention_outputs()\r\r\r\r\r\r @slow\r # copied from `test_modeling_flax_common` because it takes much longer than other models\r def \t\t\t\tsnake_case__\t\t\t\t\t\t\t( self\t):\r __lowercase\t\t\t\t\t, __lowercase\t\t\t\t\t=\t\tself.model_tester.prepare_config_and_inputs_for_common()\r\r for model_class in self.all_model_classes:\r with 
self.subTest(model_class.__name__\t):\r __lowercase\t\t\t\t\t=\t\tself._prepare_for_class(lowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t)\r __lowercase\t\t\t\t\t=\t\tmodel_class(lowerCAmelCase_\t)\r\r @jax.jit\r def model_jitted(lowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_=None\t\t\t\t\t,\t**lowerCAmelCase_\t):\r return model(input_ids=lowerCAmelCase_\t\t\t\t\t,\tattention_mask=lowerCAmelCase_\t\t\t\t\t,\t**lowerCAmelCase_\t)\r\r with self.subTest(\"JIT Enabled\"\t):\r __lowercase\t\t\t\t\t=\t\tmodel_jitted(**lowerCAmelCase_\t).to_tuple()\r\r with self.subTest(\"JIT Disabled\"\t):\r with jax.disable_jit():\r __lowercase\t\t\t\t\t=\t\tmodel_jitted(**lowerCAmelCase_\t).to_tuple()\r\r self.assertEqual(len(lowerCAmelCase_\t)\t\t\t\t\t,\tlen(lowerCAmelCase_\t)\t)\r for jitted_output, output in zip(lowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t):\r self.assertEqual(jitted_output.shape\t\t\t\t\t,\toutput.shape\t)\r\r\r\r\r\r def \t\t\t\tsnake_case__\t\t\t\t\t\t\t( self\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_=1E-5\t\t\t\t\t,\tlowerCAmelCase_=\"outputs\"\t\t\t\t\t,\tlowerCAmelCase_=None\t):\r # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,\r # an effort was done to return `attention_probs` (yet to be verified).\r if name.startswith(\"outputs.attentions\"\t):\r return\r else:\r super().check_pt_flax_outputs(lowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t\t\t\t\t,\tlowerCAmelCase_\t)\r\r"},"style_context_codestyle":{"kind":"number","value":321,"string":"321"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":547,"cells":{"code":{"kind":"string","value":"\r\n'''simple docstring'''\r\n\r\nimport unittest\r\n\r\nfrom accelerate import debug_launcher\r\nfrom accelerate.test_utils import require_cpu, test_ops, test_script\r\n\r\n\r\n@require_cpu\r\nclass _UpperCAmelCase ( unittest.TestCase ):\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n def \t\t\t\t\t\t\t_lowerCAmelCase ( self ):\r\n\r\n '''simple docstring'''\r\n\r\n debug_launcher(test_script.main )\r\n\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\t\t\t\t\t_lowerCAmelCase ( self ):\r\n\r\n '''simple docstring'''\r\n\r\n debug_launcher(test_ops.main )\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":460,"string":"460"},"style_context":{"kind":"string","value":"\r\n'''simple docstring'''\r\n\r\nimport os\r\nimport shutil\r\nimport tempfile\r\nimport unittest\r\n\r\nimport numpy as np\r\n\r\nfrom transformers import AutoTokenizer, BarkProcessor\r\nfrom transformers.testing_utils import require_torch, slow\r\n\r\n\r\n@require_torch\r\nclass _UpperCAmelCase ( unittest.TestCase ):\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n def \t\t\t\t\t\t\t_lowerCAmelCase ( self ):\r\n\r\n '''simple docstring'''\r\n\r\n a_ :\tint =\t\t\t\t\t\t\t\"\"\"ylacombe/bark-small\"\"\"\r\n a_ :\tDict =\t\t\t\t\t\t\ttempfile.mkdtemp()\r\n a_ :\tUnion[str, Any] =\t\t\t\t\t\t\t\"\"\"en_speaker_1\"\"\"\r\n a_ :\tDict =\t\t\t\t\t\t\t\"\"\"This is a test string\"\"\"\r\n a_ :\tOptional[int] =\t\t\t\t\t\t\t\"\"\"speaker_embeddings_path.json\"\"\"\r\n a_ :\tint =\t\t\t\t\t\t\t\"\"\"speaker_embeddings\"\"\"\r\n\r\n\r\n def \t\t\t\t\t\t\t_lowerCAmelCase ( self\t\t\t\t\t\t\t,\t\t**lowerCAmelCase_ ):\r\n\r\n '''simple docstring'''\r\n\r\n return AutoTokenizer.from_pretrained(self.checkpoint\t\t\t\t\t\t\t,\t\t**lowerCAmelCase_ )\r\n\r\n\r\n def \t\t\t\t\t\t\t_lowerCAmelCase ( 
self ):\r\n\r\n '''simple docstring'''\r\n\r\n shutil.rmtree(self.tmpdirname )\r\n\r\n\r\n def \t\t\t\t\t\t\t_lowerCAmelCase ( self ):\r\n\r\n '''simple docstring'''\r\n\r\n a_ :\tUnion[str, Any] =\t\t\t\t\t\t\tself.get_tokenizer()\r\n\r\n a_ :\tOptional[Any] =\t\t\t\t\t\t\tBarkProcessor(tokenizer=lowerCAmelCase_ )\r\n\r\n processor.save_pretrained(self.tmpdirname )\r\n a_ :\tList[str] =\t\t\t\t\t\t\tBarkProcessor.from_pretrained(self.tmpdirname )\r\n\r\n self.assertEqual(processor.tokenizer.get_vocab()\t\t\t\t\t\t\t,\t\ttokenizer.get_vocab() )\r\n\r\n\r\n @slow\r\n def \t\t\t\t\t\t\t_lowerCAmelCase ( self ):\r\n\r\n '''simple docstring'''\r\n\r\n a_ :\tAny =\t\t\t\t\t\t\tBarkProcessor.from_pretrained(\r\n pretrained_processor_name_or_path=self.checkpoint\t\t\t\t\t\t\t,\t\tspeaker_embeddings_dict_path=self.speaker_embeddings_dict_path\t\t\t\t\t\t\t,\t\t)\r\n processor.save_pretrained(\r\n self.tmpdirname\t\t\t\t\t\t\t,\t\tspeaker_embeddings_dict_path=self.speaker_embeddings_dict_path\t\t\t\t\t\t\t,\t\tspeaker_embeddings_directory=self.speaker_embeddings_directory\t\t\t\t\t\t\t,\t\t)\r\n\r\n a_ :\tList[str] =\t\t\t\t\t\t\tself.get_tokenizer(bos_token=\"\"\"(BOS)\"\"\"\t\t\t\t\t\t\t,\t\teos_token=\"\"\"(EOS)\"\"\" )\r\n\r\n a_ :\tAny =\t\t\t\t\t\t\tBarkProcessor.from_pretrained(\r\n self.tmpdirname\t\t\t\t\t\t\t,\t\tself.speaker_embeddings_dict_path\t\t\t\t\t\t\t,\t\tbos_token=\"\"\"(BOS)\"\"\"\t\t\t\t\t\t\t,\t\teos_token=\"\"\"(EOS)\"\"\"\t\t\t\t\t\t\t,\t\t)\r\n\r\n self.assertEqual(processor.tokenizer.get_vocab()\t\t\t\t\t\t\t,\t\ttokenizer_add_kwargs.get_vocab() )\r\n\r\n\r\n def \t\t\t\t\t\t\t_lowerCAmelCase ( self ):\r\n\r\n '''simple docstring'''\r\n\r\n a_ :\tList[Any] =\t\t\t\t\t\t\tBarkProcessor.from_pretrained(\r\n pretrained_processor_name_or_path=self.checkpoint\t\t\t\t\t\t\t,\t\tspeaker_embeddings_dict_path=self.speaker_embeddings_dict_path\t\t\t\t\t\t\t,\t\t)\r\n\r\n a_ :\tDict =\t\t\t\t\t\t\t35\r\n a_ :\tList[Any] =\t\t\t\t\t\t\t2\r\n a_ :\tOptional[int] =\t\t\t\t\t\t\t8\r\n\r\n a_ :\tint =\t\t\t\t\t\t\t{\r\n \"\"\"semantic_prompt\"\"\": np.ones(lowerCAmelCase_ ),\r\n \"\"\"coarse_prompt\"\"\": np.ones((nb_codebooks_coarse, seq_len) ),\r\n \"\"\"fine_prompt\"\"\": np.ones((nb_codebooks_total, seq_len) ),\r\n }\r\n\r\n # test providing already loaded voice_preset\r\n a_ :\tOptional[Any] =\t\t\t\t\t\t\tprocessor(text=self.input_string\t\t\t\t\t\t\t,\t\tvoice_preset=lowerCAmelCase_ )\r\n\r\n a_ :\tTuple =\t\t\t\t\t\t\tinputs[\"\"\"history_prompt\"\"\"]\r\n for key in voice_preset:\r\n self.assertListEqual(voice_preset[key].tolist()\t\t\t\t\t\t\t,\t\tprocessed_voice_preset.get(lowerCAmelCase_\t\t\t\t\t\t\t,\t\tnp.array([] ) ).tolist() )\r\n\r\n # test loading voice preset from npz file\r\n a_ :\tAny =\t\t\t\t\t\t\tos.path.join(self.tmpdirname\t\t\t\t\t\t\t,\t\t\"\"\"file.npz\"\"\" )\r\n np.savez(lowerCAmelCase_\t\t\t\t\t\t\t,\t\t**lowerCAmelCase_ )\r\n a_ :\tAny =\t\t\t\t\t\t\tprocessor(text=self.input_string\t\t\t\t\t\t\t,\t\tvoice_preset=lowerCAmelCase_ )\r\n a_ :\tList[Any] =\t\t\t\t\t\t\tinputs[\"\"\"history_prompt\"\"\"]\r\n\r\n for key in voice_preset:\r\n self.assertListEqual(voice_preset[key].tolist()\t\t\t\t\t\t\t,\t\tprocessed_voice_preset.get(lowerCAmelCase_\t\t\t\t\t\t\t,\t\tnp.array([] ) ).tolist() )\r\n\r\n # test loading voice preset from the hub\r\n a_ :\tAny =\t\t\t\t\t\t\tprocessor(text=self.input_string\t\t\t\t\t\t\t,\t\tvoice_preset=self.voice_preset )\r\n\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\t\t\t\t\t_lowerCAmelCase ( self ):\r\n\r\n '''simple docstring'''\r\n\r\n 
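# Illustrative aside, not part of the original test class: the voice-preset test
# above writes a dict of NumPy arrays to an .npz archive and reads it back by
# key. A self-contained sketch of that round trip; the array shapes mirror the
# test constants (seq_len 35, 2 coarse codebooks, 8 total codebooks), and the
# file name is arbitrary.
import numpy as np

voice_preset = {
    "semantic_prompt": np.ones(35),
    "coarse_prompt": np.ones((2, 35)),
    "fine_prompt": np.ones((8, 35)),
}

np.savez("file.npz", **voice_preset)  # pack all arrays into one archive
loaded = np.load("file.npz")          # dict-like access to the stored arrays

for key in voice_preset:
    assert np.array_equal(voice_preset[key], loaded[key])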
a_ :\tTuple =\t\t\t\t\t\t\tself.get_tokenizer()\r\n\r\n a_ :\tUnion[str, Any] =\t\t\t\t\t\t\tBarkProcessor(tokenizer=lowerCAmelCase_ )\r\n\r\n a_ :\tOptional[int] =\t\t\t\t\t\t\tprocessor(text=self.input_string )\r\n\r\n a_ :\tOptional[int] =\t\t\t\t\t\t\ttokenizer(\r\n self.input_string\t\t\t\t\t\t\t,\t\tpadding=\"\"\"max_length\"\"\"\t\t\t\t\t\t\t,\t\tmax_length=2_56\t\t\t\t\t\t\t,\t\tadd_special_tokens=lowerCAmelCase_\t\t\t\t\t\t\t,\t\treturn_attention_mask=lowerCAmelCase_\t\t\t\t\t\t\t,\t\treturn_token_type_ids=lowerCAmelCase_\t\t\t\t\t\t\t,\t\t)\r\n\r\n for key in encoded_tok.keys():\r\n self.assertListEqual(encoded_tok[key]\t\t\t\t\t\t\t,\t\tencoded_processor[key].squeeze().tolist() )\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":460,"string":"460"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":548,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nUpperCamelCase =\t\t\t\t\t\t\t\"\"\"Tobias Carryer\"\"\"\r\n\r\nfrom time import time\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass UpperCamelCase__\t\t\t:\r\n\r\n\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\t\t\tdef __init__( self\t\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t,\tSCREAMING_SNAKE_CASE__=int(time()\t\t)\t\t) ->\tAny: # noqa: B008\r\n\t\t\t\tA__\t\t\t\t\t\t\t\t\t\t\t=\t\t\t\t\tmultiplier\r\n\t\t\t\tA__\t\t\t\t\t\t\t\t\t\t\t=\t\t\t\t\tincrement\r\n\t\t\t\tA__\t\t\t\t\t\t\t\t\t\t\t=\t\t\t\t\tmodulo\r\n\t\t\t\tA__\t\t\t\t\t\t\t\t\t\t\t=\t\t\t\t\tseed\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tdef snake_case__\t( self\t\t) ->\tAny:\r\n\t\t\t\tA__\t\t\t\t\t\t\t\t\t\t\t=\t\t\t\t\t(self.multiplier * self.seed + self.increment) % self.modulo\r\n\t\t\t\treturn self.seed\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t# Show the LCG in action.\r\n\t\t\tUpperCamelCase =\t\t\t\t\t\t\tLinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)\r\n\t\t\twhile True:\r\n\t\t\t\t\t\tprint(lcg.next_number())\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":104,"string":"104"},"style_context":{"kind":"string","value":"\nimport argparse\nimport json\nimport os\n\nimport torch\nfrom torch import nn\n\nfrom transformers import NllbMoeConfig, NllbMoeModel\nfrom transformers.modeling_utils import dtype_byte_size\nfrom transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME\n\n\n\n\ndef \t\t\t\t\t__magic_name__\t\t\t\t\t( __lowerCAmelCase :\t\tAny\t\t\t\t) -> Optional[Any]:\n\t\t\t\t__lowerCamelCase = [\n\t\t\t\t '''encoder.version''',\n\t\t\t\t '''decoder.version''',\n\t\t\t\t '''model.encoder.version''',\n\t\t\t\t '''model.decoder.version''',\n\t\t\t\t '''decoder.output_projection.weight''',\n\t\t\t\t '''_float_tensor''',\n\t\t\t\t '''encoder.embed_positions._float_tensor''',\n\t\t\t\t '''decoder.embed_positions._float_tensor''',\n\t\t\t\t]\n\t\t\t\tfor k in ignore_keys:\n\t\t\t\t\t\t\t\tstate_dict.pop(__lowerCAmelCase\t\t\t\t\t\t, __lowerCAmelCase\t\t\t\t)\n\n\n\n\ndef \t\t\t\t\t__magic_name__\t\t\t\t\t( __lowerCAmelCase :\t\tList[str]\t\t\t\t) -> List[str]:\n\t\t\t\t__lowerCamelCase , __lowerCamelCase = emb.weight.shape\n\t\t\t\t__lowerCamelCase = nn.Linear(__lowerCAmelCase\t\t\t\t\t\t, __lowerCAmelCase\t\t\t\t\t\t, bias=__lowerCAmelCase\t\t\t\t)\n\t\t\t\t__lowerCamelCase = emb.weight.data\n\t\t\t\treturn lin_layer\n\n\n\n\ndef \t\t\t\t\t__magic_name__\t\t\t\t\t( __lowerCAmelCase :\t\tList[Any]\t\t\t\t\t\t, __lowerCAmelCase :\t\tint=None\t\t\t\t) -> 
Optional[int]:\n\t\t\t\t__lowerCamelCase = {}\n\t\t\t\tfor old_key in state_dict.keys():\n\t\t\t\t\t\t\t\t__lowerCamelCase = old_key\n\t\t\t\t\t\t\t\tif \"moe_layer.experts.\" in key:\n\t\t\t\t\t\t\t\t\t\t\t\tif expert_idx is not None:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__lowerCamelCase = key.replace('''moe_layer.experts.0'''\t\t\t\t\t\t, f'''ffn.experts.expert_{expert_idx}'''\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t__lowerCamelCase = key.replace('''moe_layer.experts.'''\t\t\t\t\t\t, '''ffn.experts.expert_'''\t\t\t\t)\n\t\t\t\t\t\t\t\tif \"gate\" in key:\n\t\t\t\t\t\t\t\t\t\t\t\t__lowerCamelCase = key.replace('''.moe_layer.gate.wg'''\t\t\t\t\t\t, '''.ffn.router.classifier'''\t\t\t\t)\n\t\t\t\t\t\t\t\tif \"fc2\" and \"experts\" not in key:\n\t\t\t\t\t\t\t\t\t\t\t\t__lowerCamelCase = key.replace('''.fc2.'''\t\t\t\t\t\t, '''.ffn.fc2.'''\t\t\t\t)\n\t\t\t\t\t\t\t\tif \"fc1\" and \"experts\" not in key:\n\t\t\t\t\t\t\t\t\t\t\t\t__lowerCamelCase = key.replace('''.fc1.'''\t\t\t\t\t\t, '''.ffn.fc1.'''\t\t\t\t)\n\t\t\t\t\t\t\t\tif \".encoder_attn.\" in key:\n\t\t\t\t\t\t\t\t\t\t\t\t__lowerCamelCase = key.replace('''.encoder_attn.'''\t\t\t\t\t\t, '''.cross_attention.'''\t\t\t\t)\n\t\t\t\t\t\t\t\tif \"encoder_attn_layer_norm\" in key:\n\t\t\t\t\t\t\t\t\t\t\t\t__lowerCamelCase = key.replace('''encoder_attn_layer_norm'''\t\t\t\t\t\t, '''cross_attention_layer_norm'''\t\t\t\t)\n\t\t\t\t\t\t\t\tif \"final_layer_norm\" in key:\n\t\t\t\t\t\t\t\t\t\t\t\t__lowerCamelCase = key.replace('''final_layer_norm'''\t\t\t\t\t\t, '''ff_layer_norm'''\t\t\t\t)\n\t\t\t\t\t\t\t\t__lowerCamelCase = state_dict[old_key]\n\t\t\t\treturn new_dict\n\n\n\n\ndef \t\t\t\t\t__magic_name__\t\t\t\t\t( __lowerCAmelCase :\t\tOptional[Any]\t\t\t\t\t\t, __lowerCAmelCase :\t\tList[str]\t\t\t\t\t\t, __lowerCAmelCase :\t\tUnion[str, Any]\t\t\t\t\t\t, __lowerCAmelCase :\t\tAny\t\t\t\t\t\t, __lowerCAmelCase :\t\tstr = WEIGHTS_NAME\t\t\t\t) -> Dict:\n\t\t\t\t__lowerCamelCase = []\n\t\t\t\t__lowerCamelCase = 0\n\t\t\t\tos.makedirs(__lowerCAmelCase\t\t\t\t\t\t, exist_ok=__lowerCAmelCase\t\t\t\t)\n\n\t\t\t\tfor expert in range(__lowerCAmelCase\t\t\t\t):\n\t\t\t\t\t\t\t\t__lowerCamelCase = switch_checkpoint_path + f'''-rank-{expert}.pt'''\n\t\t\t\t\t\t\t\tif os.path.isfile(__lowerCAmelCase\t\t\t\t):\n\t\t\t\t\t\t\t\t\t\t\t\t__lowerCamelCase = torch.load(__lowerCAmelCase\t\t\t\t)['''model''']\n\t\t\t\t\t\t\t\t\t\t\t\tremove_ignore_keys_(__lowerCAmelCase\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t__lowerCamelCase = rename_fairseq_keys(__lowerCAmelCase\t\t\t\t\t\t, __lowerCAmelCase\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t__lowerCamelCase = os.path.join(\n\t\t\t\t\t\t\t\t\t\t\t\t __lowerCAmelCase\t\t\t\t\t\t, weights_name.replace('''.bin'''\t\t\t\t\t\t, f'''-{len(__lowerCAmelCase\t\t\t\t)+1:05d}-of-???.bin'''\t\t\t\t)\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\ttorch.save(__lowerCAmelCase\t\t\t\t\t\t, __lowerCAmelCase\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\tsharded_state_dicts.append(expert_state.keys()\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\ttotal_size += sum([value.numel() for key, value in expert_state.items()]\t\t\t\t) * dtype_byte_size(\n\t\t\t\t\t\t\t\t\t\t\t\t expert_state[list(__lowerCAmelCase\t\t\t\t)[0]].dtype\t\t\t\t)\n\n # Add the last block\n\t\t\t\t__lowerCamelCase = os.path.join(__lowerCAmelCase\t\t\t\t\t\t, weights_name.replace('''.bin'''\t\t\t\t\t\t, f'''-{len(__lowerCAmelCase\t\t\t\t)+1:05d}-of-???.bin'''\t\t\t\t)\t\t\t\t)\n\t\t\t\t__lowerCamelCase = torch.load(switch_checkpoint_path + 
'''-shared.pt'''\t\t\t\t)['''model''']\n\t\t\t\tremove_ignore_keys_(__lowerCAmelCase\t\t\t\t)\n\t\t\t\t__lowerCamelCase = rename_fairseq_keys(__lowerCAmelCase\t\t\t\t\t\t, __lowerCAmelCase\t\t\t\t)\n\t\t\t\t__lowerCamelCase = shared_weights['''decoder.embed_tokens.weight''']\n\t\t\t\tsharded_state_dicts.append(shared_weights.keys()\t\t\t\t)\n\n\t\t\t\t# If we only have the shared weights (dummy model/experts saved on the same file)\n\t\t\t\tif len(__lowerCAmelCase\t\t\t\t) == 1:\n\t\t\t\t\t\t\t\t__lowerCamelCase = os.path.join(__lowerCAmelCase\t\t\t\t\t\t, __lowerCAmelCase\t\t\t\t)\n\t\t\t\t\t\t\t\ttorch.save(__lowerCAmelCase\t\t\t\t\t\t, __lowerCAmelCase\t\t\t\t)\n\t\t\t\t\t\t\t\treturn {weights_name: sharded_state_dicts[0]}, None\n\t\t\t\telse:\n\t\t\t\t\t\t\t\ttorch.save(__lowerCAmelCase\t\t\t\t\t\t, __lowerCAmelCase\t\t\t\t)\n\t\t\t\t# Otherwise, let's build the index\n\t\t\t\t__lowerCamelCase = {}\n\t\t\t\tfor idx, shard in enumerate(__lowerCAmelCase\t\t\t\t):\n\t\t\t\t\t\t\t\t__lowerCamelCase = weights_name.replace('''.bin'''\t\t\t\t\t\t, f'''-{idx+1:05d}-of-{len(__lowerCAmelCase\t\t\t\t):05d}.bin'''\t\t\t\t)\n\t\t\t\t\t\t\t\t__lowerCamelCase = os.path.join(__lowerCAmelCase\t\t\t\t\t\t, weights_name.replace('''.bin'''\t\t\t\t\t\t, f'''-{idx+1:05d}-of-???.bin'''\t\t\t\t)\t\t\t\t)\n\t\t\t\t\t\t\t\tos.rename(__lowerCAmelCase\t\t\t\t\t\t, os.path.join(__lowerCAmelCase\t\t\t\t\t\t, __lowerCAmelCase\t\t\t\t)\t\t\t\t)\n\t\t\t\t\t\t\t\tfor key in shard:\n\t\t\t\t\t\t\t\t\t\t\t\t__lowerCamelCase = shard_file\n\n # Add the metadata\n\t\t\t\t__lowerCamelCase = {'''total_size''': total_size}\n\t\t\t\t__lowerCamelCase = {'''metadata''': metadata, '''weight_map''': weight_map}\n\n\t\t\t\twith open(os.path.join(__lowerCAmelCase\t\t\t\t\t\t, __lowerCAmelCase\t\t\t\t)\t\t\t\t\t\t, '''w'''\t\t\t\t\t\t, encoding='''utf-8'''\t\t\t\t) as f:\n\t\t\t\t\t\t\t\t__lowerCamelCase = json.dumps(__lowerCAmelCase\t\t\t\t\t\t, indent=2\t\t\t\t\t\t, sort_keys=__lowerCAmelCase\t\t\t\t) + '''\\n'''\n\t\t\t\t\t\t\t\tf.write(__lowerCAmelCase\t\t\t\t)\n\n\t\t\t\treturn metadata, index\n\n\nif __name__ == \"__main__\":\n\t\t\t\t\tSCREAMING_SNAKE_CASE__ : Union[str, Any] \t\t\t\t= argparse.ArgumentParser()\n\t\t\t\t\t# Required parameters\n\t\t\t\t\tparser.add_argument(\n\t\t\t\t\t \"--nllb_moe_checkpoint_path\",\n\t\t\t\t\t default=\"/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000\",\n\t\t\t\t\t type=str,\n\t\t\t\t\t required=False,\n\t\t\t\t\t help=\"Path to a directory containing a folder per layer. 
Follows the original Google format.\",\n\t\t\t\t\t)\n\t\t\t\t\tparser.add_argument(\"--dtype\", default=\"float32\", type=str, required=False, help=\"dtype of the saved model\")\n\t\t\t\t\tparser.add_argument(\n\t\t\t\t\t \"--pytorch_dump_folder_path\",\n\t\t\t\t\t default=\"/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b\",\n\t\t\t\t\t type=str,\n\t\t\t\t\t required=False,\n\t\t\t\t\t help=\"Path to the output pytorch model.\",\n\t\t\t\t\t)\n\t\t\t\t\tSCREAMING_SNAKE_CASE__ : Union[str, Any] \t\t\t\t= parser.parse_args()\n\t\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t, SCREAMING_SNAKE_CASE__ : Any \t\t\t\t= shard_on_the_fly(\n\t\t\t\t\t args.nllb_moe_checkpoint_path,\n\t\t\t\t\t args.pytorch_dump_folder_path,\n\t\t\t\t\t 128,\n\t\t\t\t\t args.dtype,\n\t\t\t\t\t)\n\n\t\t\t\t\tSCREAMING_SNAKE_CASE__ : str \t\t\t\t= NllbMoeConfig.from_pretrained(\n\t\t\t\t\t \"facebook/nllb-200-3.3B\", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128\n\t\t\t\t\t)\n\t\t\t\t\tconfig.save_pretrained(args.pytorch_dump_folder_path)\n\t\t\t\t\tSCREAMING_SNAKE_CASE__ : Dict \t\t\t\t= NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)\n\t\t\t\t\tprint(\"Done\")\n\t\t\t\t\tmodel.save_pretrained(args.pytorch_dump_folder_path)\n\n\n\n"},"style_context_codestyle":{"kind":"number","value":298,"string":"298"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":549,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\nfrom typing import TYPE_CHECKING\r\n\r\nfrom ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available\r\n\r\n\r\nUpperCAmelCase\t\t\t\t\t\t\t =\t\t\t\t\t{\r\n \"\"\"configuration_altclip\"\"\": [\r\n \"\"\"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP\"\"\",\r\n \"\"\"AltCLIPConfig\"\"\",\r\n \"\"\"AltCLIPTextConfig\"\"\",\r\n \"\"\"AltCLIPVisionConfig\"\"\",\r\n ],\r\n \"\"\"processing_altclip\"\"\": [\"\"\"AltCLIPProcessor\"\"\"],\r\n}\r\n\r\ntry:\r\n\tif not is_torch_available():\r\n\t\traise OptionalDependencyNotAvailable()\r\nexcept OptionalDependencyNotAvailable:\r\n\tpass\r\nelse:\r\n\tUpperCAmelCase\t\t\t\t\t\t\t =\t\t\t\t\t[\r\n\t \"\"\"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST\"\"\",\r\n\t \"\"\"AltCLIPPreTrainedModel\"\"\",\r\n\t \"\"\"AltCLIPModel\"\"\",\r\n\t \"\"\"AltCLIPTextModel\"\"\",\r\n\t \"\"\"AltCLIPVisionModel\"\"\",\r\n\t]\r\n\r\n\r\nif TYPE_CHECKING:\r\n\tfrom .configuration_altclip import (\r\n\t ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,\r\n\t AltCLIPConfig,\r\n\t AltCLIPTextConfig,\r\n\t AltCLIPVisionConfig,\r\n\t)\r\n\tfrom .processing_altclip import AltCLIPProcessor\r\n\r\n\ttry:\r\n\t\tif not is_torch_available():\r\n\t\t\traise OptionalDependencyNotAvailable()\r\n\texcept OptionalDependencyNotAvailable:\r\n\t\tpass\r\n\telse:\r\n\t\tfrom .modeling_altclip import (\r\n\t\t ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,\r\n\t\t AltCLIPModel,\r\n\t\t AltCLIPPreTrainedModel,\r\n\t\t AltCLIPTextModel,\r\n\t\t AltCLIPVisionModel,\r\n\t\t)\r\n\r\n\r\nelse:\r\n\timport sys\r\n\r\n\tUpperCAmelCase\t\t\t\t\t\t\t =\t\t\t\t\t_LazyModule(__name__, globals()[\"\"\"__file__\"\"\"], _import_structure, module_spec=__spec__)\r\n\r\n"},"code_codestyle":{"kind":"number","value":351,"string":"351"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\nimport unittest\r\n\r\nfrom transformers import (\r\n MODEL_FOR_OBJECT_DETECTION_MAPPING,\r\n AutoFeatureExtractor,\r\n AutoModelForObjectDetection,\r\n ObjectDetectionPipeline,\r\n is_vision_available,\r\n pipeline,\r\n)\r\nfrom transformers.testing_utils import 
Row 549
  code: the AltCLIP package __init__ — an _import_structure exposing ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig and AltCLIPProcessor, a torch-gated list with AltCLIPPreTrainedModel, AltCLIPModel, AltCLIPTextModel and AltCLIPVisionModel, the matching TYPE_CHECKING imports, and a _LazyModule fallback.
  code_codestyle: 351
  style_context: ObjectDetectionPipeline tests — a pipeline-test mixin class run over MODEL_FOR_OBJECT_DETECTION_MAPPING that checks every detection dict has score, label and xmin/ymin/xmax/ymax box keys (including batched inputs from a fixtures dataset with RGBA/LA/L images); a TF skip; a small-model test on hf-internal-testing/tiny-detr-mobilenetsv3 at threshold 0.0; @slow integration tests on facebook/detr-resnet-50, built both by hand and via pipeline("object-detection"), asserting the expected remote/couch/cat boxes for the COCO 000000039769.jpg image for single and batched inputs; a threshold=0.9985 variant that keeps only the two cat detections; and a @slow LayoutLMv3 test on Narsil/layoutlmv3-finetuned-funsd at threshold 0.9993 over an invoice image.
  style_context_codestyle: 351
  label: 1
Row 550
  code: the TAPAS package __init__ — configuration_tapas (TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig) and tokenization_tapas (TapasTokenizer), a torch-gated list with TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel and load_tf_weights_in_tapas, a TensorFlow-gated list with the TFTapas counterparts, the matching TYPE_CHECKING imports, and a _LazyModule fallback.
  code_codestyle: 192
  style_context: a MobileViT checkpoint conversion script — get_mobilevit_config sets the hidden and neck channel sizes for mobilevit_s/xs/xxs, switches deeplabv3_ variants to a 512 image size, output stride 16 and the 21 Pascal VOC labels (otherwise 1000 ImageNet labels, both loaded from huggingface/label-files); rename_key and convert_state_dict translate the original layer names (including splitting fused qkv weights into query/key/value); prepare_img fetches the COCO 000000039769.jpg test image; convert_movilevit_checkpoint loads the original state dict into MobileViTForSemanticSegmentation or MobileViTForImageClassification, verifies hard-coded expected logits per variant, saves the model and a MobileViTImageProcessor (crop_size=config.image_size, size=config.image_size + 32), and can push both to the hub under the apple organization; an argparse entry point takes --mobilevit_name, --checkpoint_path, --pytorch_dump_folder_path and --push_to_hub.
  style_context_codestyle: 192
  label: 1
Row 551
  code: Audio Spectrogram Transformer (AST) tests — ASTModelTester builds an ASTConfig (patch size, max_length, num_mel_bins, frequency and time strides; the sequence length is the number of patches plus two for the [CLS] and distillation tokens) together with random input_values; a ModelTesterMixin/PipelineTesterMixin test class covers ASTModel and ASTForAudioClassification (config tests, a skipped inputs_embeds test, embedding and forward-signature checks, a @slow from_pretrained check); prepare_audio downloads sample_audio.flac from the nielsr/audio-spectogram-transformer-checkpoint dataset; and a @slow integration test runs MIT/ast-finetuned-audioset-10-10-0.4593 through ASTFeatureExtractor and checks that the (1, 527) logits start with [-0.8760, -7.0042, -8.6602].
  code_codestyle: 129
  style_context:

    import warnings


    warnings.warn(
        "memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
        "`from accelerate import find_executable_batch_size` to avoid this warning.",
        FutureWarning,
    )

  style_context_codestyle: 129
  label: 1

Row 552 (truncated)
  code: the DETR configuration module — imports for PretrainedConfig, OnnxConfig and CONFIG_MAPPING, a DETR_PRETRAINED_CONFIG_ARCHIVE_MAP pointing at facebook/detr-resnet-50, and the start of the DETR config class with model_type "detr", a ["past_key_values"] ignore list, an attribute_map (hidden_size -> d_model, num_attention_heads -> encoder_attention_heads) and an __init__ whose defaults (use_timm_backbone=True, 100 queries, 6 encoder and decoder layers, 2048 FFN dim, 8 attention heads, a resnet50 backbone, ...) break off mid-statement inside the backbone_config / use_timm_backbone validation.
specify both `backbone_config` and `use_timm_backbone`.''')\r\n\r\n\t\t\t\t\t\t\tif not use_timm_backbone:\r\n\t\t\t\t\t\t\t\t\t\t\t\tif backbone_config is None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlogger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])\r\n\t\t\t\t\t\t\t\t\t\t\t\telif isinstance(__a\t\t\t\t\t\t\t,\t\t\t\t\t\t__a):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= backbone_config.get('''model_type''')\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= CONFIG_MAPPING[backbone_model_type]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= config_class.from_dict(__a)\r\n\t\t\t\t\t\t\t\t\t\t\t\t# set timm attributes to None\r\n\t\t\t\t\t\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t, _UpperCamelCase\t\t\t\t\t, _UpperCamelCase\t\t\t\t\t\t\t\t\t= None, None, None\r\n\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= use_timm_backbone\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= backbone_config\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= num_channels\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= num_queries\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= d_model\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= encoder_ffn_dim\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= encoder_layers\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= encoder_attention_heads\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= decoder_ffn_dim\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= decoder_layers\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= decoder_attention_heads\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= dropout\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= attention_dropout\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= activation_dropout\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= activation_function\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= init_std\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= init_xavier_std\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= encoder_layerdrop\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= decoder_layerdrop\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= encoder_layers\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= auxiliary_loss\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= position_embedding_type\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= backbone\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= use_pretrained_backbone\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= dilation\r\n\t\t\t\t\t\t\t# Hungarian matcher\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= class_cost\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= bbox_cost\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= giou_cost\r\n\t\t\t\t\t\t\t# Loss coefficients\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= mask_loss_coefficient\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= dice_loss_coefficient\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= bbox_loss_coefficient\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= giou_loss_coefficient\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= eos_coefficient\r\n\t\t\t\t\t\t\tsuper().__init__(is_encoder_decoder=__a\t\t\t\t\t\t\t,\t\t\t\t\t\t**__a)\r\n\r\n\r\n\r\n\t\t@property\r\n\t\tdef \t\tUpperCAmelCase\t\t\t\t( 
self)\t\t\t->\t\t\tint:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t'''simple docstring'''\r\n\t\t\t\t\t\t\treturn self.encoder_attention_heads\r\n\r\n\r\n\r\n\t\t@property\r\n\t\tdef \t\tUpperCAmelCase\t\t\t\t( self)\t\t\t->\t\t\tint:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t'''simple docstring'''\r\n\t\t\t\t\t\t\treturn self.d_model\r\n\r\n\r\n\r\n\t\t@classmethod\r\n\t\tdef \t\tUpperCAmelCase\t\t\t\t( cls\t\t\t\t\t\t\t,\t\t\t\t\t\t__a\t\t\t\t\t\t\t,\t\t\t\t\t\t**__a)\t\t\t->\t\t\tint:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t'''simple docstring'''\r\n\t\t\t\t\t\t\treturn cls(backbone_config=__a\t\t\t\t\t\t\t,\t\t\t\t\t\t**__a)\r\n\r\n\r\n\r\n\r\n\t\tdef \t\tUpperCAmelCase\t\t\t\t( self)\t\t\t->\t\t\tDict[str, any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t'''simple docstring'''\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= copy.deepcopy(self.__dict__)\r\n\t\t\t\t\t\t\tif output[\"backbone_config\"] is not None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= self.backbone_config.to_dict()\r\n\t\t\t\t\t\t\t_UpperCamelCase\t\t\t\t\t\t\t\t\t= self.__class__.model_type\r\n\t\t\t\t\t\t\treturn output\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass _UpperCAmelCase( lowerCamelCase ):\r\n\t\tlowercase__ = version.parse('1.11' )\r\n\r\n\r\n\r\n\t\t@property\r\n\t\tdef \t\tUpperCAmelCase\t\t\t\t( self)\t\t\t->\t\t\tMapping[str, Mapping[int, str]]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t'''simple docstring'''\r\n\t\t\t\t\t\t\treturn OrderedDict(\r\n\t\t\t\t\t\t\t [\r\n\t\t\t\t\t\t\t ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),\r\n\t\t\t\t\t\t\t ('''pixel_mask''', {0: '''batch'''}),\r\n\t\t\t\t\t\t\t ])\r\n\r\n\r\n\r\n\t\t@property\r\n\t\tdef \t\tUpperCAmelCase\t\t\t\t( self)\t\t\t->\t\t\tfloat:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t'''simple docstring'''\r\n\t\t\t\t\t\t\treturn 1e-5\r\n\r\n\r\n\r\n\r\n\t\t@property\r\n\t\tdef \t\tUpperCAmelCase\t\t\t\t( self)\t\t\t->\t\t\tint:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t'''simple docstring'''\r\n\t\t\t\t\t\t\treturn 12\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":19,"string":"19"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef UpperCAmelCase__\t\t(\t\t\t\t\t\tlowerCamelCase_ : list[int] , lowerCamelCase_ : list[int]\t\t\t\t):\r\n\r\n\t\t\t# Check if the input is valid\r\n\t\t\tif not len(lowerCamelCase_\t\t\t\t) == len(lowerCamelCase_\t\t\t\t) == 3:\r\n\t\t\t\t\t\traise ValueError('Please enter a valid equation.'\t\t\t\t)\r\n\t\t\tif equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:\r\n\t\t\t\t\t\traise ValueError('Both a & b of two equations can\\'t be zero.'\t\t\t\t)\r\n\r\n\t\t\t# Extract the coefficients\r\n\t\t\t__a\t\t\t\t\t\t,\t\t\t\t\t__a\t\t\t\t\t\t,\t\t\t\t\t__a\t\t\t\t\t\t: Optional[Any]\t\t\t\t\t\t\t\t\t= equationa\r\n\t\t\t__a\t\t\t\t\t\t,\t\t\t\t\t__a\t\t\t\t\t\t,\t\t\t\t\t__a\t\t\t\t\t\t: Optional[int]\t\t\t\t\t\t\t\t\t= equationa\r\n\r\n\t\t\t# Calculate the determinants of the matrices\r\n\t\t\t__a\t\t\t\t\t\t: str\t\t\t\t\t\t\t\t\t= aa * ba - aa * ba\r\n\t\t\t__a\t\t\t\t\t\t: Tuple\t\t\t\t\t\t\t\t\t= ca * ba - ca * ba\r\n\t\t\t__a\t\t\t\t\t\t: Union[str, Any]\t\t\t\t\t\t\t\t\t= aa * ca - aa * ca\r\n\r\n\t\t\t# Check if the system of linear equations has a solution (using Cramer's rule)\r\n\t\t\tif determinant == 0:\r\n\t\t\t\t\t\tif determinant_x == determinant_y == 0:\r\n\t\t\t\t\t\t\t\t\traise ValueError('Infinite solutions. 
(Consistent system)'\t\t\t\t)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\traise ValueError('No solution. (Inconsistent system)'\t\t\t\t)\r\n\t\t\telse:\r\n\t\t\t\t\t\tif determinant_x == determinant_y == 0:\r\n\t\t\t\t\t\t\t\t\t# Trivial solution (Inconsistent system)\r\n\t\t\t\t\t\t\t\t\treturn (0.0, 0.0)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t__a\t\t\t\t\t\t: Any\t\t\t\t\t\t\t\t\t= determinant_x / determinant\r\n\t\t\t\t\t\t\t\t\t__a\t\t\t\t\t\t: Optional[Any]\t\t\t\t\t\t\t\t\t= determinant_y / determinant\r\n\t\t\t\t\t\t\t\t\t# Non-Trivial Solution (Consistent system)\r\n\t\t\t\t\t\t\t\t\treturn (x, y)\r\n\r\n\r\n\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":47,"string":"47"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":553,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\nimport unittest\r\n\r\nimport numpy as np\r\n\r\nfrom transformers.testing_utils import is_flaky, require_torch, require_vision\r\nfrom transformers.utils import is_torch_available, is_vision_available\r\n\r\nfrom ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs\r\n\r\n\r\nif is_torch_available():\r\n\t\t\t\timport torch\r\n\r\nif is_vision_available():\r\n\t\t\t\tfrom PIL import Image\r\n\r\n\t\t\t\tfrom transformers import DonutImageProcessor\r\nclass __lowerCAmelCase ( unittest.TestCase ):\r\n\r\n\r\n\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\t\tdef __init__(\t\t\t\t\t\tself\t\t\t\t\t\t\t,\t\t\t\t_a\t\t\t\t\t\t\t,\t\t\t\t_a=7\t\t\t\t\t\t\t,\t\t\t\t_a=3\t\t\t\t\t\t\t,\t\t\t\t_a=18\t\t\t\t\t\t\t,\t\t\t\t_a=30\t\t\t\t\t\t\t,\t\t\t\t_a=400\t\t\t\t\t\t\t,\t\t\t\t_a=True\t\t\t\t\t\t\t,\t\t\t\t_a=None\t\t\t\t\t\t\t,\t\t\t\t_a=True\t\t\t\t\t\t\t,\t\t\t\t_a=False\t\t\t\t\t\t\t,\t\t\t\t_a=True\t\t\t\t\t\t\t,\t\t\t\t_a=True\t\t\t\t\t\t\t,\t\t\t\t_a=[0.5, 0.5, 0.5]\t\t\t\t\t\t\t,\t\t\t\t_a=[0.5, 0.5, 0.5]\t\t\t\t\t\t\t,\t\t\t\t):\r\n\t\t\t\t__a = parent\r\n\t\t\t\t__a = batch_size\r\n\t\t\t\t__a = num_channels\r\n\t\t\t\t__a = image_size\r\n\t\t\t\t__a = min_resolution\r\n\t\t\t\t__a = max_resolution\r\n\t\t\t\t__a = do_resize\r\n\t\t\t\t__a = size if size is not None else {'''height''': 18, '''width''': 20}\r\n\t\t\t\t__a = do_thumbnail\r\n\t\t\t\t__a = do_align_axis\r\n\t\t\t\t__a = do_pad\r\n\t\t\t\t__a = do_normalize\r\n\t\t\t\t__a = image_mean\r\n\t\t\t\t__a = image_std\r\n\r\n\r\n\r\n\r\n\r\n\t\tdef __UpperCAmelCase (\t\t\t\t\t\tself ):\r\n\t\t\t\treturn {\r\n\t\t\t\t \"do_resize\": self.do_resize,\r\n\t\t\t\t \"size\": self.size,\r\n\t\t\t\t \"do_thumbnail\": self.do_thumbnail,\r\n\t\t\t\t \"do_align_long_axis\": self.do_align_axis,\r\n\t\t\t\t \"do_pad\": self.do_pad,\r\n\t\t\t\t \"do_normalize\": self.do_normalize,\r\n\t\t\t\t \"image_mean\": self.image_mean,\r\n\t\t\t\t \"image_std\": self.image_std,\r\n\t\t\t\t}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n@require_torch\r\n@require_vision\r\nclass __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):\r\n\r\n\r\n\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\t\t__UpperCAmelCase\t: str \t\t= DonutImageProcessor if is_vision_available() else None\r\n\r\n\t\tdef __UpperCAmelCase (\t\t\t\t\t\tself ):\r\n\t\t\t\t__a = DonutImageProcessingTester(self )\r\n\r\n\t\t@property\r\n\t\tdef __UpperCAmelCase (\t\t\t\t\t\tself ):\r\n\t\t\t\treturn self.image_processor_tester.prepare_image_processor_dict()\r\n\r\n\t\tdef __UpperCAmelCase (\t\t\t\t\t\tself ):\r\n\t\t\t\t__a = self.image_processing_class(**self.image_processor_dict 
)\r\n\t\t\t\tself.assertTrue(hasattr(_a\t\t\t\t\t\t\t,\t\t\t\t'''do_resize''' ) )\r\n\t\t\t\tself.assertTrue(hasattr(_a\t\t\t\t\t\t\t,\t\t\t\t'''size''' ) )\r\n\t\t\t\tself.assertTrue(hasattr(_a\t\t\t\t\t\t\t,\t\t\t\t'''do_thumbnail''' ) )\r\n\t\t\t\tself.assertTrue(hasattr(_a\t\t\t\t\t\t\t,\t\t\t\t'''do_align_long_axis''' ) )\r\n\t\t\t\tself.assertTrue(hasattr(_a\t\t\t\t\t\t\t,\t\t\t\t'''do_pad''' ) )\r\n\t\t\t\tself.assertTrue(hasattr(_a\t\t\t\t\t\t\t,\t\t\t\t'''do_normalize''' ) )\r\n\t\t\t\tself.assertTrue(hasattr(_a\t\t\t\t\t\t\t,\t\t\t\t'''image_mean''' ) )\r\n\t\t\t\tself.assertTrue(hasattr(_a\t\t\t\t\t\t\t,\t\t\t\t'''image_std''' ) )\r\n\r\n\t\tdef __UpperCAmelCase (\t\t\t\t\t\tself ):\r\n\t\t\t\t__a = self.image_processing_class.from_dict(self.image_processor_dict )\r\n\t\t\t\tself.assertEqual(image_processor.size\t\t\t\t\t\t\t,\t\t\t\t{'''height''': 18, '''width''': 20} )\r\n\r\n\t\t\t\t__a = self.image_processing_class.from_dict(self.image_processor_dict\t\t\t\t\t\t\t,\t\t\t\tsize=42 )\r\n\t\t\t\tself.assertEqual(image_processor.size\t\t\t\t\t\t\t,\t\t\t\t{'''height''': 42, '''width''': 42} )\r\n\r\n\t\t\t\t# Previous config had dimensions in (width, height) order\r\n\t\t\t\t__a = self.image_processing_class.from_dict(self.image_processor_dict\t\t\t\t\t\t\t,\t\t\t\tsize=(42, 84) )\r\n\t\t\t\tself.assertEqual(image_processor.size\t\t\t\t\t\t\t,\t\t\t\t{'''height''': 84, '''width''': 42} )\r\n\r\n\t\tdef __UpperCAmelCase (\t\t\t\t\t\tself ):\r\n\t\t\t\tpass\r\n\r\n\t\t@is_flaky()\r\n\t\tdef __UpperCAmelCase (\t\t\t\t\t\tself ):\r\n\t\t\t\t# Initialize image_processing\r\n\t\t\t\t__a = self.image_processing_class(**self.image_processor_dict )\r\n\t\t\t\t# create random PIL images\r\n\t\t\t\t__a = prepare_image_inputs(self.image_processor_tester\t\t\t\t\t\t\t,\t\t\t\tequal_resolution=_a )\r\n\t\t\t\tfor image in image_inputs:\r\n\t\t\t\t\t\tself.assertIsInstance(_a\t\t\t\t\t\t\t,\t\t\t\tImage.Image )\r\n\r\n\t\t\t\t# Test not batched input\r\n\t\t\t\t__a = image_processing(image_inputs[0]\t\t\t\t\t\t\t,\t\t\t\treturn_tensors='''pt''' ).pixel_values\r\n\t\t\t\tself.assertEqual(\r\n\t\t\t\t encoded_images.shape\t\t\t\t\t\t\t,\t\t\t\t(\r\n\t\t\t\t 1,\r\n\t\t\t\t self.image_processor_tester.num_channels,\r\n\t\t\t\t self.image_processor_tester.size['''height'''],\r\n\t\t\t\t self.image_processor_tester.size['''width'''],\r\n\t\t\t\t )\t\t\t\t\t\t\t,\t\t\t\t)\r\n\r\n\t\t\t\t# Test batched\r\n\t\t\t\t__a = image_processing(_a\t\t\t\t\t\t\t,\t\t\t\treturn_tensors='''pt''' ).pixel_values\r\n\t\t\t\tself.assertEqual(\r\n\t\t\t\t encoded_images.shape\t\t\t\t\t\t\t,\t\t\t\t(\r\n\t\t\t\t self.image_processor_tester.batch_size,\r\n\t\t\t\t self.image_processor_tester.num_channels,\r\n\t\t\t\t self.image_processor_tester.size['''height'''],\r\n\t\t\t\t self.image_processor_tester.size['''width'''],\r\n\t\t\t\t )\t\t\t\t\t\t\t,\t\t\t\t)\r\n\r\n\t\t@is_flaky()\r\n\t\tdef __UpperCAmelCase (\t\t\t\t\t\tself ):\r\n\t\t\t\t# Initialize image_processing\r\n\t\t\t\t__a = self.image_processing_class(**self.image_processor_dict )\r\n\t\t\t\t# create random numpy tensors\r\n\t\t\t\t__a = prepare_image_inputs(self.image_processor_tester\t\t\t\t\t\t\t,\t\t\t\tequal_resolution=_a\t\t\t\t\t\t\t,\t\t\t\tnumpify=_a )\r\n\t\t\t\tfor image in image_inputs:\r\n\t\t\t\t\t\tself.assertIsInstance(_a\t\t\t\t\t\t\t,\t\t\t\tnp.ndarray )\r\n\r\n\t\t\t\t# Test not batched input\r\n\t\t\t\t__a = image_processing(image_inputs[0]\t\t\t\t\t\t\t,\t\t\t\treturn_tensors='''pt''' 
).pixel_values\r\n\t\t\t\tself.assertEqual(\r\n\t\t\t\t encoded_images.shape\t\t\t\t\t\t\t,\t\t\t\t(\r\n\t\t\t\t 1,\r\n\t\t\t\t self.image_processor_tester.num_channels,\r\n\t\t\t\t self.image_processor_tester.size['''height'''],\r\n\t\t\t\t self.image_processor_tester.size['''width'''],\r\n\t\t\t\t )\t\t\t\t\t\t\t,\t\t\t\t)\r\n\r\n\t\t\t\t# Test batched\r\n\t\t\t\t__a = image_processing(_a\t\t\t\t\t\t\t,\t\t\t\treturn_tensors='''pt''' ).pixel_values\r\n\t\t\t\tself.assertEqual(\r\n\t\t\t\t encoded_images.shape\t\t\t\t\t\t\t,\t\t\t\t(\r\n\t\t\t\t self.image_processor_tester.batch_size,\r\n\t\t\t\t self.image_processor_tester.num_channels,\r\n\t\t\t\t self.image_processor_tester.size['''height'''],\r\n\t\t\t\t self.image_processor_tester.size['''width'''],\r\n\t\t\t\t )\t\t\t\t\t\t\t,\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\t\t@is_flaky()\r\n\t\tdef __UpperCAmelCase (\t\t\t\t\t\tself ):\r\n\t\t\t\t# Initialize image_processing\r\n\t\t\t\t__a = self.image_processing_class(**self.image_processor_dict )\r\n\t\t\t\t# create random PyTorch tensors\r\n\t\t\t\t__a = prepare_image_inputs(self.image_processor_tester\t\t\t\t\t\t\t,\t\t\t\tequal_resolution=_a\t\t\t\t\t\t\t,\t\t\t\ttorchify=_a )\r\n\t\t\t\tfor image in image_inputs:\r\n\t\t\t\t\t\tself.assertIsInstance(_a\t\t\t\t\t\t\t,\t\t\t\ttorch.Tensor )\r\n\r\n\t\t\t\t# Test not batched input\r\n\t\t\t\t__a = image_processing(image_inputs[0]\t\t\t\t\t\t\t,\t\t\t\treturn_tensors='''pt''' ).pixel_values\r\n\t\t\t\tself.assertEqual(\r\n\t\t\t\t encoded_images.shape\t\t\t\t\t\t\t,\t\t\t\t(\r\n\t\t\t\t 1,\r\n\t\t\t\t self.image_processor_tester.num_channels,\r\n\t\t\t\t self.image_processor_tester.size['''height'''],\r\n\t\t\t\t self.image_processor_tester.size['''width'''],\r\n\t\t\t\t )\t\t\t\t\t\t\t,\t\t\t\t)\r\n\r\n\t\t\t\t# Test batched\r\n\t\t\t\t__a = image_processing(_a\t\t\t\t\t\t\t,\t\t\t\treturn_tensors='''pt''' ).pixel_values\r\n\t\t\t\tself.assertEqual(\r\n\t\t\t\t encoded_images.shape\t\t\t\t\t\t\t,\t\t\t\t(\r\n\t\t\t\t self.image_processor_tester.batch_size,\r\n\t\t\t\t self.image_processor_tester.num_channels,\r\n\t\t\t\t self.image_processor_tester.size['''height'''],\r\n\t\t\t\t self.image_processor_tester.size['''width'''],\r\n\t\t\t\t )\t\t\t\t\t\t\t,\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":65,"string":"65"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\nimport warnings\r\n\r\nfrom ..trainer import Trainer\r\nfrom ..utils import logging\r\n\r\n\r\nlowercase_ =\t\t\t\t\t\tlogging.get_logger(__name__)\r\nclass __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):\r\n\r\n\r\n\t\t'''simple docstring'''\r\n\r\n\r\n\r\n\t\tdef __init__(\t\t\t\t\t\tself\t\t\t\t\t\t\t,\t\t\t\t_a=None\t\t\t\t\t\t\t,\t\t\t\t**_a ):\r\n\t\t\t\twarnings.warn(\r\n\t\t\t\t '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. 
You can use `Trainer` '''
            '''instead.''', _a, )
        super().__init__(args=_a, **_a)
"},"style_context_codestyle":{"kind":"number","value":65,"string":"65"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":554,"cells":{"code":{"kind":"string","value":"
"""simple docstring"""

from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

__SCREAMING_SNAKE_CASE = TypeVar("T")
__SCREAMING_SNAKE_CASE = TypeVar("U")


class UpperCamelCase(Generic[T, U]):
    def __init__(self, __UpperCamelCase, __UpperCamelCase) -> List[Any]:
        '''simple docstring'''
        lowercase_: Optional[Any] = key
        lowercase_: Optional[int] = val
        lowercase_: DoubleLinkedListNode[T, U] | None = None
        lowercase_: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        '''simple docstring'''
        return (
            f'''Node: key: {self.key}, val: {self.val}, '''
            f'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
        )


class UpperCamelCase(Generic[T, U]):
    def __init__(self) -> None:
        '''simple docstring'''
        lowercase_: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowerCAmelCase_, lowerCAmelCase_)
        lowercase_: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowerCAmelCase_, lowerCAmelCase_)
        lowercase_: Union[str, Any] = self.rear, self.head

    def __repr__(self) -> str:
        '''simple docstring'''
        lowercase_: Tuple = ["DoubleLinkedList"]
        lowercase_: List[Any] = self.head
        while node.next is not None:
            rep.append(str(lowerCAmelCase_))
            lowercase_: Tuple = node.next
        rep.append(str(self.rear))
        return ",\n ".join(lowerCAmelCase_)

    def _UpperCAmelCase(self, __UpperCamelCase) -> None:
        '''simple docstring'''
        lowercase_: Dict = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        lowercase_: List[Any] = node
        lowercase_: Any = previous
        lowercase_: List[Any] = node
        lowercase_: Dict = self.rear

    def _UpperCAmelCase(self, __UpperCamelCase) -> DoubleLinkedListNode[T, U] | None:
        '''simple docstring'''
        if node.prev is None or node.next is None:
            return None

        lowercase_: List[Any] = node.next
        lowercase_: Optional[Any] = node.prev
        lowercase_: List[str] = None
        lowercase_: str = None
        return node


class UpperCamelCase(Generic[T, U]):
    lowercase = {}

    def __init__(self, __UpperCamelCase) -> List[Any]:
        '''simple docstring'''
        lowercase_: DoubleLinkedList[T, U] = DoubleLinkedList()
        lowercase_: List[str] = capacity
        lowercase_: Tuple = 0
        lowercase_: List[str] = 0
        lowercase_: List[str] = 0
lowercase_ :\t\t\t\t\t\t\tdict[T, DoubleLinkedListNode[T, U]]\t\t\t\t\t\t= {}\r def __repr__( self ) -> str:\r\r\r '''simple docstring'''\r\r\r return (\r f'''CacheInfo(hits={self.hits}, misses={self.miss}, '''\r f'''capacity={self.capacity}, current size={self.num_keys})'''\r )\r def __contains__( self\t,__UpperCamelCase ) -> bool:\r\r\r '''simple docstring'''\r\r\r return key in self.cache\r def _UpperCAmelCase ( self\t,__UpperCamelCase ) -> U | None:\r\r\r '''simple docstring'''\r\r\r if key in self.cache:\r self.hits += 1\r lowercase_ :\t\t\t\t\t\t\tDoubleLinkedListNode[T, U]\t\t\t\t\t\t= self.cache[key]\r lowercase_ :\t\t\t\t\t\t\tint\t\t\t\t\t\t= self.list.remove(self.cache[key] )\r assert node == value_node\r\r # node is guaranteed not None because it is in self.cache\r assert node is not None\r self.list.add(lowerCAmelCase_ )\r return node.val\r self.miss += 1\r return None\r def _UpperCAmelCase ( self\t,__UpperCamelCase\t,__UpperCamelCase ) -> None:\r\r\r '''simple docstring'''\r\r\r if key not in self.cache:\r if self.num_keys >= self.capacity:\r # delete first node (oldest) when over capacity\r lowercase_ :\t\t\t\t\t\t\tstr\t\t\t\t\t\t= self.list.head.next\r\r # guaranteed to have a non-None first node when num_keys > 0\r # explain to type checker via assertions\r assert first_node is not None\r assert first_node.key is not None\r assert (\r self.list.remove(lowerCAmelCase_ ) is not None\r ) # node guaranteed to be in list assert node.key is not None\r\r del self.cache[first_node.key]\r self.num_keys -= 1\r lowercase_ :\t\t\t\t\t\t\tList[Any]\t\t\t\t\t\t= DoubleLinkedListNode(lowerCAmelCase_\t,lowerCAmelCase_ )\r self.list.add(self.cache[key] )\r self.num_keys += 1\r\r else:\r # bump node to the end of the list, update value\r lowercase_ :\t\t\t\t\t\t\tOptional[Any]\t\t\t\t\t\t= self.list.remove(self.cache[key] )\r assert node is not None # node guaranteed to be in list\r lowercase_ :\t\t\t\t\t\t\tstr\t\t\t\t\t\t= value\r self.list.add(lowerCAmelCase_ )\r\r\r\r\r\r\r\r @classmethod\r def _UpperCAmelCase ( cls\t,__UpperCamelCase = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:\r\r\r '''simple docstring'''\r\r\r def cache_decorator_inner(__UpperCamelCase ) -> Callable[..., U]:\r def cache_decorator_wrapper(*__UpperCamelCase ) -> U:\r if func not in cls.decorator_function_to_instance_map:\r lowercase_ :\t\t\t\t\t\t\tAny\t\t\t\t\t\t= LRUCache(lowerCAmelCase_ )\r\r lowercase_ :\t\t\t\t\t\t\tList[Any]\t\t\t\t\t\t= cls.decorator_function_to_instance_map[func].get(args[0] )\r if result is None:\r lowercase_ :\t\t\t\t\t\t\tstr\t\t\t\t\t\t= func(*lowerCAmelCase_ )\r cls.decorator_function_to_instance_map[func].put(args[0]\t,lowerCAmelCase_ )\r return result\r\r def cache_info() -> LRUCache[T, U]:\r return cls.decorator_function_to_instance_map[func]\r\r setattr(lowerCAmelCase_\t,'cache_info'\t,lowerCAmelCase_ ) # noqa: B010\r\r return cache_decorator_wrapper\r\r return cache_decorator_inner\r\r\rif __name__ == \"__main__\":\r import doctest\r\r doctest.testmod()\r\r"},"code_codestyle":{"kind":"number","value":425,"string":"425"},"style_context":{"kind":"string","value":"\n\n\n\n\n\n\n\"\"\"simple docstring\"\"\"\nimport argparse\nimport json\nfrom pathlib import Path\n\nimport requests\nimport torch\nfrom huggingface_hub import hf_hub_download\nfrom PIL import Image\n\nfrom transformers import (\n SwiftFormerConfig,\n SwiftFormerForImageClassification,\n ViTImageProcessor,\n)\nfrom transformers.utils import logging\n\n\nlogging.set_verbosity_info()\nlowerCamelCase_\t\t\t\t\t\t\t 
= logging.get_logger(__name__)\n\nlowerCamelCase_\t\t\t\t\t\t\t = torch.device('''cpu''')\n\ndef \t\t\t\t\t\t\tsnake_case ( ):\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tstr \t= \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tstr \t= Image.open(requests.get(A__ ,stream=A__\t\t).raw\t\t)\n\t\t\treturn im\n\ndef \t\t\t\t\t\t\tsnake_case ( A__\t\t):\n\t\t\tif swiftformer_name == \"swiftformer_xs\":\n\t\t\t\t\t\treturn torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01]\t\t)\n\n\t\t\telif swiftformer_name == \"swiftformer_s\":\n\t\t\t\t\t\treturn torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01]\t\t)\n\n\t\t\telif swiftformer_name == \"swiftformer_l1\":\n\t\t\t\t\t\treturn torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02]\t\t)\n\n\t\t\telif swiftformer_name == \"swiftformer_l3\":\n\t\t\t\t\t\treturn torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02]\t\t)\n\ndef \t\t\t\t\t\t\tsnake_case ( A__ ,A__ ,A__\t\t):\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tTuple \t= dct.pop(A__\t\t)\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tOptional[Any] \t= val\n\ndef \t\t\t\t\t\t\tsnake_case ( A__\t\t):\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tList[str] \t= []\n\t\t\tfor k in state_dict.keys():\n\t\t\t\t\t\tUpperCAmelCase_\t:\t\t\t\t\tUnion[str, Any] \t= k\n\t\t\t\t\t\tif \".pwconv\" in k:\n\t\t\t\t\t\t\t\t\tUpperCAmelCase_\t:\t\t\t\t\tDict \t= k_new.replace(\".pwconv\" ,\".point_wise_conv\"\t\t)\n\t\t\t\t\t\tif \".dwconv\" in k:\n\t\t\t\t\t\t\t\t\tUpperCAmelCase_\t:\t\t\t\t\tAny \t= k_new.replace(\".dwconv\" ,\".depth_wise_conv\"\t\t)\n\t\t\t\t\t\tif \".Proj.\" in k:\n\t\t\t\t\t\t\t\t\tUpperCAmelCase_\t:\t\t\t\t\tDict \t= k_new.replace(\".Proj.\" ,\".proj.\"\t\t)\n\t\t\t\t\t\tif \"patch_embed\" in k_new:\n\t\t\t\t\t\t\t\t\tUpperCAmelCase_\t:\t\t\t\t\tTuple \t= k_new.replace(\"patch_embed\" ,\"swiftformer.patch_embed.patch_embedding\"\t\t)\n\t\t\t\t\t\tif \"network\" in k_new:\n\t\t\t\t\t\t\t\t\tUpperCAmelCase_\t:\t\t\t\t\tList[Any] \t= k_new.split(\".\"\t\t)\n\t\t\t\t\t\t\t\t\tif ls[2].isdigit():\n\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase_\t:\t\t\t\t\tTuple \t= \"swiftformer.encoder.network.\" + ls[1] + \".blocks.\" + ls[2] + \".\" + \".\".join(ls[3:]\t\t)\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase_\t:\t\t\t\t\tOptional[Any] \t= k_new.replace(\"network\" ,\"swiftformer.encoder.network\"\t\t)\n\t\t\t\t\t\trename_keys.append((k, k_new)\t\t)\n\t\t\treturn rename_keys\n\n@torch.no_grad()\ndef \t\t\t\t\t\t\tsnake_case ( A__ ,A__ ,A__\t\t):\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tOptional[int] \t= SwiftFormerConfig()\n\n\t\t\t# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tOptional[Any] \t= 10_00\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tstr \t= \"huggingface/label-files\"\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tstr \t= \"imagenet-1k-id2label.json\"\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tList[str] \t= json.load(open(hf_hub_download(A__ ,A__ ,repo_type=\"dataset\"\t\t) ,\"r\"\t\t)\t\t)\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tTuple \t= {int(A__\t\t): v for k, v in idalabel.items()}\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tList[Any] \t= idalabel\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tOptional[Any] \t= {v: k for k, v in idalabel.items()}\n\n\t\t\t# size of the architecture\n\t\t\tif swiftformer_name == \"swiftformer_xs\":\n\t\t\t\t\t\tUpperCAmelCase_\t:\t\t\t\t\tTuple \t= [3, 3, 6, 4]\n\t\t\t\t\t\tUpperCAmelCase_\t:\t\t\t\t\tstr \t= [48, 56, 1_12, 
2_20]\n\n\t\t\telif swiftformer_name == \"swiftformer_s\":\n\t\t\t\t\t\tUpperCAmelCase_\t:\t\t\t\t\tOptional[Any] \t= [3, 3, 9, 6]\n\t\t\t\t\t\tUpperCAmelCase_\t:\t\t\t\t\tOptional[Any] \t= [48, 64, 1_68, 2_24]\n\n\t\t\telif swiftformer_name == \"swiftformer_l1\":\n\t\t\t\t\t\tUpperCAmelCase_\t:\t\t\t\t\tint \t= [4, 3, 10, 5]\n\t\t\t\t\t\tUpperCAmelCase_\t:\t\t\t\t\tUnion[str, Any] \t= [48, 96, 1_92, 3_84]\n\n\t\t\telif swiftformer_name == \"swiftformer_l3\":\n\t\t\t\t\t\tUpperCAmelCase_\t:\t\t\t\t\tDict \t= [4, 4, 12, 6]\n\t\t\t\t\t\tUpperCAmelCase_\t:\t\t\t\t\tOptional[int] \t= [64, 1_28, 3_20, 5_12]\n\n\t\t\t# load state_dict of original model, remove and rename some keys\n\t\t\tif original_ckpt:\n\t\t\t\t\t\tif original_ckpt.startswith(\"https\"\t\t):\n\t\t\t\t\t\t\t\t\tUpperCAmelCase_\t:\t\t\t\t\tList[Any] \t= torch.hub.load_state_dict_from_url(A__ ,map_location=\"cpu\" ,check_hash=A__\t\t)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tUpperCAmelCase_\t:\t\t\t\t\tAny \t= torch.load(A__ ,map_location=\"cpu\"\t\t)\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tList[str] \t= checkpoint\n\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tDict \t= create_rename_keys(A__\t\t)\n\t\t\tfor rename_key_src, rename_key_dest in rename_keys:\n\t\t\t\t\t\trename_key(A__ ,A__ ,A__\t\t)\n\n\t\t\t# load HuggingFace model\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tOptional[int] \t= SwiftFormerForImageClassification(A__\t\t).eval()\n\t\t\thf_model.load_state_dict(A__\t\t)\n\n\t\t\t# prepare test inputs\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tTuple \t= prepare_img()\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tint \t= ViTImageProcessor.from_pretrained(\"preprocessor_config\"\t\t)\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tint \t= processor(images=A__ ,return_tensors=\"pt\"\t\t)\n\n\t\t\t# compare outputs from both models\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tList[Any] \t= get_expected_output(A__\t\t)\n\t\t\tUpperCAmelCase_\t:\t\t\t\t\tint \t= hf_model(inputs[\"pixel_values\"]\t\t).logits\n\n\t\t\tassert hf_logits.shape == torch.Size([1, 10_00]\t\t)\n\t\t\tassert torch.allclose(hf_logits[0, 0:5] ,A__ ,atol=1e-3\t\t)\n\n\t\t\tPath(A__\t\t).mkdir(exist_ok=A__\t\t)\n\t\t\tprint(F\"\"\"Saving model {swiftformer_name} to {pytorch_dump_folder_path}\"\"\"\t\t)\n\t\t\thf_model.save_pretrained(A__\t\t)\n\n\nif __name__ == \"__main__\":\n\t\t\tlowerCamelCase_\t\t\t\t\t\t\t = argparse.ArgumentParser()\n\t\t\t# Required parameters\n\t\t\tparser.add_argument(\n\t\t\t '''--swiftformer_name''',\n\t\t\t default='''swiftformer_xs''',\n\t\t\t choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],\n\t\t\t type=str,\n\t\t\t help='''Name of the SwiftFormer model you\\'d like to convert.''',\n\t\t\t)\n\t\t\tparser.add_argument(\n\t\t\t '''--pytorch_dump_folder_path''',\n\t\t\t default='''./converted_outputs/''',\n\t\t\t type=str,\n\t\t\t help='''Path to the output PyTorch model directory.''',\n\t\t\t)\n\t\t\tparser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')\n\n\t\t\tlowerCamelCase_\t\t\t\t\t\t\t = parser.parse_args()\n\t\t\tconvert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)\n\n\n\n"},"style_context_codestyle":{"kind":"number","value":95,"string":"95"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":555,"cells":{"code":{"kind":"string","value":"\"\"\"simple docstring\"\"\"\n\n\n\nfrom typing import TYPE_CHECKING\n\nfrom ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, 
is_torch_available\n\n\n__A : Any\t\t\t\t\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\t{\n \"configuration_groupvit\": [\n \"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP\",\n \"GroupViTConfig\",\n \"GroupViTOnnxConfig\",\n \"GroupViTTextConfig\",\n \"GroupViTVisionConfig\",\n ],\n}\n\ntry:\n if not is_torch_available():\n raise OptionalDependencyNotAvailable()\nexcept OptionalDependencyNotAvailable:\n pass\nelse:\n __A : Optional[int]\t\t\t\t\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\t[\n \"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST\",\n \"GroupViTModel\",\n \"GroupViTPreTrainedModel\",\n \"GroupViTTextModel\",\n \"GroupViTVisionModel\",\n ]\n\ntry:\n if not is_tf_available():\n raise OptionalDependencyNotAvailable()\nexcept OptionalDependencyNotAvailable:\n pass\nelse:\n __A : Optional[int]\t\t\t\t\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\t[\n \"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST\",\n \"TFGroupViTModel\",\n \"TFGroupViTPreTrainedModel\",\n \"TFGroupViTTextModel\",\n \"TFGroupViTVisionModel\",\n ]\n\nif TYPE_CHECKING:\n from .configuration_groupvit import (\n GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,\n GroupViTConfig,\n GroupViTOnnxConfig,\n GroupViTTextConfig,\n GroupViTVisionConfig,\n )\n\n try:\n if not is_torch_available():\n raise OptionalDependencyNotAvailable()\n except OptionalDependencyNotAvailable:\n pass\n else:\n from .modeling_groupvit import (\n GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,\n GroupViTModel,\n GroupViTPreTrainedModel,\n GroupViTTextModel,\n GroupViTVisionModel,\n )\n\n try:\n if not is_tf_available():\n raise OptionalDependencyNotAvailable()\n except OptionalDependencyNotAvailable:\n pass\n else:\n from .modeling_tf_groupvit import (\n TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,\n TFGroupViTModel,\n TFGroupViTPreTrainedModel,\n TFGroupViTTextModel,\n TFGroupViTVisionModel,\n )\n\nelse:\n import sys\n\n __A : Optional[int]\t\t\t\t\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\t_LazyModule(__name__, globals()[\"__file__\"], _import_structure, module_spec=__spec__)\n\n\n\n\n\n"},"code_codestyle":{"kind":"number","value":595,"string":"595"},"style_context":{"kind":"string","value":"\"\"\"simple docstring\"\"\"\n\n\n\nfrom __future__ import annotations\n\n__A : Union[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\t[]\n\n\n\ndef \t\tlowercase (\t\t\t\t\tUpperCamelCase : list[list[int]]\t\t\t\t\t, UpperCamelCase : int\t\t\t\t\t, UpperCamelCase : int\t\t):\n\n\n\n\n\n \"\"\"simple docstring\"\"\"\n for i in range(len(UpperCamelCase\t\t)\t\t):\n if board[row][i] == 1:\n return False\n for i in range(len(UpperCamelCase\t\t)\t\t):\n if board[i][column] == 1:\n return False\n for i, j in zip(range(UpperCamelCase\t\t\t\t\t, -1\t\t\t\t\t, -1\t\t)\t\t\t\t\t, range(UpperCamelCase\t\t\t\t\t, -1\t\t\t\t\t, -1\t\t)\t\t):\n if board[i][j] == 1:\n return False\n for i, j in zip(range(UpperCamelCase\t\t\t\t\t, -1\t\t\t\t\t, -1\t\t)\t\t\t\t\t, range(UpperCamelCase\t\t\t\t\t, len(UpperCamelCase\t\t)\t\t)\t\t):\n if board[i][j] == 1:\n return False\n return True\n\n\n\ndef \t\tlowercase (\t\t\t\t\tUpperCamelCase : list[list[int]]\t\t\t\t\t, UpperCamelCase : int\t\t):\n\n\n\n\n\n \"\"\"simple docstring\"\"\"\n if row >= len(UpperCamelCase\t\t):\n solution.append(UpperCamelCase\t\t)\n printboard(UpperCamelCase\t\t)\n print()\n return True\n for i in range(len(UpperCamelCase\t\t)\t\t):\n if is_safe(UpperCamelCase\t\t\t\t\t, UpperCamelCase\t\t\t\t\t, UpperCamelCase\t\t):\n A__ :\t\t\tOptional[Any]\t\t\t=1\n solve(UpperCamelCase\t\t\t\t\t, row + 1\t\t)\n A__ :\t\t\tUnion[str, Any]\t\t\t=0\n return False\n\n\n\ndef \t\tlowercase (\t\t\t\t\tUpperCamelCase : 
list[list[int]]\t\t):\n\n\n\n\n\n \"\"\"simple docstring\"\"\"\n for i in range(len(UpperCamelCase\t\t)\t\t):\n for j in range(len(UpperCamelCase\t\t)\t\t):\n if board[i][j] == 1:\n print(\"Q\"\t\t\t\t\t, end=\" \"\t\t)\n else:\n print(\".\"\t\t\t\t\t, end=\" \"\t\t)\n print()\n\n\n# n=int(input(\"The no. of queens\"))\n__A : List[Any]\t\t\t\t\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\t8\n__A : Dict\t\t\t\t\t\t\t\t\t\t\t\t=\t\t\t\t\t\t\t[[0 for i in range(n)] for j in range(n)]\nsolve(board, 0)\n\nprint(\"The total no. of solutions are :\", len(solution))\n\n\n\n\n\n"},"style_context_codestyle":{"kind":"number","value":595,"string":"595"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":556,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport json\r\nimport logging\r\nimport os\r\nimport socket\r\n\r\nimport git\r\nimport numpy as np\r\nimport torch\r\n\r\n\r\nlogging.basicConfig(\r\n format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',\r\n datefmt='''%m/%d/%Y %H:%M:%S''',\r\n level=logging.INFO,\r\n)\r\nsnake_case \t\t\t\t\t=\t\t\t\tlogging.getLogger(__name__)\r\n\r\n\r\n\r\n\r\ndef \t\t\tsnake_case\t\t(\t\tlowerCAmelCase_\t)\t\t\t\t\t\t->\t\t\t\tList[str]:\r\n _snake_case \t= git.Repo(search_parent_directories=lowerCAmelCase_\t)\r\n _snake_case \t= {\r\n '''repo_id''': str(lowerCAmelCase_\t),\r\n '''repo_sha''': str(repo.head.object.hexsha\t),\r\n '''repo_branch''': str(repo.active_branch\t),\r\n }\r\n\r\n with open(os.path.join(lowerCAmelCase_ ,\t\t\t\t\t\t'''git_log.json'''\t) ,\t\t\t\t\t\t'''w'''\t) as f:\r\n json.dump(lowerCAmelCase_ ,\t\t\t\t\t\tlowerCAmelCase_ ,\t\t\t\t\t\tindent=4\t)\r\n\r\n\r\n\r\n\r\ndef \t\t\tsnake_case\t\t(\t\tlowerCAmelCase_\t)\t\t\t\t\t\t->\t\t\t\tint:\r\n if params.n_gpu <= 0:\r\n _snake_case \t= 0\r\n _snake_case \t= -1\r\n _snake_case \t= True\r\n _snake_case \t= False\r\n return\r\n\r\n assert torch.cuda.is_available()\r\n\r\n logger.info('''Initializing GPUs'''\t)\r\n if params.n_gpu > 1:\r\n assert params.local_rank != -1\r\n\r\n _snake_case \t= int(os.environ['''WORLD_SIZE''']\t)\r\n _snake_case \t= int(os.environ['''N_GPU_NODE''']\t)\r\n _snake_case \t= int(os.environ['''RANK''']\t)\r\n\r\n # number of nodes / node ID\r\n _snake_case \t= params.world_size // params.n_gpu_per_node\r\n _snake_case \t= params.global_rank // params.n_gpu_per_node\r\n _snake_case \t= True\r\n\r\n assert params.n_nodes == int(os.environ['''N_NODES''']\t)\r\n assert params.node_id == int(os.environ['''NODE_RANK''']\t)\r\n\r\n # local job (single GPU)\r\n else:\r\n assert params.local_rank == -1\r\n\r\n _snake_case \t= 1\r\n _snake_case \t= 0\r\n _snake_case \t= 0\r\n _snake_case \t= 0\r\n _snake_case \t= 1\r\n _snake_case \t= 1\r\n _snake_case \t= False\r\n\r\n # sanity checks\r\n assert params.n_nodes >= 1\r\n assert 0 <= params.node_id < params.n_nodes\r\n assert 0 <= params.local_rank <= params.global_rank < params.world_size\r\n assert params.world_size == params.n_nodes * params.n_gpu_per_node\r\n\r\n # define whether this is the master process / if we are in multi-node distributed mode\r\n _snake_case \t= params.node_id == 0 and params.local_rank == 0\r\n _snake_case \t= params.n_nodes > 1\r\n\r\n # summary\r\n _snake_case \t= f\"\"\"--- Global rank: {params.global_rank} - \"\"\"\r\n logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes\t)\r\n logger.info(PREFIX + '''Node ID : %i''' % params.node_id\t)\r\n logger.info(PREFIX + '''Local rank : %i''' % 
params.local_rank\t)\r\n logger.info(PREFIX + '''World size : %i''' % params.world_size\t)\r\n logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node\t)\r\n logger.info(PREFIX + '''Master : %s''' % str(params.is_master\t)\t)\r\n logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node\t)\t)\r\n logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu\t)\t)\r\n logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname()\t)\r\n\r\n # set GPU device\r\n torch.cuda.set_device(params.local_rank\t)\r\n\r\n # initialize multi-GPU\r\n if params.multi_gpu:\r\n logger.info('''Initializing PyTorch distributed'''\t)\r\n torch.distributed.init_process_group(\r\n init_method='''env://''' ,\t\t\t\t\t\tbackend='''nccl''' ,\t\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\tsnake_case\t\t(\t\tlowerCAmelCase_\t)\t\t\t\t\t\t->\t\t\t\tDict:\r\n np.random.seed(args.seed\t)\r\n torch.manual_seed(args.seed\t)\r\n if args.n_gpu > 0:\r\n torch.cuda.manual_seed_all(args.seed\t)\r\n\r\n\r\n\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":103,"string":"103"},"style_context":{"kind":"string","value":"\r\r\rimport secrets\rfrom random import shuffle\rfrom string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation\rdef \t\t\t\t\t\t\t__lowerCamelCase\t\t(\t\t\t\t\tUpperCAmelCase_\t\t:\t\t\t\t\t\tint = 8\t\t):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r\r a\t\t:Optional[int]\t\t\t\t\t\t\t\t\t=\t\t\tascii_letters + digits + punctuation\r return \"\".join(secrets.choice(UpperCAmelCase_\t\t) for _ in range(UpperCAmelCase_\t\t)\t\t)\rdef \t\t\t\t\t\t\t__lowerCamelCase\t\t(\t\t\t\t\tUpperCAmelCase_\t\t:\t\t\t\t\t\tstr\t\t\t\t\t, UpperCAmelCase_\t\t:\t\t\t\t\t\tint\t\t):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r\r i -= len(UpperCAmelCase_\t\t)\r a\t\t:Tuple\t\t\t\t\t\t\t\t\t=\t\t\ti // 3\r a\t\t:int\t\t\t\t\t\t\t\t\t=\t\t\ti % 3\r # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +\r # random_number(digits, i / 3) + random_characters(punctuation, i / 3)\r a\t\t:Union[str, Any]\t\t\t\t\t\t\t\t\t=\t\t\t(\r chars_incl\r + random(UpperCAmelCase_\t\t\t\t\t, quotient + remainder\t\t)\r + random(UpperCAmelCase_\t\t\t\t\t, UpperCAmelCase_\t\t)\r + random(UpperCAmelCase_\t\t\t\t\t, UpperCAmelCase_\t\t)\r )\r a\t\t:Dict\t\t\t\t\t\t\t\t\t=\t\t\tlist(UpperCAmelCase_\t\t)\r shuffle(UpperCAmelCase_\t\t)\r return \"\".join(UpperCAmelCase_\t\t)\r\r # random is a generalised function for letters, characters and numbers\rdef \t\t\t\t\t\t\t__lowerCamelCase\t\t(\t\t\t\t\tUpperCAmelCase_\t\t:\t\t\t\t\t\tstr\t\t\t\t\t, UpperCAmelCase_\t\t:\t\t\t\t\t\tint\t\t):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r\r return \"\".join(secrets.choice(UpperCAmelCase_\t\t) for _ in range(UpperCAmelCase_\t\t)\t\t)\rdef \t\t\t\t\t\t\t__lowerCamelCase\t\t(\t\t\t\t\tUpperCAmelCase_\t\t:\t\t\t\t\t\tUnion[str, Any]\t\t\t\t\t, UpperCAmelCase_\t\t:\t\t\t\t\t\tTuple\t\t):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r\r pass # Put your code here...\rdef \t\t\t\t\t\t\t__lowerCamelCase\t\t(\t\t\t\t\tUpperCAmelCase_\t\t:\t\t\t\t\t\tUnion[str, Any]\t\t\t\t\t, UpperCAmelCase_\t\t:\t\t\t\t\t\tstr\t\t):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r\r pass # Put your code here...\rdef \t\t\t\t\t\t\t__lowerCamelCase\t\t(\t\t\t\t\tUpperCAmelCase_\t\t:\t\t\t\t\t\tstr\t\t\t\t\t, UpperCAmelCase_\t\t:\t\t\t\t\t\tOptional[int]\t\t):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r\r pass # Put your code here...\rdef 
\t\t\t\t\t\t\t__lowerCamelCase\t\t(\t\t\t\t\tUpperCAmelCase_\t\t:\t\t\t\t\t\tstr\t\t\t\t\t, UpperCAmelCase_\t\t:\t\t\t\t\t\tint = 8\t\t):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r\r if len(UpperCAmelCase_\t\t) < min_length:\r # Your Password must be at least 8 characters long\r return False\r\r a\t\t:Dict\t\t\t\t\t\t\t\t\t=\t\t\tany(char in ascii_uppercase for char in password\t\t)\r a\t\t:Optional[int]\t\t\t\t\t\t\t\t\t=\t\t\tany(char in ascii_lowercase for char in password\t\t)\r a\t\t:Tuple\t\t\t\t\t\t\t\t\t=\t\t\tany(char in digits for char in password\t\t)\r a\t\t:Any\t\t\t\t\t\t\t\t\t=\t\t\tany(char in punctuation for char in password\t\t)\r\r return upper and lower and num and spec_char\r # Passwords should contain UPPERCASE, lowerase\r # numbers, and special characters\rdef \t\t\t\t\t\t\t__lowerCamelCase\t\t(\t\t\t\t\t):\r\r\r\r\r \"\"\"simple docstring\"\"\"\r\r\r\r\r\r a\t\t:int\t\t\t\t\t\t\t\t\t=\t\t\tint(input('''Please indicate the max length of your password: '''\t\t).strip()\t\t)\r a\t\t:Union[str, Any]\t\t\t\t\t\t\t\t\t=\t\t\tinput(\r '''Please indicate the characters that must be in your password: '''\t\t).strip()\r print('''Password generated:'''\t\t\t\t\t, password_generator(UpperCAmelCase_\t\t)\t\t)\r print(\r '''Alternative Password generated:'''\t\t\t\t\t, alternative_password_generator(UpperCAmelCase_\t\t\t\t\t, UpperCAmelCase_\t\t)\t\t\t\t\t, )\r print('''[If you are thinking of using this passsword, You better save it.]'''\t\t)\r\r\rif __name__ == \"__main__\":\r main()\r\r\r\r\r\r"},"style_context_codestyle":{"kind":"number","value":445,"string":"445"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":557,"cells":{"code":{"kind":"string","value":"\r\r\r'''simple docstring'''\r\r\r\rdef \t\t\t_a\t\t\t(\t\t_lowercase :\t\t\tAny\t\t\t\t\t\t): # noqa: E741\r\r\r\r\r\r '''simple docstring'''\r\r\r\r\r\r\r __UpperCAmelCase\t\t\t:\t\t\t\tAny\t\t\t\t\t\t\t\t\t\t\t\t\t= len(lowerCAmelCase__\t\t\t\t\t\t)\r __UpperCAmelCase\t\t\t:\t\t\t\tTuple\t\t\t\t\t\t\t\t\t\t\t\t\t= 0\r __UpperCAmelCase\t\t\t:\t\t\t\tAny\t\t\t\t\t\t\t\t\t\t\t\t\t= [0] * n\r __UpperCAmelCase\t\t\t:\t\t\t\tList[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t= [False] * n\r __UpperCAmelCase\t\t\t:\t\t\t\tint\t\t\t\t\t\t\t\t\t\t\t\t\t= [False] * n\r\r def dfs(_lowercase :\t\t\tAny , _lowercase :\t\t\tAny , _lowercase :\t\t\tDict , _lowercase :\t\t\tOptional[int]\t\t\t\t\t\t):\r if parent == root:\r out_edge_count += 1\r __UpperCAmelCase\t\t\t:\t\t\t\tint\t\t\t\t\t\t\t\t\t\t\t\t\t= True\r __UpperCAmelCase\t\t\t:\t\t\t\tint\t\t\t\t\t\t\t\t\t\t\t\t\t= at\r\r for to in l[at]:\r if to == parent:\r pass\r elif not visited[to]:\r __UpperCAmelCase\t\t\t:\t\t\t\tint\t\t\t\t\t\t\t\t\t\t\t\t\t= dfs(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__\t\t\t\t\t\t)\r __UpperCAmelCase\t\t\t:\t\t\t\tint\t\t\t\t\t\t\t\t\t\t\t\t\t= min(low[at] , low[to]\t\t\t\t\t\t)\r\r # AP found via bridge\r if at < low[to]:\r __UpperCAmelCase\t\t\t:\t\t\t\tTuple\t\t\t\t\t\t\t\t\t\t\t\t\t= True\r # AP found via cycle\r if at == low[to]:\r __UpperCAmelCase\t\t\t:\t\t\t\tList[str]\t\t\t\t\t\t\t\t\t\t\t\t\t= True\r else:\r __UpperCAmelCase\t\t\t:\t\t\t\tList[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t= min(low[at] , lowerCAmelCase__\t\t\t\t\t\t)\r return out_edge_count\r\r for i in range(lowerCAmelCase__\t\t\t\t\t\t):\r if not visited[i]:\r __UpperCAmelCase\t\t\t:\t\t\t\tUnion[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t\t= 0\r __UpperCAmelCase\t\t\t:\t\t\t\tstr\t\t\t\t\t\t\t\t\t\t\t\t\t= dfs(lowerCAmelCase__ , lowerCAmelCase__ , -1 , 
The data preview continues here with raw dataset rows. Each record carries five fields — code, code_codestyle, style_context, style_context_codestyle, label — where code and style_context are multi-hundred-line Python sources with mangled whitespace and renamed identifiers. Those payloads are summarized in <angle brackets> below; the numeric fields are reproduced as shown in the dump.

{"rowIdx": (not shown; the row preceding 558), "code": "<tail of an articulation-points script: prints articulation vertices and calls compute_ap(data) on a hard-coded adjacency-list graph>", "code_codestyle": 706, "style_context": "<transformers-style lazy-import __init__ for LayoutLMv2: config/processing/tokenization/feature-extraction/modeling imports guarded by is_tokenizers_available, is_vision_available and is_torch_available>", "style_context_codestyle": 266, "label": 0}
{"rowIdx": 558, "code": "<RoCBert configuration class (model_type roc_bert): vocab, hidden, pronunciation and shape embedding hyperparameters forwarded to PretrainedConfig>", "code_codestyle": 139, "style_context": "<longest-Collatz-sequence solution below one million, memoised with a counter dict>", "style_context_codestyle": 139, "label": 1}
{"rowIdx": 559, "code": "<loader that JIT-compiles the Deformable DETR MultiScaleDeformableAttention CPU/CUDA kernels via torch.utils.cpp_extension.load>", "code_codestyle": 720, "style_context": "<accelerate package __init__ (version 0.21.0): Accelerator, big-modeling helpers and plugin re-exports>", "style_context_codestyle": 287, "label": 0}
{"rowIdx": 560, "code": "<ImageGPT-style image processor: squared-Euclidean colour quantisation against cluster centres, resize/normalise, preprocess returning a BatchFeature of input_ids>", "code_codestyle": 31, "style_context": "<subset-sum dynamic-programming function with a doctest harness>", "style_context_codestyle": 563, "label": 0}
{"rowIdx": 561, "code": "<Mask2Former test suite: model tester, common ModelTesterMixin checks, gradient-retention test and slow vision integration tests against hard-coded expected tensors>", "code_codestyle": 710, "style_context": "<transformers dependency_versions_check module: runtime version checks for tqdm, tokenizers, accelerate and other core dependencies>", "style_context_codestyle": 13, "label": 0}
{"rowIdx": 562, "code": "<RoCBert configuration class again, in a different identifier-renaming style>", "code_codestyle": 282, "style_context": "<recursive factorial with functools.lru_cache and a doctest harness>", "style_context_codestyle": 282, "label": 1}
{"rowIdx": 563, "code": "<slow TFCamembertModel integration test comparing a last-hidden-state slice to a hard-coded tensor>", "code_codestyle": 59, "style_context": "<script that tokenises a wikitext split, groups it into fixed-length blocks and writes TFRecord shards>", "style_context_codestyle": 59, "label": 1}
{"rowIdx": 564, "code": "<seq2seq fine-tuning script: ModelArguments/DataTrainingArguments dataclasses, Seq2SeqTrainer setup, train/eval/predict with metric saving>", "code_codestyle": 94, "style_context": "<TF adaptive-softmax Keras layer; the dump is cut off mid-definition>", "style_context_codestyle": …, "label": …}
Optional[int]\t\t\t\t\t\t\t= self.cutoffs[0]\r\n\t\t\t\t\t\tsnake_case__ : int\t\t\t\t\t\t\t= len(self.cutoffs ) - 1\r\n\t\t\t\t\t\tsnake_case__ : Any\t\t\t\t\t\t\t= self.shortlist_size + self.n_clusters\r\n\t\t\t\t\t\tsnake_case__ : List[str]\t\t\t\t\t\t\t= keep_order\r\n\r\n\t\t\t\t\t\tsnake_case__ : Tuple\t\t\t\t\t\t\t= []\r\n\t\t\t\t\t\tsnake_case__ : str\t\t\t\t\t\t\t= []\r\n\r\n\r\n\r\n\t\tdef __lowerCamelCase (\tself\t:str ,__lowercase\t:Optional[int] ):\r\n\t\t\t\t\t\tif self.n_clusters > 0:\r\n\t\t\t\t\t\t\t\t\t\tsnake_case__ : Tuple\t\t\t\t\t\t\t= self.add_weight(\r\n\t\t\t\t\t\t\t\t\t\t shape=(self.n_clusters, self.d_embed) ,initializer='''zeros''' ,trainable=__lowercase ,name='''cluster_weight''' )\r\n\t\t\t\t\t\t\t\t\t\tsnake_case__ : Optional[int]\t\t\t\t\t\t\t= self.add_weight(\r\n\t\t\t\t\t\t\t\t\t\t shape=(self.n_clusters,) ,initializer='''zeros''' ,trainable=__lowercase ,name='''cluster_bias''' )\r\n\r\n\t\t\t\t\t\tif self.div_val == 1:\r\n\t\t\t\t\t\t\t\t\t\tfor i in range(len(self.cutoffs ) ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif self.d_proj != self.d_embed:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : int\t\t\t\t\t\t\t= self.add_weight(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t shape=(self.d_embed, self.d_proj) ,initializer='''zeros''' ,trainable=__lowercase ,name=F\"\"\"out_projs_._{i}\"\"\" ,)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.out_projs.append(__lowercase )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.out_projs.append(__lowercase )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : Optional[int]\t\t\t\t\t\t\t= self.add_weight(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t shape=(self.vocab_size, self.d_embed) ,initializer='''zeros''' ,trainable=__lowercase ,name=F\"\"\"out_layers_._{i}_._weight\"\"\" ,)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : int\t\t\t\t\t\t\t= self.add_weight(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t shape=(self.vocab_size,) ,initializer='''zeros''' ,trainable=__lowercase ,name=F\"\"\"out_layers_._{i}_._bias\"\"\" ,)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.out_layers.append((weight, bias) )\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tfor i in range(len(self.cutoffs ) ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t\t,\t\t\t\t\tsnake_case__ : Optional[Any]\t\t\t\t\t\t\t= self.cutoff_ends[i], self.cutoff_ends[i + 1]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : List[str]\t\t\t\t\t\t\t= self.d_embed // (self.div_val**i)\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : str\t\t\t\t\t\t\t= self.add_weight(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t shape=(d_emb_i, self.d_proj) ,initializer='''zeros''' ,trainable=__lowercase ,name=F\"\"\"out_projs_._{i}\"\"\" )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.out_projs.append(__lowercase )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : Optional[int]\t\t\t\t\t\t\t= self.add_weight(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t shape=(r_idx - l_idx, d_emb_i) ,initializer='''zeros''' ,trainable=__lowercase ,name=F\"\"\"out_layers_._{i}_._weight\"\"\" ,)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : Optional[int]\t\t\t\t\t\t\t= self.add_weight(\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t shape=(r_idx - l_idx,) ,initializer='''zeros''' ,trainable=__lowercase ,name=F\"\"\"out_layers_._{i}_._bias\"\"\" ,)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.out_layers.append((weight, bias) )\r\n\t\t\t\t\t\tsuper().build(__lowercase )\r\n\r\n\r\n\r\n\t\t@staticmethod\r\n\t\tdef __lowerCamelCase (\t__lowercase\t:List[str] ,__lowercase\t:int ,__lowercase\t:str ,__lowercase\t:Dict=None ):\r\n\t\t\t\t\t\tsnake_case__ : Dict\t\t\t\t\t\t\t= 
x\r\n\t\t\t\t\t\tif proj is not None:\r\n\t\t\t\t\t\t\t\t\t\tsnake_case__ : str\t\t\t\t\t\t\t= tf.einsum('''ibd,ed->ibe''' ,__lowercase ,__lowercase )\r\n\t\t\t\t\t\treturn tf.einsum('''ibd,nd->ibn''' ,__lowercase ,__lowercase ) + b\r\n\r\n\r\n\r\n\t\t@staticmethod\r\n\t\tdef __lowerCamelCase (\t__lowercase\t:int ,__lowercase\t:Any ):\r\n\t\t\t\t\t\tsnake_case__ : Union[str, Any]\t\t\t\t\t\t\t= shape_list(__lowercase )\r\n\t\t\t\t\t\tsnake_case__ : int\t\t\t\t\t\t\t= tf.range(lp_size[0] ,dtype=target.dtype )\r\n\t\t\t\t\t\tsnake_case__ : Tuple\t\t\t\t\t\t\t= tf.stack([r, target] ,1 )\r\n\t\t\t\t\t\treturn tf.gather_nd(__lowercase ,__lowercase )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\tdef __lowerCamelCase (\tself\t:str ,__lowercase\t:Optional[int] ,__lowercase\t:Tuple ,__lowercase\t:Union[str, Any]=True ,__lowercase\t:str=False ):\r\n\t\t\t\t\t\tsnake_case__ : Any\t\t\t\t\t\t\t= 0\r\n\t\t\t\t\t\tif self.n_clusters == 0:\r\n\t\t\t\t\t\t\t\t\t\tsnake_case__ : int\t\t\t\t\t\t\t= self._logit(__lowercase ,self.out_layers[0][0] ,self.out_layers[0][1] ,self.out_projs[0] )\r\n\t\t\t\t\t\t\t\t\t\tif target is not None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : List[Any]\t\t\t\t\t\t\t= tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__lowercase ,logits=__lowercase )\r\n\t\t\t\t\t\t\t\t\t\tsnake_case__ : Dict\t\t\t\t\t\t\t= tf.nn.log_softmax(__lowercase ,axis=-1 )\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\tsnake_case__ : Any\t\t\t\t\t\t\t= shape_list(__lowercase )\r\n\t\t\t\t\t\t\t\t\t\tsnake_case__ : Dict\t\t\t\t\t\t\t= []\r\n\t\t\t\t\t\t\t\t\t\tsnake_case__ : Tuple\t\t\t\t\t\t\t= tf.zeros(hidden_sizes[:2] )\r\n\t\t\t\t\t\t\t\t\t\tfor i in range(len(self.cutoffs ) ):\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__\t\t\t\t\t\t,\t\t\t\t\tsnake_case__ : List[str]\t\t\t\t\t\t\t= self.cutoff_ends[i], self.cutoff_ends[i + 1]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif target is not None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : List[str]\t\t\t\t\t\t\t= (target >= l_idx) & (target < r_idx)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : Dict\t\t\t\t\t\t\t= tf.where(__lowercase )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : Any\t\t\t\t\t\t\t= tf.boolean_mask(__lowercase ,__lowercase ) - l_idx\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif self.div_val == 1:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : List[Any]\t\t\t\t\t\t\t= self.out_layers[0][0][l_idx:r_idx]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : Dict\t\t\t\t\t\t\t= self.out_layers[0][1][l_idx:r_idx]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : Optional[int]\t\t\t\t\t\t\t= self.out_layers[i][0]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : Optional[int]\t\t\t\t\t\t\t= self.out_layers[i][1]\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif i == 0:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : Dict\t\t\t\t\t\t\t= tf.concat([cur_W, self.cluster_weight] ,0 )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : Union[str, Any]\t\t\t\t\t\t\t= tf.concat([cur_b, self.cluster_bias] ,0 )\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : List[str]\t\t\t\t\t\t\t= self._logit(__lowercase ,__lowercase ,__lowercase ,self.out_projs[0] )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : Tuple\t\t\t\t\t\t\t= tf.nn.log_softmax(__lowercase )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tout.append(head_logprob[..., : self.cutoffs[0]] )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif target is not None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : Any\t\t\t\t\t\t\t= 
tf.boolean_mask(__lowercase ,__lowercase )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : Union[str, Any]\t\t\t\t\t\t\t= self._gather_logprob(__lowercase ,__lowercase )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : Tuple\t\t\t\t\t\t\t= self._logit(__lowercase ,__lowercase ,__lowercase ,self.out_projs[i] )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : Dict\t\t\t\t\t\t\t= tf.nn.log_softmax(__lowercase )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : Optional[Any]\t\t\t\t\t\t\t= self.cutoffs[0] + i - 1 # No probability for the head cluster\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : Union[str, Any]\t\t\t\t\t\t\t= head_logprob[..., cluster_prob_idx, None] + tail_logprob\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tout.append(__lowercase )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif target is not None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : Dict\t\t\t\t\t\t\t= tf.boolean_mask(__lowercase ,__lowercase )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : Tuple\t\t\t\t\t\t\t= tf.boolean_mask(__lowercase ,__lowercase )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : str\t\t\t\t\t\t\t= self._gather_logprob(__lowercase ,__lowercase )\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tif target is not None:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tloss += tf.scatter_nd(__lowercase ,-cur_logprob ,shape_list(__lowercase ) )\r\n\t\t\t\t\t\t\t\t\t\tsnake_case__ : Any\t\t\t\t\t\t\t= tf.concat(__lowercase ,axis=-1 )\r\n\r\n\t\t\t\t\t\tif target is not None:\r\n\t\t\t\t\t\t\t\t\t\tif return_mean:\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsnake_case__ : List[str]\t\t\t\t\t\t\t= tf.reduce_mean(__lowercase )\r\n\t\t\t\t\t\t\t\t\t\t# Add the training-time loss value to the layer using `self.add_loss()`.\r\n\t\t\t\t\t\t\t\t\t\tself.add_loss(__lowercase )\r\n\r\n\t\t\t\t\t\t\t\t\t\t# Log the loss as a metric (we could log arbitrary metrics,\r\n\t\t\t\t\t\t\t\t\t\t# including different metrics for training and inference.\r\n\t\t\t\t\t\t\t\t\t\tself.add_metric(__lowercase ,name=self.name ,aggregation='''mean''' if return_mean else '''''' )\r\n\r\n\t\t\t\t\t\treturn out\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":252,"string":"252"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":565,"cells":{"code":{"kind":"string","value":"\r\r\r\rfrom __future__ import annotations\r\rfrom dataclasses import dataclass\r\r\r\r\r\r@dataclass\rclass __magic_name__ :\r\t\tlowercase : Tuple =42\r\t\tlowercase : Any =None\r\t\tlowercase : List[Any] =None\r\rdef lowerCamelCase_(lowerCamelCase_ ) -> List[Any]:\r\r\t\t\t\t\t\t# Validation\r\t\t\t\t\t\tdef is_valid_tree(lowerCamelCase_ ) -> bool:\r\t\t\t\t\t\t\t\t\t\t\t\tif node is None:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn True\r\r\t\t\t\t\t\t\t\t\t\t\t\tif not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn False\r\r\t\t\t\t\t\t\t\t\t\t\t\ttry:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfloat(node.data )\r\t\t\t\t\t\t\t\t\t\t\t\texcept (TypeError, ValueError):\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn False\r\r\t\t\t\t\t\t\t\t\t\t\t\treturn is_valid_tree(node.left ) and is_valid_tree(node.right )\r\r\t\t\t\t\t\tif not is_valid_tree(lowerCAmelCase__ ):\r\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\r\t\t\t\t\t\t\t\t\t\t\t\t \"Each node should be type of TreeNode and data should be float.\" )\r\r\t\t\t\t\t\tdef 
is_binary_search_tree_recursive_check(\r\t\t\t\t\t\t lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> bool:\r\r\t\t\t\t\t\t\t\t\t\t\t\tif node is None:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn True\r\r\t\t\t\t\t\t\t\t\t\t\t\treturn (\r\t\t\t\t\t\t\t\t\t\t\t\t left_bound < node.data < right_bound\r\t\t\t\t\t\t\t\t\t\t\t\t and is_binary_search_tree_recursive_check(node.left , lowerCAmelCase__ , node.data )\r\t\t\t\t\t\t\t\t\t\t\t\t and is_binary_search_tree_recursive_check(\r\t\t\t\t\t\t\t\t\t\t\t\t node.right , node.data , lowerCAmelCase__ )\r\t\t\t\t\t\t\t\t\t\t\t\t)\r\r\t\t\t\t\t\treturn is_binary_search_tree_recursive_check(lowerCAmelCase__ , -float(\"inf\" ) , float(\"inf\" ) )\r\r\rif __name__ == \"__main__\":\r\t\timport doctest\r\r\t\tdoctest.testmod()\r\r\r\r"},"code_codestyle":{"kind":"number","value":714,"string":"714"},"style_context":{"kind":"string","value":"\r\r\r\rfrom typing import Dict, List, Optional, Union\r\rimport numpy as np\r\rfrom ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict\rfrom ...image_transforms import (\r center_crop,\r get_resize_output_image_size,\r normalize,\r rescale,\r resize,\r to_channel_dimension_format,\r)\rfrom ...image_utils import (\r IMAGENET_STANDARD_MEAN,\r IMAGENET_STANDARD_STD,\r ChannelDimension,\r ImageInput,\r PILImageResampling,\r make_list_of_images,\r to_numpy_array,\r valid_images,\r)\rfrom ...utils import TensorType, is_vision_available, logging\r\r\rif is_vision_available():\r\t\timport PIL\r\r\r__lowerCamelCase :\t\t\t\t\t\t\tList[Any] \t\t\t\t\t\t\t=\t\t\t\tlogging.get_logger(__name__)\r\r\r\r\r\rclass __magic_name__ (\t\t\t\t\t\t\tA__\t\t\t\t\t\t):\r\t\tlowercase : Tuple =['''pixel_values''']\r\r\r\r\r\r\r\t\tdef __init__(\t\t\t\t\t\tself : Any\t\t\t\t,\t\tUpperCamelCase__ : bool = True\t\t\t\t,\t\tUpperCamelCase__ : Dict[str, int] = None\t\t\t\t,\t\tUpperCamelCase__ : float = None\t\t\t\t,\t\tUpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR\t\t\t\t,\t\tUpperCamelCase__ : bool = True\t\t\t\t,\t\tUpperCamelCase__ : Union[int, float] = 1 / 2_55\t\t\t\t,\t\tUpperCamelCase__ : bool = True\t\t\t\t,\t\tUpperCamelCase__ : Optional[Union[float, List[float]]] = None\t\t\t\t,\t\tUpperCamelCase__ : Optional[Union[float, List[float]]] = None\t\t\t\t,\t\t**UpperCamelCase__ : List[str]\t\t\t\t,\t\t)\t\t\t\t->\t\t\t\t\tNone:\r\t\t\t\t\t\t\t\t'''simple docstring'''\r\r\r\r\r\r\r\r\t\t\t\t\t\t\t\tsuper().__init__(**UpperCamelCase__ )\r\t\t\t\t\t\t\t\tUpperCAmelCase = size if size is not None else {\"shortest_edge\": 3_84}\r\t\t\t\t\t\t\t\tUpperCAmelCase = get_size_dict(UpperCamelCase__\t\t\t\t,\t\tdefault_to_square=UpperCamelCase__ )\r\r\t\t\t\t\t\t\t\tUpperCAmelCase = do_resize\r\t\t\t\t\t\t\t\tUpperCAmelCase = size\r\t\t\t\t\t\t\t\t# Default value set here for backwards compatibility where the value in config is None\r\t\t\t\t\t\t\t\tUpperCAmelCase = crop_pct if crop_pct is not None else 2_24 / 2_56\r\t\t\t\t\t\t\t\tUpperCAmelCase = resample\r\t\t\t\t\t\t\t\tUpperCAmelCase = do_rescale\r\t\t\t\t\t\t\t\tUpperCAmelCase = rescale_factor\r\t\t\t\t\t\t\t\tUpperCAmelCase = do_normalize\r\t\t\t\t\t\t\t\tUpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN\r\t\t\t\t\t\t\t\tUpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD\r\r\r\r\r\r\r\t\tdef SCREAMING_SNAKE_CASE_\t\t\t(\t\t\t\t\t\tself : Any\t\t\t\t,\t\tUpperCamelCase__ : np.ndarray\t\t\t\t,\t\tUpperCamelCase__ : Dict[str, int]\t\t\t\t,\t\tUpperCamelCase__ : 
float\t\t\t\t,\t\tUpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC\t\t\t\t,\t\tUpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None\t\t\t\t,\t\t**UpperCamelCase__ : Union[str, Any]\t\t\t\t,\t\t)\t\t\t\t->\t\t\t\t\tnp.ndarray:\r\t\t\t\t\t\t\t\t'''simple docstring'''\r\r\r\r\r\r\r\r\t\t\t\t\t\t\t\tUpperCAmelCase = get_size_dict(UpperCamelCase__\t\t\t\t,\t\tdefault_to_square=UpperCamelCase__ )\r\t\t\t\t\t\t\t\tif \"shortest_edge\" not in size:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(F'Size dictionary must contain \\'shortest_edge\\' key. Got {size.keys()}' )\r\t\t\t\t\t\t\t\tUpperCAmelCase = size[\"shortest_edge\"]\r\r\t\t\t\t\t\t\t\tif shortest_edge < 3_84:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct\r\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase = int(shortest_edge / crop_pct )\r\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase = get_resize_output_image_size(UpperCamelCase__\t\t\t\t,\t\tsize=UpperCamelCase__\t\t\t\t,\t\tdefault_to_square=UpperCamelCase__ )\r\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase = resize(image=UpperCamelCase__\t\t\t\t,\t\tsize=UpperCamelCase__\t\t\t\t,\t\tresample=UpperCamelCase__\t\t\t\t,\t\tdata_format=UpperCamelCase__\t\t\t\t,\t\t**UpperCamelCase__ )\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t# then crop to (shortest_edge, shortest_edge)\r\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn center_crop(image=UpperCamelCase__\t\t\t\t,\t\tsize=(shortest_edge, shortest_edge)\t\t\t\t,\t\tdata_format=UpperCamelCase__\t\t\t\t,\t\t**UpperCamelCase__ )\r\t\t\t\t\t\t\t\telse:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t# warping (no cropping) when evaluated at 384 or larger\r\t\t\t\t\t\t\t\t\t\t\t\t\t\treturn resize(\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t UpperCamelCase__\t\t\t\t,\t\tsize=(shortest_edge, shortest_edge)\t\t\t\t,\t\tresample=UpperCamelCase__\t\t\t\t,\t\tdata_format=UpperCamelCase__\t\t\t\t,\t\t**UpperCamelCase__ )\r\r\r\r\r\r\r\t\tdef SCREAMING_SNAKE_CASE_\t\t\t(\t\t\t\t\t\tself : Optional[Any]\t\t\t\t,\t\tUpperCamelCase__ : np.ndarray\t\t\t\t,\t\tUpperCamelCase__ : Union[int, float]\t\t\t\t,\t\tUpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None\t\t\t\t,\t\t**UpperCamelCase__ : Optional[Any]\t\t\t\t,\t\t)\t\t\t\t->\t\t\t\t\tList[str]:\r\t\t\t\t\t\t\t\t'''simple docstring'''\r\r\r\r\r\r\r\r\t\t\t\t\t\t\t\treturn rescale(UpperCamelCase__\t\t\t\t,\t\tscale=UpperCamelCase__\t\t\t\t,\t\tdata_format=UpperCamelCase__\t\t\t\t,\t\t**UpperCamelCase__ )\r\r\r\r\r\r\r\t\tdef SCREAMING_SNAKE_CASE_\t\t\t(\t\t\t\t\t\tself : List[str]\t\t\t\t,\t\tUpperCamelCase__ : np.ndarray\t\t\t\t,\t\tUpperCamelCase__ : Union[float, List[float]]\t\t\t\t,\t\tUpperCamelCase__ : Union[float, List[float]]\t\t\t\t,\t\tUpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None\t\t\t\t,\t\t**UpperCamelCase__ : Optional[Any]\t\t\t\t,\t\t)\t\t\t\t->\t\t\t\t\tnp.ndarray:\r\t\t\t\t\t\t\t\t'''simple docstring'''\r\r\r\r\r\r\r\r\t\t\t\t\t\t\t\treturn normalize(UpperCamelCase__\t\t\t\t,\t\tmean=UpperCamelCase__\t\t\t\t,\t\tstd=UpperCamelCase__\t\t\t\t,\t\tdata_format=UpperCamelCase__\t\t\t\t,\t\t**UpperCamelCase__ )\r\r\r\r\r\r\r\r\t\tdef SCREAMING_SNAKE_CASE_\t\t\t(\t\t\t\t\t\tself : Tuple\t\t\t\t,\t\tUpperCamelCase__ : ImageInput\t\t\t\t,\t\tUpperCamelCase__ : bool = None\t\t\t\t,\t\tUpperCamelCase__ : Dict[str, int] = None\t\t\t\t,\t\tUpperCamelCase__ : float = None\t\t\t\t,\t\tUpperCamelCase__ : PILImageResampling = None\t\t\t\t,\t\tUpperCamelCase__ : bool = None\t\t\t\t,\t\tUpperCamelCase__ : float = None\t\t\t\t,\t\tUpperCamelCase__ : bool = 
None\t\t\t\t,\t\tUpperCamelCase__ : Optional[Union[float, List[float]]] = None\t\t\t\t,\t\tUpperCamelCase__ : Optional[Union[float, List[float]]] = None\t\t\t\t,\t\tUpperCamelCase__ : Optional[Union[str, TensorType]] = None\t\t\t\t,\t\tUpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST\t\t\t\t,\t\t**UpperCamelCase__ : Any\t\t\t\t,\t\t)\t\t\t\t->\t\t\t\t\tPIL.Image.Image:\r\t\t\t\t\t\t\t\t'''simple docstring'''\r\r\r\r\r\r\r\r\t\t\t\t\t\t\t\tUpperCAmelCase = do_resize if do_resize is not None else self.do_resize\r\t\t\t\t\t\t\t\tUpperCAmelCase = crop_pct if crop_pct is not None else self.crop_pct\r\t\t\t\t\t\t\t\tUpperCAmelCase = resample if resample is not None else self.resample\r\t\t\t\t\t\t\t\tUpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale\r\t\t\t\t\t\t\t\tUpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor\r\t\t\t\t\t\t\t\tUpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize\r\t\t\t\t\t\t\t\tUpperCAmelCase = image_mean if image_mean is not None else self.image_mean\r\t\t\t\t\t\t\t\tUpperCAmelCase = image_std if image_std is not None else self.image_std\r\r\t\t\t\t\t\t\t\tUpperCAmelCase = size if size is not None else self.size\r\t\t\t\t\t\t\t\tUpperCAmelCase = get_size_dict(UpperCamelCase__\t\t\t\t,\t\tdefault_to_square=UpperCamelCase__ )\r\r\t\t\t\t\t\t\t\tUpperCAmelCase = make_list_of_images(UpperCamelCase__ )\r\r\t\t\t\t\t\t\t\tif not valid_images(UpperCamelCase__ ):\r\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, \"\r\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"torch.Tensor, tf.Tensor or jax.ndarray.\" )\r\r\t\t\t\t\t\t\t\tif do_resize and size is None or resample is None:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\"Size and resample must be specified if do_resize is True.\" )\r\r\t\t\t\t\t\t\t\tif do_resize and size[\"shortest_edge\"] < 3_84 and crop_pct is None:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\"crop_pct must be specified if size < 384.\" )\r\r\t\t\t\t\t\t\t\tif do_rescale and rescale_factor is None:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\"Rescale factor must be specified if do_rescale is True.\" )\r\r\t\t\t\t\t\t\t\tif do_normalize and (image_mean is None or image_std is None):\r\t\t\t\t\t\t\t\t\t\t\t\t\t\traise ValueError(\"Image mean and std must be specified if do_normalize is True.\" )\r\r\t\t\t\t\t\t\t\t# All transformations expect numpy arrays.\r\t\t\t\t\t\t\t\tUpperCAmelCase = [to_numpy_array(UpperCamelCase__ ) for image in images]\r\r\t\t\t\t\t\t\t\tif do_resize:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase = [self.resize(image=UpperCamelCase__\t\t\t\t,\t\tsize=UpperCamelCase__\t\t\t\t,\t\tcrop_pct=UpperCamelCase__\t\t\t\t,\t\tresample=UpperCamelCase__ ) for image in images]\r\r\t\t\t\t\t\t\t\tif do_rescale:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase = [self.rescale(image=UpperCamelCase__\t\t\t\t,\t\tscale=UpperCamelCase__ ) for image in images]\r\r\t\t\t\t\t\t\t\tif do_normalize:\r\t\t\t\t\t\t\t\t\t\t\t\t\t\tUpperCAmelCase = [self.normalize(image=UpperCamelCase__\t\t\t\t,\t\tmean=UpperCamelCase__\t\t\t\t,\t\tstd=UpperCamelCase__ ) for image in images]\r\r\t\t\t\t\t\t\t\tUpperCAmelCase = [to_channel_dimension_format(UpperCamelCase__\t\t\t\t,\t\tUpperCamelCase__ ) for image in images]\r\r\t\t\t\t\t\t\t\tUpperCAmelCase = {\"pixel_values\": images}\r\t\t\t\t\t\t\t\treturn BatchFeature(data=UpperCamelCase__\t\t\t\t,\t\ttensor_type=UpperCamelCase__ 
)\r\r\r\r"},"style_context_codestyle":{"kind":"number","value":457,"string":"457"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":566,"cells":{"code":{"kind":"string","value":"\n\n\n\n\n\n\"\"\"simple docstring\"\"\"\n\n\nimport collections\nimport gzip\nimport os\nimport urllib\n\nimport numpy\nfrom tensorflow.python.framework import dtypes, random_seed\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.util.deprecation import deprecated\n\nA_\t: List[str] \t\t\t\t=collections.namedtuple(\"\"\"_Datasets\"\"\", [\"\"\"train\"\"\", \"\"\"validation\"\"\", \"\"\"test\"\"\"])\n\n# CVDF mirror of http://yann.lecun.com/exdb/mnist/\nA_\t: Any \t\t\t\t=\"\"\"https://storage.googleapis.com/cvdf-datasets/mnist/\"\"\"\n\n\n\n\n\ndef SCREAMING_SNAKE_CASE_ (\t\t\tsnake_case : Tuple\t\t\t\t\t\t)->\t\t\t\t\t\tOptional[int]:\n _lowerCamelCase = numpy.dtype(numpy.uintaa\t\t\t\t\t\t).newbyteorder('>'\t\t\t\t\t\t)\n return numpy.frombuffer(bytestream.read(4\t\t\t\t\t\t)\t\t, dtype=snake_case\t\t\t\t\t\t)[0]\n\n\n\n\n\n@deprecated(snake_case\t\t, 'Please use tf.data to implement this functionality.'\t\t\t\t\t\t)\ndef SCREAMING_SNAKE_CASE_ (\t\t\tsnake_case : Union[str, Any]\t\t\t\t\t\t)->\t\t\t\t\t\tDict:\n print('Extracting'\t\t, f.name\t\t\t\t\t\t)\n with gzip.GzipFile(fileobj=snake_case\t\t\t\t\t\t) as bytestream:\n _lowerCamelCase = _readaa(snake_case\t\t\t\t\t\t)\n if magic != 2_051:\n raise ValueError(\n 'Invalid magic number %d in MNIST image file: %s' % (magic, f.name)\t\t\t\t\t\t)\n _lowerCamelCase = _readaa(snake_case\t\t\t\t\t\t)\n _lowerCamelCase = _readaa(snake_case\t\t\t\t\t\t)\n _lowerCamelCase = _readaa(snake_case\t\t\t\t\t\t)\n _lowerCamelCase = bytestream.read(rows * cols * num_images\t\t\t\t\t\t)\n _lowerCamelCase = numpy.frombuffer(snake_case\t\t, dtype=numpy.uinta\t\t\t\t\t\t)\n _lowerCamelCase = data.reshape(snake_case\t\t, snake_case\t\t, snake_case\t\t, 1\t\t\t\t\t\t)\n return data\n\n\n\n\n\n@deprecated(snake_case\t\t, 'Please use tf.one_hot on tensors.'\t\t\t\t\t\t)\ndef SCREAMING_SNAKE_CASE_ (\t\t\tsnake_case : List[Any]\t\t, snake_case : List[str]\t\t\t\t\t\t)->\t\t\t\t\t\tTuple:\n _lowerCamelCase = labels_dense.shape[0]\n _lowerCamelCase = numpy.arange(snake_case\t\t\t\t\t\t) * num_classes\n _lowerCamelCase = numpy.zeros((num_labels, num_classes)\t\t\t\t\t\t)\n _lowerCamelCase = 1\n return labels_one_hot\n\n\n\n\n\n@deprecated(snake_case\t\t, 'Please use tf.data to implement this functionality.'\t\t\t\t\t\t)\ndef SCREAMING_SNAKE_CASE_ (\t\t\tsnake_case : str\t\t, snake_case : Optional[int]=False\t\t, snake_case : str=10\t\t\t\t\t\t)->\t\t\t\t\t\tList[Any]:\n print('Extracting'\t\t, f.name\t\t\t\t\t\t)\n with gzip.GzipFile(fileobj=snake_case\t\t\t\t\t\t) as bytestream:\n _lowerCamelCase = _readaa(snake_case\t\t\t\t\t\t)\n if magic != 2_049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' % (magic, f.name)\t\t\t\t\t\t)\n _lowerCamelCase = _readaa(snake_case\t\t\t\t\t\t)\n _lowerCamelCase = bytestream.read(snake_case\t\t\t\t\t\t)\n _lowerCamelCase = numpy.frombuffer(snake_case\t\t, dtype=numpy.uinta\t\t\t\t\t\t)\n if one_hot:\n return _dense_to_one_hot(snake_case\t\t, snake_case\t\t\t\t\t\t)\n return labels\n\n\n\n\n\n\n\nclass __a :\n\n\n\n @deprecated(\n a__ ,\t'Please use alternatives such as official/mnist/_DataSet.py'\n ' from tensorflow/models.' 
,\t)\n def __init__(\t\t\tself ,\ta__ ,\ta__ ,\ta__=False ,\ta__=False ,\ta__=dtypes.floataa ,\ta__=True ,\ta__=None ,\t):\n _lowerCamelCase , _lowerCamelCase = random_seed.get_seed(a__ )\n # If op level seed is not set, use whatever graph level seed is returned\n numpy.random.seed(seeda if seed is None else seeda )\n _lowerCamelCase = dtypes.as_dtype(a__ ).base_dtype\n if dtype not in (dtypes.uinta, dtypes.floataa):\n raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )\n if fake_data:\n _lowerCamelCase = 1_00_00\n _lowerCamelCase = one_hot\n else:\n assert (\n images.shape[0] == labels.shape[0]\n ), F'images.shape: {images.shape} labels.shape: {labels.shape}'\n _lowerCamelCase = images.shape[0]\n\n # Convert shape from [num examples, rows, columns, depth]\n # to [num examples, rows*columns] (assuming depth == 1)\n if reshape:\n assert images.shape[3] == 1\n _lowerCamelCase = images.reshape(\n images.shape[0] ,\timages.shape[1] * images.shape[2] )\n if dtype == dtypes.floataa:\n # Convert from [0, 255] -> [0.0, 1.0].\n _lowerCamelCase = images.astype(numpy.floataa )\n _lowerCamelCase = numpy.multiply(a__ ,\t1.0 / 255.0 )\n _lowerCamelCase = images\n _lowerCamelCase = labels\n _lowerCamelCase = 0\n _lowerCamelCase = 0\n\n\n\n @property\n def \t\tsnake_case_ (\t\t\tself ):\n return self._images\n\n\n\n @property\n def \t\tsnake_case_ (\t\t\tself ):\n return self._labels\n\n\n\n @property\n def \t\tsnake_case_ (\t\t\tself ):\n return self._num_examples\n\n\n\n @property\n def \t\tsnake_case_ (\t\t\tself ):\n return self._epochs_completed\n\n\n\n def \t\tsnake_case_ (\t\t\tself ,\ta__ ,\ta__=False ,\ta__=True ):\n if fake_data:\n _lowerCamelCase = [1] * 7_84\n _lowerCamelCase = [1] + [0] * 9 if self.one_hot else 0\n return (\n [fake_image for _ in range(a__ )],\n [fake_label for _ in range(a__ )],\n )\n _lowerCamelCase = self._index_in_epoch\n # Shuffle for the first epoch\n if self._epochs_completed == 0 and start == 0 and shuffle:\n _lowerCamelCase = numpy.arange(self._num_examples )\n numpy.random.shuffle(a__ )\n _lowerCamelCase = self.images[perma]\n _lowerCamelCase = self.labels[perma]\n # Go to the next epoch\n if start + batch_size > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Get the rest examples in this epoch\n _lowerCamelCase = self._num_examples - start\n _lowerCamelCase = self._images[start : self._num_examples]\n _lowerCamelCase = self._labels[start : self._num_examples]\n # Shuffle the data\n if shuffle:\n _lowerCamelCase = numpy.arange(self._num_examples )\n numpy.random.shuffle(a__ )\n _lowerCamelCase = self.images[perm]\n _lowerCamelCase = self.labels[perm]\n # Start next epoch\n _lowerCamelCase = 0\n _lowerCamelCase = batch_size - rest_num_examples\n _lowerCamelCase = self._index_in_epoch\n _lowerCamelCase = self._images[start:end]\n _lowerCamelCase = self._labels[start:end]\n return (\n numpy.concatenate((images_rest_part, images_new_part) ,\taxis=0 ),\n numpy.concatenate((labels_rest_part, labels_new_part) ,\taxis=0 ),\n )\n else:\n self._index_in_epoch += batch_size\n _lowerCamelCase = self._index_in_epoch\n return self._images[start:end], self._labels[start:end]\n\n\n\n\n\n@deprecated(snake_case\t\t, 'Please write your own downloading logic.'\t\t\t\t\t\t)\ndef SCREAMING_SNAKE_CASE_ (\t\t\tsnake_case : List[Any]\t\t, snake_case : Union[str, Any]\t\t, snake_case : int\t\t\t\t\t\t)->\t\t\t\t\t\tint:\n if not gfile.Exists(snake_case\t\t\t\t\t\t):\n gfile.MakeDirs(snake_case\t\t\t\t\t\t)\n _lowerCamelCase = 
os.path.join(snake_case\t\t, snake_case\t\t\t\t\t\t)\n if not gfile.Exists(snake_case\t\t\t\t\t\t):\n urllib.request.urlretrieve(snake_case\t\t, snake_case\t\t\t\t\t\t) # noqa: S310\n with gfile.GFile(snake_case\t\t\t\t\t\t) as f:\n _lowerCamelCase = f.size()\n print('Successfully downloaded'\t\t, snake_case\t\t, snake_case\t\t, 'bytes.'\t\t\t\t\t\t)\n return filepath\n\n\n\n\n\n@deprecated(\n snake_case\t\t, 'Please use alternatives such as:' ' tensorflow_datasets.load(\\'mnist\\')'\t\t\t\t\t\t)\ndef SCREAMING_SNAKE_CASE_ (\t\t\tsnake_case : Optional[int]\t\t, snake_case : Dict=False\t\t, snake_case : Tuple=False\t\t, snake_case : str=dtypes.floataa\t\t, snake_case : str=True\t\t, snake_case : Union[str, Any]=5_000\t\t, snake_case : List[Any]=None\t\t, snake_case : str=DEFAULT_SOURCE_URL\t\t, )->\t\t\t\t\t\tTuple:\n if fake_data:\n\n def fake():\n return _DataSet(\n []\t\t, []\t\t, fake_data=snake_case\t\t, one_hot=snake_case\t\t, dtype=snake_case\t\t, seed=snake_case\t\t\t\t\t\t)\n\n _lowerCamelCase = fake()\n _lowerCamelCase = fake()\n _lowerCamelCase = fake()\n return _Datasets(train=snake_case\t\t, validation=snake_case\t\t, test=snake_case\t\t\t\t\t\t)\n\n if not source_url: # empty string check\n _lowerCamelCase = DEFAULT_SOURCE_URL\n\n _lowerCamelCase = 'train-images-idx3-ubyte.gz'\n _lowerCamelCase = 'train-labels-idx1-ubyte.gz'\n _lowerCamelCase = 't10k-images-idx3-ubyte.gz'\n _lowerCamelCase = 't10k-labels-idx1-ubyte.gz'\n\n _lowerCamelCase = _maybe_download(\n snake_case\t\t, snake_case\t\t, source_url + train_images_file\t\t\t\t\t\t)\n with gfile.Open(snake_case\t\t, 'rb'\t\t\t\t\t\t) as f:\n _lowerCamelCase = _extract_images(snake_case\t\t\t\t\t\t)\n\n _lowerCamelCase = _maybe_download(\n snake_case\t\t, snake_case\t\t, source_url + train_labels_file\t\t\t\t\t\t)\n with gfile.Open(snake_case\t\t, 'rb'\t\t\t\t\t\t) as f:\n _lowerCamelCase = _extract_labels(snake_case\t\t, one_hot=snake_case\t\t\t\t\t\t)\n\n _lowerCamelCase = _maybe_download(\n snake_case\t\t, snake_case\t\t, source_url + test_images_file\t\t\t\t\t\t)\n with gfile.Open(snake_case\t\t, 'rb'\t\t\t\t\t\t) as f:\n _lowerCamelCase = _extract_images(snake_case\t\t\t\t\t\t)\n\n _lowerCamelCase = _maybe_download(\n snake_case\t\t, snake_case\t\t, source_url + test_labels_file\t\t\t\t\t\t)\n with gfile.Open(snake_case\t\t, 'rb'\t\t\t\t\t\t) as f:\n _lowerCamelCase = _extract_labels(snake_case\t\t, one_hot=snake_case\t\t\t\t\t\t)\n\n if not 0 <= validation_size <= len(snake_case\t\t\t\t\t\t):\n _lowerCamelCase = (\n 'Validation size should be between 0 and '\n f'{len(snake_case\t\t\t\t\t\t)}. 
Received: {validation_size}.'\n )\n raise ValueError(snake_case\t\t\t\t\t\t)\n\n _lowerCamelCase = train_images[:validation_size]\n _lowerCamelCase = train_labels[:validation_size]\n _lowerCamelCase = train_images[validation_size:]\n _lowerCamelCase = train_labels[validation_size:]\n\n _lowerCamelCase = {'dtype': dtype, 'reshape': reshape, 'seed': seed}\n\n _lowerCamelCase = _DataSet(snake_case\t\t, snake_case\t\t, **snake_case\t\t\t\t\t\t)\n _lowerCamelCase = _DataSet(snake_case\t\t, snake_case\t\t, **snake_case\t\t\t\t\t\t)\n _lowerCamelCase = _DataSet(snake_case\t\t, snake_case\t\t, **snake_case\t\t\t\t\t\t)\n\n return _Datasets(train=snake_case\t\t, validation=snake_case\t\t, test=snake_case\t\t\t\t\t\t)\n\n\n\n\n\n\n"},"code_codestyle":{"kind":"number","value":650,"string":"650"},"style_context":{"kind":"string","value":"\n\n\n\n\n\n\"\"\"simple docstring\"\"\"\n\n\nimport copy\nimport json\nimport os\nimport tempfile\n\nfrom transformers import is_torch_available\n\nfrom .test_configuration_utils import config_common_kwargs\n\n\n\n\n\n\n\nclass __a ( lowerCAmelCase__\t\t\t\t\t\t):\n\n\n\n def __init__(\t\t\tself ,\ta__ ,\ta__=None ,\ta__=True ,\ta__=None ,\t**a__ ):\n _lowerCamelCase = parent\n _lowerCamelCase = config_class\n _lowerCamelCase = has_text_modality\n _lowerCamelCase = kwargs\n _lowerCamelCase = common_properties\n\n\n\n def \t\tsnake_case_ (\t\t\tself ):\n _lowerCamelCase = self.config_class(**self.inputs_dict )\n _lowerCamelCase = (\n ['hidden_size', 'num_attention_heads', 'num_hidden_layers']\n if self.common_properties is None\n else self.common_properties\n )\n\n # Add common fields for text models\n if self.has_text_modality:\n common_properties.extend(['vocab_size'] )\n\n # Test that config has the common properties as getters\n for prop in common_properties:\n self.parent.assertTrue(hasattr(a__ ,\ta__ ) ,\tmsg=F'`{prop}` does not exist' )\n\n # Test that config has the common properties as setter\n for idx, name in enumerate(a__ ):\n try:\n setattr(a__ ,\ta__ ,\ta__ )\n self.parent.assertEqual(\n getattr(a__ ,\ta__ ) ,\ta__ ,\tmsg=F'`{name} value {idx} expected, but was {getattr(a__ ,\ta__ )}' )\n except NotImplementedError:\n # Some models might not be able to implement setters for common_properties\n # In that case, a NotImplementedError is raised\n pass\n\n # Test if config class can be called with Config(prop_name=..)\n for idx, name in enumerate(a__ ):\n try:\n _lowerCamelCase = self.config_class(**{name: idx} )\n self.parent.assertEqual(\n getattr(a__ ,\ta__ ) ,\ta__ ,\tmsg=F'`{name} value {idx} expected, but was {getattr(a__ ,\ta__ )}' )\n except NotImplementedError:\n # Some models might not be able to implement setters for common_properties\n # In that case, a NotImplementedError is raised\n pass\n\n\n\n def \t\tsnake_case_ (\t\t\tself ):\n _lowerCamelCase = self.config_class(**self.inputs_dict )\n _lowerCamelCase = json.loads(config.to_json_string() )\n for key, value in self.inputs_dict.items():\n self.parent.assertEqual(obj[key] ,\ta__ )\n\n\n\n def \t\tsnake_case_ (\t\t\tself ):\n _lowerCamelCase = self.config_class(**self.inputs_dict )\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n _lowerCamelCase = os.path.join(a__ ,\t'config.json' )\n config_first.to_json_file(a__ )\n _lowerCamelCase = self.config_class.from_json_file(a__ )\n\n self.parent.assertEqual(config_second.to_dict() ,\tconfig_first.to_dict() )\n\n\n\n def \t\tsnake_case_ (\t\t\tself ):\n _lowerCamelCase = self.config_class(**self.inputs_dict )\n\n with 
tempfile.TemporaryDirectory() as tmpdirname:\n config_first.save_pretrained(a__ )\n _lowerCamelCase = self.config_class.from_pretrained(a__ )\n\n self.parent.assertEqual(config_second.to_dict() ,\tconfig_first.to_dict() )\n\n\n\n def \t\tsnake_case_ (\t\t\tself ):\n _lowerCamelCase = self.config_class(**self.inputs_dict )\n\n _lowerCamelCase = 'test'\n with tempfile.TemporaryDirectory() as tmpdirname:\n _lowerCamelCase = os.path.join(a__ ,\ta__ )\n config_first.save_pretrained(a__ )\n _lowerCamelCase = self.config_class.from_pretrained(a__ ,\tsubfolder=a__ )\n\n self.parent.assertEqual(config_second.to_dict() ,\tconfig_first.to_dict() )\n\n\n\n def \t\tsnake_case_ (\t\t\tself ):\n _lowerCamelCase = self.config_class(**self.inputs_dict ,\tnum_labels=5 )\n self.parent.assertEqual(len(config.idalabel ) ,\t5 )\n self.parent.assertEqual(len(config.labelaid ) ,\t5 )\n\n _lowerCamelCase = 3\n self.parent.assertEqual(len(config.idalabel ) ,\t3 )\n self.parent.assertEqual(len(config.labelaid ) ,\t3 )\n\n\n\n def \t\tsnake_case_ (\t\t\tself ):\n if self.config_class.is_composition:\n return\n _lowerCamelCase = self.config_class()\n self.parent.assertIsNotNone(a__ )\n\n\n\n def \t\tsnake_case_ (\t\t\tself ):\n _lowerCamelCase = copy.deepcopy(a__ )\n _lowerCamelCase = self.config_class(**a__ )\n _lowerCamelCase = []\n for key, value in config_common_kwargs.items():\n if key == \"torch_dtype\":\n if not is_torch_available():\n continue\n else:\n import torch\n\n if config.torch_dtype != torch.floataa:\n wrong_values.append(('torch_dtype', config.torch_dtype, torch.floataa) )\n elif getattr(a__ ,\ta__ ) != value:\n wrong_values.append((key, getattr(a__ ,\ta__ ), value) )\n\n if len(a__ ) > 0:\n _lowerCamelCase = '\\n'.join([F'- {v[0]}: got {v[1]} instead of {v[2]}' for v in wrong_values] )\n raise ValueError(F'The following keys were not properly set in the config:\\n{errors}' )\n\n\n\n def \t\tsnake_case_ (\t\t\tself ):\n self.create_and_test_config_common_properties()\n self.create_and_test_config_to_json_string()\n self.create_and_test_config_to_json_file()\n self.create_and_test_config_from_and_save_pretrained()\n self.create_and_test_config_from_and_save_pretrained_subfolder()\n self.create_and_test_config_with_num_labels()\n self.check_config_can_be_init_without_params()\n self.check_config_arguments_init()\n\n\n\n\n\n\n"},"style_context_codestyle":{"kind":"number","value":650,"string":"650"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":567,"cells":{"code":{"kind":"string","value":"\n'''simple docstring'''\n\n\nimport argparse\nimport os\nimport re\n\nimport torch\nfrom flax.traverse_util import flatten_dict\nfrom tax import checkpoints\n\nfrom transformers import (\n AutoTokenizer,\n PixaStructConfig,\n PixaStructForConditionalGeneration,\n PixaStructImageProcessor,\n PixaStructProcessor,\n PixaStructTextConfig,\n PixaStructVisionConfig,\n)\n\n\n\n\ndef lowercase_\t( _lowercase\t\t\t\t) ->\t\t\t\t\t\t\tDict:\n\t\t\t\t\t'''simple docstring'''\n\n\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: Dict =\t\tcheckpoints.load_tax_checkpoint(_lowercase\t\t\t\t)\n\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: Dict =\t\tflatten_dict(_lowercase\t\t\t\t)\n\t\t\t\t\treturn flax_params\n\n\n\n\ndef lowercase_\t( _lowercase\t\t\t\t) ->\t\t\t\t\t\t\tOptional[int]:\n\t\t\t\t\t'''simple docstring'''\n\n\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: List[str] =\t\t{}\n\n\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: str =\t\t{\n\t\t\t\t\t '''token_embedder''': '''embeddings''',\n\t\t\t\t\t '''encoder_norm''': '''layernorm''',\n\t\t\t\t\t 
'''kernel''': '''weight''',\n\t\t\t\t\t '''.out''': '''.output''',\n\t\t\t\t\t '''scale''': '''weight''',\n\t\t\t\t\t '''embedders_0.pos_embedding''': '''row_embedder.weight''',\n\t\t\t\t\t '''embedders_1.pos_embedding''': '''column_embedder.weight''',\n\t\t\t\t\t}\n\n\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: Tuple =\t\t{\n\t\t\t\t\t '''query''': '''attention.query''',\n\t\t\t\t\t '''key''': '''attention.key''',\n\t\t\t\t\t '''value''': '''attention.value''',\n\t\t\t\t\t '''output.dense''': '''output''',\n\t\t\t\t\t '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',\n\t\t\t\t\t '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',\n\t\t\t\t\t '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',\n\t\t\t\t\t '''mlp.''': '''mlp.DenseReluDense.''',\n\t\t\t\t\t '''pre_mlp_layer_norm''': '''mlp.layer_norm''',\n\t\t\t\t\t '''self_attention.o''': '''self_attention.attention.o''',\n\t\t\t\t\t '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',\n\t\t\t\t\t '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',\n\t\t\t\t\t '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',\n\t\t\t\t\t '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',\n\t\t\t\t\t}\n\n\t\t\t\t\tfor key in flax_dict.keys():\n\t\t\t\t\t\t\t\t\t\tif \"target\" in key:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# remove the first prefix from the key\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: Union[str, Any] =\t\t'''.'''.join(key[1:]\t\t\t\t)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# rename the key\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfor old, new in CONVERSION_MAPPING.items():\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: Dict =\t\tnew_key.replace(_lowercase\t\t\t, _lowercase\t\t\t\t)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif \"decoder\" in new_key:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfor old, new in DECODER_CONVERSION_MAPPING.items():\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: int =\t\tnew_key.replace(_lowercase\t\t\t, _lowercase\t\t\t\t)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tif \"layers\" in new_key and \"decoder\" not in new_key:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# use regex to replace the layer number\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: List[Any] =\t\tre.sub(R'''layers_(\\d+)'''\t\t\t, R'''layer.\\1'''\t\t\t, _lowercase\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: Optional[int] =\t\tnew_key.replace('''encoder'''\t\t\t, '''encoder.encoder'''\t\t\t\t)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\telif \"layers\" in new_key and \"decoder\" in new_key:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# use regex to replace the layer number\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: List[str] =\t\tre.sub(R'''layers_(\\d+)'''\t\t\t, R'''layer.\\1'''\t\t\t, _lowercase\t\t\t\t)\n\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: Optional[int] =\t\tflax_dict[key]\n\n\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: str =\t\t{}\n\t\t\t\t\t# convert converted_dict into torch format\n\t\t\t\t\tfor key in converted_dict.keys():\n\t\t\t\t\t\t\t\t\t\tif (\"embed_tokens\" not in key) and (\"embedder\" not in key):\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: str =\t\ttorch.from_numpy(converted_dict[key].T\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: Tuple 
=\t\ttorch.from_numpy(converted_dict[key]\t\t\t\t)\n\n\t\t\t\t\treturn converted_torch_dict\n\n\n\n\ndef lowercase_\t( _lowercase\t\t\t, _lowercase\t\t\t, _lowercase=False\t\t\t, _lowercase=False\t\t\t\t) ->\t\t\t\t\t\t\tList[str]:\n\t\t\t\t\t'''simple docstring'''\n\n\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: str =\t\tget_flax_param(_lowercase\t\t\t\t)\n\n\t\t\t\t\tif not use_large:\n\t\t\t\t\t\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: List[str] =\t\tPixaStructVisionConfig()\n\t\t\t\t\t\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: List[str] =\t\tPixaStructTextConfig()\n\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: Any =\t\tPixaStructVisionConfig(\n\t\t\t\t\t\t\t\t\t\t hidden_size=1_536\t\t\t, d_ff=3_968\t\t\t, num_attention_heads=24\t\t\t, num_hidden_layers=18\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: Any =\t\tPixaStructTextConfig(hidden_size=1_536\t\t\t, d_ff=3_968\t\t\t, num_heads=24\t\t\t, num_layers=18\t\t\t\t)\n\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: Union[str, Any] =\t\tPixaStructConfig(\n\t\t\t\t\t vision_config=encoder_config.to_dict()\t\t\t, text_config=decoder_config.to_dict()\t\t\t, is_vqa=_lowercase\t\t\t\t)\n\n\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: Optional[Any] =\t\tPixaStructForConditionalGeneration(_lowercase\t\t\t\t)\n\n\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: str =\t\trename_and_convert_flax_params(_lowercase\t\t\t\t)\n\t\t\t\t\tmodel.load_state_dict(_lowercase\t\t\t\t)\n\n\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: List[str] =\t\tAutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer'''\t\t\t\t)\n\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: List[str] =\t\tPixaStructImageProcessor()\n\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: int =\t\tPixaStructProcessor(image_processor=_lowercase\t\t\t, tokenizer=_lowercase\t\t\t\t)\n\n\t\t\t\t\tif use_large:\n\t\t\t\t\t\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: List[Any] =\t\t4_096\n\n\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: Tuple =\t\tTrue\n\n\t\t\t\t\t# mkdir if needed\n\t\t\t\t\tos.makedirs(_lowercase\t\t\t, exist_ok=_lowercase\t\t\t\t)\n\n\t\t\t\t\tmodel.save_pretrained(_lowercase\t\t\t\t)\n\t\t\t\t\tprocessor.save_pretrained(_lowercase\t\t\t\t)\n\n\t\t\t\t\tprint('''Model saved in {}'''.format(_lowercase\t\t\t\t)\t\t\t\t)\n\n\nif __name__ == \"__main__\":\n\t\t\t\t\t__lowercase\t\t\t\t\t\t\t: List[Any]\t\t\t\t\t= argparse.ArgumentParser()\n\t\t\t\t\tparser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')\n\t\t\t\t\tparser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')\n\t\t\t\t\tparser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')\n\t\t\t\t\tparser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''')\n\t\t\t\t\t__lowercase\t\t\t\t\t\t\t: int\t\t\t\t\t= parser.parse_args()\n\n\t\t\t\t\tconvert_pixastruct_original_pytorch_checkpoint_to_hf(\n\t\t\t\t\t args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large\n\t\t\t\t\t)\n\n\n"},"code_codestyle":{"kind":"number","value":357,"string":"357"},"style_context":{"kind":"string","value":"\n'''simple docstring'''\n\n\ndef lowercase_\t( _lowercase\t\t\t, _lowercase\t\t\t\t) ->\t\t\t\t\t\t\tDict:\n\t\t\t\t\t'''simple docstring'''\n\n\t\t\t\t\tlowerCamelCase_\t\t\t\t\t: List[Any] =\t\t0\n\t\t\t\t\twhile b > 0:\n\t\t\t\t\t\t\t\t\t\tif b & 1:\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tres += a\n\n\t\t\t\t\t\t\t\t\t\ta += a\n\t\t\t\t\t\t\t\t\t\tb >>= 1\n\n\t\t\t\t\treturn res\n\n\n\n\ndef lowercase_\t( 
a, b, c) -> int:
    # Double-and-add modular multiplication: returns (a * b) % c.
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
"},"style_context_codestyle":{"kind":"number","value":357,"string":"357"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":568,"cells":{"code":{"kind":"string","value":"
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
"},"code_codestyle":{"kind":"number","value":603,"string":"603"},"style_context":{"kind":"string","value":"
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
"},"style_context_codestyle":{"kind":"number","value":603,"string":"603"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":569,"cells":{"code":{"kind":"string","value":"
from __future__ import annotations

from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0

        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """Placeholder for the module's doctests."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"},"code_codestyle":{"kind":"number","value":441,"string":"441"},"style_context":{"kind":"string","value":"
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    # Naive recursion: count ordered ways to reach `target` using values in `array`.
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    # Top-down memoisation over the same recursion.
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    # Bottom-up dynamic-programming table.
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
"},"style_context_codestyle":{"kind":"number","value":441,"string":"441"},"label":{"kind":"number","value":1,"string":"1"}}},
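A quick sanity check for the three implementations above, assuming the reconstructed function names: the ordered ways to write 5 from {1, 2, 5} are 1+1+1+1+1, the four orderings of 1+1+1+2, the three orderings of 1+2+2, and 5 itself, i.e. nine in total (the bottom-up table is dp = [1, 1, 2, 3, 5, 9]).

# Worked check of the sample input (array = [1, 2, 5], target = 5).
assert combination_sum_iv(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_dp_array(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9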
{"rowIdx":570,"cells":{"code":{"kind":"string","value":"
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = µA(x) * µB(x)
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, µA(x) + µB(x)]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, µA(x) - µB(x)]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
"},"code_codestyle":{"kind":"number","value":623,"string":"623"},"style_context":{"kind":"string","value":"
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    A_ = ComputeEnvironment.AMAZON_SAGEMAKER
    A_ = True
    A_ = "ml.p3.2xlarge"
    A_ = "accelerate_sagemaker_execution_role"
    A_ = "hf-sm"
    A_ = "us-east-1"
    A_ = 1
    A_ = "accelerate-sagemaker-1"
    A_ = "1.6"
    A_ = "4.4"
    A_ = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class snake_case_(unittest.TestCase):
    def test_convert_nargs_to_dict(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
"},"style_context_codestyle":{"kind":"number","value":34,"string":"34"},"label":{"kind":"number","value":0,"string":"0"}}},
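For intuition about what the test above checks, here is a rough, hypothetical sketch of turning a "--flag value" argument list into a typed dict. It is not accelerate's actual `_convert_nargs_to_dict` implementation, only an illustration of the idea; the helper name is made up.

def naive_nargs_to_dict(args):
    # Illustrative only: pair "--name value" tokens and coerce obvious types.
    result, i = {}, 0
    while i < len(args):
        key = args[i].lstrip("-")
        if i + 1 < len(args) and not args[i + 1].startswith("--"):
            raw = args[i + 1]
            i += 2
        else:  # bare flag with no value
            raw = "True"
            i += 1
        if raw in ("True", "False"):
            result[key] = raw == "True"
        else:
            try:
                result[key] = int(raw)
            except ValueError:
                try:
                    result[key] = float(raw)
                except ValueError:
                    result[key] = raw
    return result

print(naive_nargs_to_dict(["--model_name_or_path", "bert", "--do_train", "False", "--epochs", "3"]))
# -> {'model_name_or_path': 'bert', 'do_train': False, 'epochs': 3}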
{"rowIdx":571,"cells":{"code":{"kind":"string","value":"
import argparse
import logging
from collections import namedtuple

import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer  # The authors' implementation

from transformers import BertTokenizer


logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
    "BertAbsConfig",
    [
        "temp_dir", "large", "use_bert_emb", "finetune_bert", "encoder", "share_emb",
        "max_pos", "enc_layers", "enc_hidden_size", "enc_heads", "enc_ff_size", "enc_dropout",
        "dec_layers", "dec_hidden_size", "dec_heads", "dec_ff_size", "dec_dropout",
    ],
)


def convert_bertabs_checkpoints(bertabs_checkpoint_path, pytorch_dump_folder_path):
    config = BertAbsConfig(
        temp_dir=".", finetune_bert=__magic_name__, large=__magic_name__, share_emb=__magic_name__,
        use_bert_emb=__magic_name__, encoder="bert", max_pos=512, enc_layers=6, enc_hidden_size=512,
        enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768,
        dec_heads=8, dec_ff_size=2048, dec_dropout=0.2,
    )
    checkpoints = torch.load(bertabs_checkpoint_path, lambda storage, loc: storage)
    original = AbsSummarizer(__magic_name__, torch.device("cpu"), __magic_name__)
    original.eval()

    new_model = BertAbsSummarizer(__magic_name__, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # -----------------------------------
    # Make sure the outputs are identical
    # -----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    snake_case__ = encoder_input_ids
    snake_case__ = decoder_input_ids
    snake_case__ = None
    snake_case__ = None
    snake_case__ = None
    snake_case__ = None
    snake_case__ = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(
        __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__
    )[0]
    output_original_generator = original.generator(__magic_name__)

    output_converted_model = new_model(
        __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__
    )[0]
    output_converted_generator = new_model.generator(__magic_name__)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()

    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
"},"code_codestyle":{"kind":"number","value":419,"string":"419"},"style_context":{"kind":"string","value":"
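The conversion script's final check boils down to comparing two tensors elementwise. A tiny, self-contained illustration of that pattern in plain PyTorch (not tied to BertAbs, values chosen for the example):

import torch

a = torch.tensor([1.0000, 2.0000, 3.0000])
b = torch.tensor([1.0004, 1.9996, 3.0003])

max_abs_diff = torch.max(torch.abs(a - b)).item()
print(f"Maximum absolute difference: {max_abs_diff:.4f}")  # 0.0004
print(torch.allclose(a, b, atol=1e-3))  # True: every element differs by less than 1e-3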
import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        snake_case__ = "SelfAttention"
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        snake_case__ = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        snake_case__ = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        tax_attention_key = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        tax_attention_out = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        tax_attention_query = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        tax_attention_value = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            tax_global_layer_norm = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        tax_attention_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        tax_mlp_wo = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        snake_case__ = tax_attention_key
        snake_case__ = tax_attention_out
        snake_case__ = tax_attention_query
        snake_case__ = tax_attention_value

        snake_case__ = tax_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            snake_case__ = tax_global_layer_norm

        if split_mlp_wi:
            snake_case__ = tax_mlp_wi_0
            snake_case__ = tax_mlp_wi_1
        else:
            snake_case__ = tax_mlp_wi

        snake_case__ = tax_mlp_wo
        snake_case__ = tax_mlp_layer_norm

        snake_case__ = flax_model_encoder_layer_block

    # Only for layer 0:
    tax_encoder_rel_embedding = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    snake_case__ = tax_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        tax_encoder_global_rel_embedding = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        snake_case__ = tax_encoder_global_rel_embedding

    # Assigning
    tax_encoder_norm = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
    snake_case__ = tax_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        tax_attention_key = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        tax_attention_out = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        tax_attention_query = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        tax_attention_value = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        tax_pre_attention_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        tax_enc_dec_attention_module = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        tax_enc_dec_attention_key = tax_enc_dec_attention_module["key"]["kernel"]
        tax_enc_dec_attention_out = tax_enc_dec_attention_module["out"]["kernel"]
        tax_enc_dec_attention_query = tax_enc_dec_attention_module["query"]["kernel"]
        tax_enc_dec_attention_value = tax_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        tax_cross_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        tax_mlp_wo = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        snake_case__ = tax_attention_key
        snake_case__ = tax_attention_out
        snake_case__ = tax_attention_query
        snake_case__ = tax_attention_value

        snake_case__ = tax_pre_attention_layer_norm

        snake_case__ = tax_enc_dec_attention_key
        snake_case__ = tax_enc_dec_attention_out
        snake_case__ = tax_enc_dec_attention_query
        snake_case__ = tax_enc_dec_attention_value

        snake_case__ = tax_cross_layer_norm

        if split_mlp_wi:
            snake_case__ = tax_mlp_wi_0
            snake_case__ = tax_mlp_wi_1
        else:
            snake_case__ = tax_mlp_wi

        snake_case__ = tax_mlp_wo
        snake_case__ = tax_mlp_layer_norm

        snake_case__ = flax_model_decoder_layer_block

    # Decoder Normalization
    tax_decoder_norm = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    snake_case__ = tax_decoder_norm

    # Only for layer 0:
    tax_decoder_rel_embedding = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    snake_case__ = tax_decoder_rel_embedding

    # Token Embeddings
    tax_token_embeddings = tax_model["target"]["token_embedder"]["embedding"]
    snake_case__ = tax_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        snake_case__ = tax_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
"},"style_context_codestyle":{"kind":"number","value":419,"string":"419"},"label":{"kind":"number","value":1,"string":"1"}}},
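The loop above is essentially a long series of "read an array out of one nested parameter dict, write it into another" steps (the original assignment targets into the Flax tree are not preserved in this dump). A small, framework-free sketch of that pattern; the dict keys here are made up for illustration:

import numpy as np

# Hypothetical source (T5X-style) and target (Flax-style) parameter trees.
source = {"target": {"encoder": {"layers_0": {"attention": {"key": {"kernel": np.ones((4, 4))}}}}}}
target = {"encoder": {"block": {"0": {"layer": {"attention": {"k": {"kernel": np.zeros((4, 4))}}}}}}}

# Copy one leaf across, the same way the conversion loop does for every layer and weight.
kernel = source["target"]["encoder"]["layers_0"]["attention"]["key"]["kernel"]
target["encoder"]["block"]["0"]["layer"]["attention"]["k"]["kernel"] = kernel

assert np.array_equal(target["encoder"]["block"]["0"]["layer"]["attention"]["k"]["kernel"], kernel)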
{"rowIdx":572,"cells":{"code":{"kind":"string","value":"
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id)

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=__A,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=__A,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=__A,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"},"code_codestyle":{"kind":"number","value":36,"string":"36"},"style_context":{"kind":"string","value":"
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
"},"style_context_codestyle":{"kind":"number","value":426,"string":"426"},"label":{"kind":"number","value":0,"string":"0"}}},
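A quick check of the formula h_n = n(2n - 1) used above, assuming the reconstructed function name:

# First five hexagonal numbers, starting from n = 0.
assert hexagonal_numbers(length=5) == [0, 1, 6, 15, 28]
assert hexagonal_numbers(length=10)[9] == 9 * (2 * 9 - 1) == 153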
{"rowIdx":573,"cells":{"code":{"kind":"string","value":"
from collections import deque


class Process:
    def __init__(self, process_name, arrival_time, burst_time) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time the process waits in the ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(self, number_of_queues, time_slices, queue, current_time) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue) -> deque[Process]:
        finished = deque()  # sequence deque of finished processes
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time to 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to the finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished processes to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue, time_slice) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated processes
        # just for 1 cycle; unfinished processes will go back to the queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of the process is bigger than the time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # put the process at the back of the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time to 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process's turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to the finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished processes to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one use the round-robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue uses the first-come-first-served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue


if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes (P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes (P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:\n{mlfq.calculate_sequence_of_finish_queue()}")
"},"code_codestyle":{"kind":"number","value":718,"string":"718"},"style_context":{"kind":"string","value":"
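For intuition, pure FCFS on the four sample processes (all arriving at t = 0 with bursts 53/17/68/24) gives waiting times [0, 53, 70, 138] and completion times [53, 70, 138, 162]. A minimal check, assuming the reconstructed class and method names above:

from collections import deque

fcfs = MLFQ(1, [], deque([Process("P1", 0, 53), Process("P2", 0, 17), Process("P3", 0, 68), Process("P4", 0, 24)]), 0)
done = fcfs.first_come_first_served(fcfs.ready_queue)
assert fcfs.calculate_waiting_time(list(done)) == [0, 53, 70, 138]
assert fcfs.calculate_completion_time(list(done)) == [53, 70, 138, 162]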
import inspect
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch
import torch.utils.checkpoint

from ...models import UNet2DModel, VQModel
from ...schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        batch_size=1,
        num_inference_steps=100,
        eta=0.0,
        generator=None,
        output_type="pil",
        return_dict=True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
"},"style_context_codestyle":{"kind":"number","value":270,"string":"270"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":574,"cells":{"code":{"kind":"string","value":"
import gc
import unittest

from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_images = shard(processed_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
"},"code_codestyle":{"kind":"number","value":489,"string":"489"},"style_context":{"kind":"string","value":"
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A color is valid if no already-colored adjacent vertex uses it.
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
"},"style_context_codestyle":{"kind":"number","value":489,"string":"489"},"label":{"kind":"number","value":1,"string":"1"}}},
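A small usage check for the backtracking colorer above: a triangle needs three colors, and two are not enough (assuming the reconstructed names).

# 3-vertex complete graph (triangle) as an adjacency matrix.
triangle = [
    [0, 1, 1],
    [1, 0, 1],
    [1, 1, 0],
]
assert color(triangle, 3) == [0, 1, 2]  # first valid assignment found by backtracking
assert color(triangle, 2) == []         # no valid 2-coloring exists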
attention_cell=predefined_args['''attention_cell''']\t\t\t\t\t\t\t,num_layers=predefined_args['''num_layers''']\t\t\t\t\t\t\t,units=predefined_args['''units''']\t\t\t\t\t\t\t,hidden_size=predefined_args['''hidden_size''']\t\t\t\t\t\t\t,max_length=predefined_args['''max_length''']\t\t\t\t\t\t\t,num_heads=predefined_args['''num_heads''']\t\t\t\t\t\t\t,scaled=predefined_args['''scaled''']\t\t\t\t\t\t\t,dropout=predefined_args['''dropout''']\t\t\t\t\t\t\t,output_attention=__A\t\t\t\t\t\t\t,output_all_encodings=__A\t\t\t\t\t\t\t,use_residual=predefined_args['''use_residual''']\t\t\t\t\t\t\t,activation=predefined_args.get('''activation'''\t\t\t\t\t\t\t,'''gelu'''\t\t\t\t)\t\t\t\t\t\t\t,layer_norm_eps=predefined_args.get('''layer_norm_eps'''\t\t\t\t\t\t\t,__A\t\t\t\t)\t\t\t\t\t\t\t,)\n\n # Vocab information needs to be fetched first\n # It's the same as RoBERTa, so RobertaTokenizer can be used later\n __magic_name__\t\t\t\t\t\t\t:\tList[str] \t\t\t\t\t\t\t=\t\t\t\t\t\t\t\"openwebtext_ccnews_stories_books_cased\"\n\n # Specify download folder to Gluonnlp's vocab\n __magic_name__\t\t\t\t\t\t\t:\tTuple \t\t\t\t\t\t\t=\t\t\t\t\t\t\tos.path.join(get_home_dir()\t\t\t\t\t\t\t,'''models'''\t\t\t\t)\n __magic_name__\t\t\t\t\t\t\t:\tTuple \t\t\t\t\t\t\t=\t\t\t\t\t\t\t_load_vocab(__A\t\t\t\t\t\t\t,__A\t\t\t\t\t\t\t,__A\t\t\t\t\t\t\t,cls=__A\t\t\t\t)\n\n __magic_name__\t\t\t\t\t\t\t:\tAny \t\t\t\t\t\t\t=\t\t\t\t\t\t\tnlp.model.BERTModel(\n __A\t\t\t\t\t\t\t,len(__A\t\t\t\t)\t\t\t\t\t\t\t,units=predefined_args['''units''']\t\t\t\t\t\t\t,embed_size=predefined_args['''embed_size''']\t\t\t\t\t\t\t,embed_dropout=predefined_args['''embed_dropout''']\t\t\t\t\t\t\t,word_embed=predefined_args['''word_embed''']\t\t\t\t\t\t\t,use_pooler=__A\t\t\t\t\t\t\t,use_token_type_embed=__A\t\t\t\t\t\t\t,token_type_vocab_size=predefined_args['''token_type_vocab_size''']\t\t\t\t\t\t\t,use_classifier=__A\t\t\t\t\t\t\t,use_decoder=__A\t\t\t\t\t\t\t,)\n\n original_bort.load_parameters(__A\t\t\t\t\t\t\t,cast_dtype=__A\t\t\t\t\t\t\t,ignore_extra=__A\t\t\t\t)\n __magic_name__\t\t\t\t\t\t\t:\tstr \t\t\t\t\t\t\t=\t\t\t\t\t\t\toriginal_bort._collect_params_with_prefix()\n\n # Build our config 🤗\n __magic_name__\t\t\t\t\t\t\t:\tUnion[str, Any] \t\t\t\t\t\t\t=\t\t\t\t\t\t\t{\n \"architectures\": [\"BertForMaskedLM\"],\n \"attention_probs_dropout_prob\": predefined_args[\"dropout\"],\n \"hidden_act\": \"gelu\",\n \"hidden_dropout_prob\": predefined_args[\"dropout\"],\n \"hidden_size\": predefined_args[\"embed_size\"],\n \"initializer_range\": 0.02,\n \"intermediate_size\": predefined_args[\"hidden_size\"],\n \"layer_norm_eps\": predefined_args[\"layer_norm_eps\"],\n \"max_position_embeddings\": predefined_args[\"max_length\"],\n \"model_type\": \"bort\",\n \"num_attention_heads\": predefined_args[\"num_heads\"],\n \"num_hidden_layers\": predefined_args[\"num_layers\"],\n \"pad_token_id\": 1, # 2 = BERT, 1 = RoBERTa\n \"type_vocab_size\": 1, # 2 = BERT, 1 = RoBERTa\n \"vocab_size\": len(__A\t\t\t\t),\n }\n\n __magic_name__\t\t\t\t\t\t\t:\tstr \t\t\t\t\t\t\t=\t\t\t\t\t\t\tBertConfig.from_dict(__A\t\t\t\t)\n __magic_name__\t\t\t\t\t\t\t:\tList[str] \t\t\t\t\t\t\t=\t\t\t\t\t\t\tBertForMaskedLM(__A\t\t\t\t)\n hf_bort_model.eval()\n\n # Parameter mapping table (Gluonnlp to Transformers)\n # * denotes layer index\n #\n # | Gluon Parameter | Transformers Parameter\n # | -------------------------------------------------------------- | ----------------------\n # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`\n # | 
`encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`\n # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`\n # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`\n # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`\n # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`\n # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`\n # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`\n # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`\n # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`\n # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`\n # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`\n # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`\n # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`\n # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`\n # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`\n # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`\n # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`\n # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`\n # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`\n\n # Helper function to convert MXNET Arrays to PyTorch\n def to_torch(__A:\tOptional[Any]\t\t\t\t) -> nn.Parameter:\n return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()\t\t\t\t)\t\t\t\t)\n\n # Check param shapes and map new HF param back\n def check_and_map_params(__A:\tAny\t\t\t\t\t\t\t,__A:\tUnion[str, Any]\t\t\t\t):\n __magic_name__\t\t\t\t\t\t\t:\tOptional[int] \t\t\t\t\t\t\t=\t\t\t\t\t\t\thf_param.shape\n\n __magic_name__\t\t\t\t\t\t\t:\tint \t\t\t\t\t\t\t=\t\t\t\t\t\t\tto_torch(params[gluon_param]\t\t\t\t)\n __magic_name__\t\t\t\t\t\t\t:\tList[str] \t\t\t\t\t\t\t=\t\t\t\t\t\t\tgluon_param.shape\n\n assert (\n shape_hf == shape_gluon\n ), F'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''\n\n return gluon_param\n\n __magic_name__\t\t\t\t\t\t\t:\tUnion[str, Any] \t\t\t\t\t\t\t=\t\t\t\t\t\t\tcheck_and_map_params(\n hf_bort_model.bert.embeddings.word_embeddings.weight\t\t\t\t\t\t\t,'''word_embed.0.weight'''\t\t\t\t)\n __magic_name__\t\t\t\t\t\t\t:\tint \t\t\t\t\t\t\t=\t\t\t\t\t\t\tcheck_and_map_params(\n hf_bort_model.bert.embeddings.position_embeddings.weight\t\t\t\t\t\t\t,'''encoder.position_weight'''\t\t\t\t)\n __magic_name__\t\t\t\t\t\t\t:\tint \t\t\t\t\t\t\t=\t\t\t\t\t\t\tcheck_and_map_params(\n hf_bort_model.bert.embeddings.LayerNorm.bias\t\t\t\t\t\t\t,'''encoder.layer_norm.beta'''\t\t\t\t)\n __magic_name__\t\t\t\t\t\t\t:\tAny \t\t\t\t\t\t\t=\t\t\t\t\t\t\tcheck_and_map_params(\n hf_bort_model.bert.embeddings.LayerNorm.weight\t\t\t\t\t\t\t,'''encoder.layer_norm.gamma'''\t\t\t\t)\n\n # 
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(__A)["input_ids"]

    # Get gluon output
    __magic_name__ = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=__A, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(__A)
    hf_bort_model = BertModel.from_pretrained(__A)
    hf_bort_model.eval()

    __magic_name__ = tokenizer.encode_plus(__A, return_tensors="pt")
    output_hf = hf_bort_model(**__A)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(__A, __A, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", __A)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
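# ----- editor's note: usage sketch, not part of the original dump -----
# A minimal way to sanity-check a converted Bort checkpoint after running the conversion
# script above. The dump folder path is an assumption; the tokenizer choice mirrors the
# script's own verification step (Bort reuses the RoBERTa vocabulary).
import torch
from transformers import BertModel, RobertaTokenizer


def quick_check(dump_folder: str = "./bort-converted") -> None:
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    model = BertModel.from_pretrained(dump_folder)
    model.eval()
    inputs = tokenizer("A sample sentence.", return_tensors="pt")
    with torch.no_grad():
        hidden = model(**inputs)[0]
    print("last hidden state shape:", tuple(hidden.shape))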
# ===== same dump row, "style_context" field (code_codestyle: 719) =====

import json
import os
import unittest

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin


class lowerCamelCase(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    UpperCamelCase__ = BlenderbotSmallTokenizer
    UpperCamelCase__ = False

    def setUp(self):
        super().setUp()

        __magic_name__ = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        __magic_name__ = dict(zip(lowerCamelCase_, range(len(lowerCamelCase_))))

        __magic_name__ = ["#version: 0.2", "a p", "t e", "ap t", "a d", "ad apt", "a c", "ac t", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(lowerCamelCase_) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(lowerCamelCase_))

    def UpperCAmelCase__(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def UpperCAmelCase__(self, lowerCamelCase_):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def UpperCAmelCase__(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        __magic_name__ = "adapt act apte"
        __magic_name__ = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(lowerCamelCase_)
        self.assertListEqual(lowerCamelCase_, lowerCamelCase_)

        __magic_name__ = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        __magic_name__ = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_), lowerCamelCase_)

    def UpperCAmelCase__(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        __magic_name__ = tok([src_text], padding=lowerCamelCase_, truncation=lowerCamelCase_)["input_ids"]
        decoded = tok.batch_decode(lowerCamelCase_, skip_special_tokens=lowerCamelCase_, clean_up_tokenization_spaces=lowerCamelCase_)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def UpperCAmelCase__(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        __magic_name__ = "I am a small frog ."
        __magic_name__ = "."
        encoded = tok(lowerCamelCase_)["input_ids"]
        encoded_dot = tok(lowerCamelCase_)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
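# ----- editor's note: usage sketch, not part of the original dump -----
# A stand-alone illustration of the toy vocab/merges built in the test's setUp above: the BPE
# merges split "apte" into "ap@@" + "te". Temporary file paths are used here; this assumes
# `transformers` is installed and mirrors the expectation asserted in the test.
import json
import os
import tempfile

from transformers import BlenderbotSmallTokenizer

vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
merges = ["#version: 0.2", "a p", "t e", "ap t", "a d", "ad apt", "a c", "ac t", ""]
tmp = tempfile.mkdtemp()
vocab_file = os.path.join(tmp, "vocab.json")
merges_file = os.path.join(tmp, "merges.txt")
with open(vocab_file, "w", encoding="utf-8") as f:
    f.write(json.dumps(dict(zip(vocab, range(len(vocab))))))
with open(merges_file, "w", encoding="utf-8") as f:
    f.write("\n".join(merges))
tok = BlenderbotSmallTokenizer(vocab_file, merges_file, unk_token="__unk__", bos_token="__start__", eos_token="__end__")
print(tok.tokenize("adapt act apte"))  # per the test above: ['adapt', 'act', 'ap@@', 'te']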
# ===== dump row 576, "code" field (previous row: style_context_codestyle 501, label 0) =====

from __future__ import annotations

from typing import Any


class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
\t\t\tSCREAMING_SNAKE_CASE\t\t\t\t(\t\t\tself\t\t\t\t\t\t\t:\t\t\tstr\t,\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t:\t\t\tMatrix\t,\t\t\t\tSCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t:\t\t\tMatrix\t)\t\t\t\t\t\t-> Any:\r\n assert isinstance(SCREAMING_SNAKE_CASE__\t,\t\t\t\tSCREAMING_SNAKE_CASE__\t) and isinstance(SCREAMING_SNAKE_CASE__\t,\t\t\t\tSCREAMING_SNAKE_CASE__\t)\r\n assert self.row == self.column == u.row == v.row # u, v should be column vector\r\n assert u.column == v.column == 1 # u, v should be column vector\r\n\r\n # Calculate\r\n a_ :\t\t\t\t\t\tList[Any]\t\t\t\t= v.transpose()\r\n a_ :\t\t\t\t\t\tUnion[str, Any]\t\t\t\t= (v_t * self * u)[0, 0] + 1\r\n if numerator_factor == 0:\r\n return None # It's not invertable\r\n return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))\r\n\r\n\r\n# Testing\r\nif __name__ == \"__main__\":\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\tSCREAMING_SNAKE_CASE_\t\t( ) ->\t\t\t\t\t\tNone:\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n a_ :\t\t\t\t\t\tAny\t\t\t\t= Matrix(3 , 3 , 0\t\t\t\t\t\t\t)\r\n for i in range(3\t\t\t\t\t\t\t):\r\n a_ :\t\t\t\t\t\tint\t\t\t\t= 1\r\n print(F\"\"\"a^(-1) is {ainv}\"\"\"\t\t\t\t\t\t\t)\r\n # u, v\r\n a_ :\t\t\t\t\t\tDict\t\t\t\t= Matrix(3 , 1 , 0\t\t\t\t\t\t\t)\r\n a_\t\t\t\t,\t\t\t\t\t\ta_\t\t\t\t,\t\t\t\t\t\ta_ :\t\t\t\t\t\tTuple\t\t\t\t= 1, 2, -3\r\n a_ :\t\t\t\t\t\tList[Any]\t\t\t\t= Matrix(3 , 1 , 0\t\t\t\t\t\t\t)\r\n a_\t\t\t\t,\t\t\t\t\t\ta_\t\t\t\t,\t\t\t\t\t\ta_ :\t\t\t\t\t\tUnion[str, Any]\t\t\t\t= 4, -2, 5\r\n print(F\"\"\"u is {u}\"\"\"\t\t\t\t\t\t\t)\r\n print(F\"\"\"v is {v}\"\"\"\t\t\t\t\t\t\t)\r\n print(F\"\"\"uv^T is {u * v.transpose()}\"\"\"\t\t\t\t\t\t\t)\r\n # Sherman Morrison\r\n print(F\"\"\"(a + uv^T)^(-1) is {ainv.sherman_morrison(__A , __A\t\t\t\t\t\t\t)}\"\"\"\t\t\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n def \t\t\tSCREAMING_SNAKE_CASE_\t\t( ) ->\t\t\t\t\t\tNone:\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n import doctest\r\n\r\n doctest.testmod()\r\n testa()\r\n\r\n\r\n"},"code_codestyle":{"kind":"number","value":570,"string":"570"},"style_context":{"kind":"string","value":"\r\n\r\n\r\nfrom ..utils import DummyObject, requires_backends\r\nclass \t\t\t\t\tSCREAMING_SNAKE_CASE__ (\t\t\t\t\t\t\tmetaclass=lowercase__ ):\r\n snake_case__\t\t\t\t\t\t\t:\tList[str]\t =\t\t['''onnx''']\r\n def __init__(\t\t\tself\t\t\t\t\t\t\t:\t\t\tList[Any]\t,\t\t\t\t*SCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t:\t\t\tDict\t,\t\t\t\t**SCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t:\t\t\tDict\t)\t\t\t\t\t\t-> Any:\r\n requires_backends(self\t,\t\t\t\t['onnx']\t)\r\n @classmethod\r\n def \t\t\tSCREAMING_SNAKE_CASE\t\t\t\t(\t\t\tcls\t\t\t\t\t\t\t:\t\t\tList[Any]\t,\t\t\t\t*SCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t:\t\t\tint\t,\t\t\t\t**SCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t:\t\t\tAny\t)\t\t\t\t\t\t-> Optional[int]:\r\n requires_backends(cls\t,\t\t\t\t['onnx']\t)\r\n\r\n @classmethod\r\n def \t\t\tSCREAMING_SNAKE_CASE\t\t\t\t(\t\t\tcls\t\t\t\t\t\t\t:\t\t\tstr\t,\t\t\t\t*SCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t:\t\t\tDict\t,\t\t\t\t**SCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t:\t\t\tList[Any]\t)\t\t\t\t\t\t-> Union[str, Any]:\r\n requires_backends(cls\t,\t\t\t\t['onnx']\t)\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":570,"string":"570"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":577,"cells":{"code":{"kind":"string","value":"\n'''simple docstring'''\n\n\n\n\n\n\n\nimport unittest\n\nfrom huggingface_hub import hf_hub_download\n\nfrom transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, 
# ===== dump row 576, "style_context" field (code_codestyle: 570) =====

from ..utils import DummyObject, requires_backends


class SCREAMING_SNAKE_CASE__(metaclass=DummyObject):
    snake_case__ = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def SCREAMING_SNAKE_CASE(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def SCREAMING_SNAKE_CASE(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])


# ===== dump row 577, "code" field (row 576: style_context_codestyle 570, label 1) =====

import unittest

from huggingface_hub import hf_hub_download

from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_decord,
    require_tf,
    require_torch,
    require_torch_or_tf,
    require_vision,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCAmelCase(unittest.TestCase):
    a = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def lowercase(self, UpperCamelCase, UpperCamelCase, UpperCamelCase):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=UpperCamelCase, image_processor=UpperCamelCase, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def lowercase(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(UpperCamelCase), "label": ANY(UpperCamelCase)},
                    {"score": ANY(UpperCamelCase), "label": ANY(UpperCamelCase)},
                ],
            )

    @require_torch
    def lowercase(self):
        _SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        _SCREAMING_SNAKE_CASE = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=UpperCamelCase, feature_extractor=UpperCamelCase, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def lowercase(self):
        pass
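# ----- editor's note: usage sketch, not part of the original dump -----
# The test above exercises transformers' "video-classification" pipeline; a minimal direct use
# could look like this. The checkpoint is the tiny test model referenced in the test and is used
# only for illustration; a video backend such as decord must be installed.
from huggingface_hub import hf_hub_download
from transformers import pipeline

video_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
classifier = pipeline(
    "video-classification", model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
)
print(classifier(video_path, top_k=2))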
# ===== dump row 577, "style_context" field (code_codestyle: 493) =====

import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall("^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall("^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall("^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo.
        # This way we get the benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
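# ----- editor's note: usage sketch, not part of the original dump -----
# get_relative_imports() / check_imports() above can be exercised on a throwaway module file;
# get_class_from_dynamic_module() is the entry point diffusers reaches when loading a community
# pipeline via DiffusionPipeline.from_pretrained(..., custom_pipeline=...). This snippet assumes
# the helpers above are importable; the file name and its contents are made up for the demo.
import os
import tempfile

demo_dir = tempfile.mkdtemp()
demo_file = os.path.join(demo_dir, "demo_pipeline.py")
with open(demo_file, "w", encoding="utf-8") as f:
    f.write("import json\nfrom .helper_module import something\n")

print(get_relative_imports(demo_file))  # expected: ['helper_module']
print(check_imports(demo_file))         # 'json' is importable, so only the relative import is returned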
# ===== dump row 578, "code" field (row 577: style_context_codestyle 493, label 1) =====

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel


else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
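# ----- editor's note: usage sketch, not part of the original dump -----
# The lazy __init__ above means user code simply imports the UperNet classes from the top-level
# transformers package. Building a model from a default config is shown only as an illustration
# and may be heavy; it assumes torch and transformers are installed.
from transformers import UperNetConfig, UperNetForSemanticSegmentation

config = UperNetConfig()
model = UperNetForSemanticSegmentation(config)
print("parameter count:", sum(p.numel() for p in model.parameters()))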
# ===== dump row 578, "style_context" field (code_codestyle: 257) =====

import argparse

import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor

from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor


def get_config(checkpoint_url):
    """simple docstring"""
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        a__ = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        a__ = 4
        a__ = 48
        a__ = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        a__ = [6, 6, 6, 6]
        a__ = 60
        a__ = [6, 6, 6, 6]
        a__ = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        a__ = 4
        a__ = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        a__ = 1
        a__ = 1
        a__ = 126
        a__ = 7
        a__ = 255.0
        a__ = ""

    return config


def rename_key(name, config):
    """simple docstring"""
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name


def convert_state_dict(orig_state_dict, config):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            a__ = int(key_split[1])
            a__ = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                a__ = val[:dim, :]
                a__ = val[dim : dim * 2, :]
                a__ = val[-dim:, :]
            else:
                a__ = val[:dim]
                a__ = val[dim : dim * 2]
                a__ = val[-dim:]
            pass
        else:
            a__ = val

    return orig_state_dict


def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """simple docstring"""
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")
    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
        type=str,
        help="URL of the original Swin2SR checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")

    args = parser.parse_args()
    convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)


# ===== dump row 579, "code" field (row 578: style_context_codestyle 563, label 0) =====

"""simple docstring"""

from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """simple docstring"""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
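# ----- editor's note: the update rule above plus a tiny check, not part of the original dump -----
# euler_modified() implements Heun's method (the modified/improved Euler method):
#     y_predict = y_k + h * f(x_k, y_k)
#     y_{k+1}   = y_k + (h / 2) * (f(x_k, y_k) + f(x_k + h, y_predict))
# A quick check on dy/dx = y with y(0) = 1, whose exact solution is e^x; this assumes the
# function above is in scope.
import numpy as np

approx = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(approx[-1], np.e)  # the final value should be close to e ≈ 2.71828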
# ===== dump row 579, "style_context" field (code_codestyle: 160) =====

"""simple docstring"""


def gcd(a, b):
    """simple docstring"""
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a, m):
    """simple docstring"""
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
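# ----- editor's note: usage sketch, not part of the original dump -----
# mod_inverse(a, m) returns the x with (a * x) % m == 1, computed via the extended Euclidean
# algorithm (names as restored above). For example:
print(mod_inverse(3, 11))   # 4, because (3 * 4) % 11 == 1
print(mod_inverse(7, 26))   # 15, because (7 * 15) % 26 == 1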
\"\r A__\t\t\t\t\t\t\t= [\"▁hello\", \"!\", \"how\", \"▁are\", \"▁you\", \"?\"]\r # fmt: on\r\r A__\t\t\t\t\t\t\t= DebertaVaTokenizer(a__\t\t,\t\t\tdo_lower_case=a__\t\t\t\t)\r A__\t\t\t\t\t\t\t= tokenizer.convert_ids_to_tokens(tokenizer.encode(a__\t\t,\t\t\tadd_special_tokens=a__\t\t\t\t)\t\t\t\t)\r\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r\r A__\t\t\t\t\t\t\t= DebertaVaTokenizerFast(a__\t\t,\t\t\tdo_lower_case=a__\t\t\t\t)\r A__\t\t\t\t\t\t\t= rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__\t\t,\t\t\tadd_special_tokens=a__\t\t\t\t)\t\t\t\t)\r\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r @unittest.skip(\"There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.\"\t\t\t\t)\r def \tUpperCAmelCase_\t\t\t\t(\t\t\t\t\t\t\tself :Optional[int]\t\t\t\t)->\t\t\t\tOptional[Any]:\r pass\r @unittest.skip(\"There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.\"\t\t\t\t)\r def \tUpperCAmelCase_\t\t\t\t(\t\t\t\t\t\t\tself :List[Any]\t\t\t\t)->\t\t\t\tint:\r pass\r def \tUpperCAmelCase_\t\t\t\t(\t\t\t\t\t\t\tself :Dict\t\t\t\t)->\t\t\t\tList[str]:\r # fmt: off\r A__\t\t\t\t\t\t\t= \"I was born in 92000, and this is falsé.\"\r A__\t\t\t\t\t\t\t= [\"▁\", \"\", \"▁was\", \"▁born\", \"▁in\", \"▁9\", \"2000\", \"▁\", \",\", \"▁and\", \"▁this\", \"▁is\", \"▁fal\", \"s\", \"\", \"▁\", \".\", ]\r # fmt: on\r\r A__\t\t\t\t\t\t\t= DebertaVaTokenizer(a__\t\t,\t\t\tsplit_by_punct=a__\t\t\t\t)\r A__\t\t\t\t\t\t\t= tokenizer.convert_ids_to_tokens(tokenizer.encode(a__\t\t,\t\t\tadd_special_tokens=a__\t\t\t\t)\t\t\t\t)\r\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r\r A__\t\t\t\t\t\t\t= DebertaVaTokenizerFast(a__\t\t,\t\t\tsplit_by_punct=a__\t\t\t\t)\r A__\t\t\t\t\t\t\t= rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__\t\t,\t\t\tadd_special_tokens=a__\t\t\t\t)\t\t\t\t)\r\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r def \tUpperCAmelCase_\t\t\t\t(\t\t\t\t\t\t\tself :List[Any]\t\t\t\t)->\t\t\t\tList[Any]:\r # fmt: off\r A__\t\t\t\t\t\t\t= \"I was born in 92000, and this is falsé.\"\r A__\t\t\t\t\t\t\t= [\"▁i\", \"▁was\", \"▁born\", \"▁in\", \"▁9\", \"2000\", \"▁\", \",\", \"▁and\", \"▁this\", \"▁is\", \"▁fal\", \"s\", \"\", \"▁\", \".\", ]\r # fmt: on\r\r A__\t\t\t\t\t\t\t= DebertaVaTokenizer(a__\t\t,\t\t\tdo_lower_case=a__\t\t,\t\t\tsplit_by_punct=a__\t\t\t\t)\r A__\t\t\t\t\t\t\t= tokenizer.convert_ids_to_tokens(tokenizer.encode(a__\t\t,\t\t\tadd_special_tokens=a__\t\t\t\t)\t\t\t\t)\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r\r A__\t\t\t\t\t\t\t= DebertaVaTokenizerFast(a__\t\t,\t\t\tdo_lower_case=a__\t\t,\t\t\tsplit_by_punct=a__\t\t\t\t)\r A__\t\t\t\t\t\t\t= rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__\t\t,\t\t\tadd_special_tokens=a__\t\t\t\t)\t\t\t\t)\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r def \tUpperCAmelCase_\t\t\t\t(\t\t\t\t\t\t\tself :Any\t\t\t\t)->\t\t\t\tOptional[int]:\r # fmt: off\r A__\t\t\t\t\t\t\t= \"I was born in 92000, and this is falsé.\"\r A__\t\t\t\t\t\t\t= [\"▁i\", \"▁was\", \"▁born\", \"▁in\", \"▁9\", \"2000\", \",\", \"▁and\", \"▁this\", \"▁is\", \"▁fal\", \"s\", \"\", \".\", ]\r # fmt: on\r\r A__\t\t\t\t\t\t\t= DebertaVaTokenizer(a__\t\t,\t\t\tdo_lower_case=a__\t\t,\t\t\tsplit_by_punct=a__\t\t\t\t)\r A__\t\t\t\t\t\t\t= tokenizer.convert_ids_to_tokens(tokenizer.encode(a__\t\t,\t\t\tadd_special_tokens=a__\t\t\t\t)\t\t\t\t)\r\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r\r A__\t\t\t\t\t\t\t= 
DebertaVaTokenizerFast(a__\t\t,\t\t\tdo_lower_case=a__\t\t,\t\t\tsplit_by_punct=a__\t\t\t\t)\r A__\t\t\t\t\t\t\t= rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__\t\t,\t\t\tadd_special_tokens=a__\t\t\t\t)\t\t\t\t)\r\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r def \tUpperCAmelCase_\t\t\t\t(\t\t\t\t\t\t\tself :Optional[Any]\t\t\t\t)->\t\t\t\tstr:\r # fmt: off\r A__\t\t\t\t\t\t\t= \"I was born in 92000, and this is falsé.\"\r A__\t\t\t\t\t\t\t= [\"▁\", \"\", \"▁was\", \"▁born\", \"▁in\", \"▁9\", \"2000\", \"▁\", \",\", \"▁and\", \"▁this\", \"▁is\", \"▁fal\", \"s\", \"\", \"▁\", \".\", ]\r # fmt: on\r\r A__\t\t\t\t\t\t\t= DebertaVaTokenizer(a__\t\t,\t\t\tdo_lower_case=a__\t\t,\t\t\tsplit_by_punct=a__\t\t\t\t)\r A__\t\t\t\t\t\t\t= tokenizer.convert_ids_to_tokens(tokenizer.encode(a__\t\t,\t\t\tadd_special_tokens=a__\t\t\t\t)\t\t\t\t)\r\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r\r A__\t\t\t\t\t\t\t= DebertaVaTokenizerFast(a__\t\t,\t\t\tdo_lower_case=a__\t\t,\t\t\tsplit_by_punct=a__\t\t\t\t)\r A__\t\t\t\t\t\t\t= rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__\t\t,\t\t\tadd_special_tokens=a__\t\t\t\t)\t\t\t\t)\r\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r def \tUpperCAmelCase_\t\t\t\t(\t\t\t\t\t\t\tself :Union[str, Any]\t\t\t\t)->\t\t\t\tDict:\r # fmt: off\r A__\t\t\t\t\t\t\t= \" \\tHeLLo!how \\n Are yoU? \"\r A__\t\t\t\t\t\t\t= [\"▁\", \"\", \"e\", \"\", \"o\", \"!\", \"how\", \"▁\", \"\", \"re\", \"▁yo\", \"\", \"?\"]\r # fmt: on\r\r A__\t\t\t\t\t\t\t= DebertaVaTokenizer(a__\t\t,\t\t\tdo_lower_case=a__\t\t,\t\t\tsplit_by_punct=a__\t\t\t\t)\r A__\t\t\t\t\t\t\t= tokenizer.convert_ids_to_tokens(tokenizer.encode(a__\t\t,\t\t\tadd_special_tokens=a__\t\t\t\t)\t\t\t\t)\r\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r\r A__\t\t\t\t\t\t\t= DebertaVaTokenizerFast(a__\t\t,\t\t\tdo_lower_case=a__\t\t,\t\t\tsplit_by_punct=a__\t\t\t\t)\r A__\t\t\t\t\t\t\t= rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__\t\t,\t\t\tadd_special_tokens=a__\t\t\t\t)\t\t\t\t)\r\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r def \tUpperCAmelCase_\t\t\t\t(\t\t\t\t\t\t\tself :str\t\t\t\t)->\t\t\t\tOptional[Any]:\r A__\t\t\t\t\t\t\t= self.get_tokenizer()\r A__\t\t\t\t\t\t\t= self.get_rust_tokenizer()\r\r A__\t\t\t\t\t\t\t= \"I was born in 92000, and this is falsé.\"\r\r A__\t\t\t\t\t\t\t= tokenizer.convert_ids_to_tokens(tokenizer.encode(a__\t\t,\t\t\tadd_special_tokens=a__\t\t\t\t)\t\t\t\t)\r A__\t\t\t\t\t\t\t= rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__\t\t,\t\t\tadd_special_tokens=a__\t\t\t\t)\t\t\t\t)\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r\r A__\t\t\t\t\t\t\t= tokenizer.encode(a__\t\t,\t\t\tadd_special_tokens=a__\t\t\t\t)\r A__\t\t\t\t\t\t\t= rust_tokenizer.encode(a__\t\t,\t\t\tadd_special_tokens=a__\t\t\t\t)\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r\r A__\t\t\t\t\t\t\t= self.get_rust_tokenizer()\r A__\t\t\t\t\t\t\t= tokenizer.encode(a__\t\t\t\t)\r A__\t\t\t\t\t\t\t= rust_tokenizer.encode(a__\t\t\t\t)\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r def \tUpperCAmelCase_\t\t\t\t(\t\t\t\t\t\t\tself :Tuple\t\t\t\t)->\t\t\t\tOptional[Any]:\r A__\t\t\t\t\t\t\t= \"This is a test\"\r A__\t\t\t\t\t\t\t= [13, 1, 43_98, 25, 21, 12_89]\r A__\t\t\t\t\t\t\t= [\"▁\", \"T\", \"his\", \"▁is\", \"▁a\", \"▁test\"]\r A__\t\t\t\t\t\t\t= [\"▁\", \"\", \"his\", \"▁is\", \"▁a\", \"▁test\"]\r\r A__\t\t\t\t\t\t\t= DebertaVaTokenizer(a__\t\t,\t\t\tkeep_accents=a__\t\t\t\t)\r A__\t\t\t\t\t\t\t= 
DebertaVaTokenizerFast(a__\t\t,\t\t\tkeep_accents=a__\t\t\t\t)\r\r A__\t\t\t\t\t\t\t= tokenizer.encode(a__\t\t,\t\t\tadd_special_tokens=a__\t\t\t\t)\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r A__\t\t\t\t\t\t\t= tokenizer.tokenize(a__\t\t\t\t)\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r A__\t\t\t\t\t\t\t= tokenizer.convert_ids_to_tokens(a__\t\t\t\t)\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r\r A__\t\t\t\t\t\t\t= rust_tokenizer.encode(a__\t\t,\t\t\tadd_special_tokens=a__\t\t\t\t)\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r A__\t\t\t\t\t\t\t= rust_tokenizer.tokenize(a__\t\t\t\t)\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r A__\t\t\t\t\t\t\t= rust_tokenizer.convert_ids_to_tokens(a__\t\t\t\t)\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r\r # fmt: off\r A__\t\t\t\t\t\t\t= \"I was born in 92000, and this is falsé.\"\r A__\t\t\t\t\t\t\t= [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]\r A__\t\t\t\t\t\t\t= [\"▁\", \"I\", \"▁was\", \"▁born\", \"▁in\", \"▁9\", \"2000\", \",\", \"▁and\", \"▁this\", \"▁is\", \"▁fal\", \"s\", \"é\", \".\", ]\r A__\t\t\t\t\t\t\t= [\"▁\", \"\", \"▁was\", \"▁born\", \"▁in\", \"▁9\", \"2000\", \",\", \"▁and\", \"▁this\", \"▁is\", \"▁fal\", \"s\", \"\", \".\", ]\r # fmt: on\r\r A__\t\t\t\t\t\t\t= tokenizer.encode(a__\t\t,\t\t\tadd_special_tokens=a__\t\t\t\t)\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r A__\t\t\t\t\t\t\t= tokenizer.tokenize(a__\t\t\t\t)\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r A__\t\t\t\t\t\t\t= tokenizer.convert_ids_to_tokens(a__\t\t\t\t)\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r\r A__\t\t\t\t\t\t\t= rust_tokenizer.encode(a__\t\t,\t\t\tadd_special_tokens=a__\t\t\t\t)\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r A__\t\t\t\t\t\t\t= rust_tokenizer.tokenize(a__\t\t\t\t)\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r A__\t\t\t\t\t\t\t= rust_tokenizer.convert_ids_to_tokens(a__\t\t\t\t)\r self.assertListEqual(a__\t\t,\t\t\ta__\t\t\t\t)\r def \tUpperCAmelCase_\t\t\t\t(\t\t\t\t\t\t\tself :List[Any]\t\t\t\t)->\t\t\t\tTuple:\r A__\t\t\t\t\t\t\t= DebertaVaTokenizer(a__\t\t\t\t)\r\r A__\t\t\t\t\t\t\t= tokenizer.encode(\"sequence builders\"\t\t\t\t)\r A__\t\t\t\t\t\t\t= tokenizer.encode(\"multi-sequence build\"\t\t\t\t)\r\r A__\t\t\t\t\t\t\t= tokenizer.build_inputs_with_special_tokens(a__\t\t\t\t)\r A__\t\t\t\t\t\t\t= tokenizer.build_inputs_with_special_tokens(a__\t\t,\t\t\ta__\t\t\t\t)\r\r self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]\t\t,\t\t\ta__\t\t\t\t)\r self.assertEqual(\r [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id]\t\t,\t\t\ta__\t\t,\t\t\t)\r\r\r\r @slow\r def \tUpperCAmelCase_\t\t\t\t(\t\t\t\t\t\t\tself :Tuple\t\t\t\t)->\t\t\t\tstr:\r # fmt: off\r A__\t\t\t\t\t\t\t= {\"input_ids\": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], \"token_type_ids\": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], \"attention_mask\": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501\r # fmt: on\r\r self.tokenizer_integration_test_util(\r expected_encoding=a__\t\t,\t\t\tmodel_name=\"microsoft/deberta-v2-xlarge\"\t\t,\t\t\trevision=\"ad6e42c1532ddf3a15c39246b63f5559d558b670\"\t\t,\t\t\t)\r\r\r\r"},"code_codestyle":{"kind":"number","value":440,"string":"440"},"style_context":{"kind":"string","value":"\r\r'''simple docstring'''\r\r\r\r\rfrom ...processing_utils import ProcessorMixin\rfrom ...tokenization_utils_base import BatchEncoding\r\r\r\r\r\r\rclass _SCREAMING_SNAKE_CASE ( __a ):\r __SCREAMING_SNAKE_CASE\t:Optional[int] \t\t\t=\t\"\"\"ClapFeatureExtractor\"\"\"\r __SCREAMING_SNAKE_CASE\t:List[Any] \t\t\t=\t(\"\"\"RobertaTokenizer\"\"\", \"\"\"RobertaTokenizerFast\"\"\")\r\r\r\r def __init__( self\t\t\t\t\t\t: Optional[Any]\t\t\t\t\t, a__\t\t\t\t\t\t: Dict\t\t\t\t\t, a__\t\t\t\t\t\t: Dict ):\r super().__init__(a__\t\t\t\t\t, a__ )\r\r\r\r def __call__( self\t\t\t\t\t\t: Dict\t\t\t\t\t, a__\t\t\t\t\t\t: List[str]=None\t\t\t\t\t, a__\t\t\t\t\t\t: List[Any]=None\t\t\t\t\t, a__\t\t\t\t\t\t: Any=None\t\t\t\t\t, **a__\t\t\t\t\t\t: Tuple ):\r __magic_name__\t\t =\t\t\t\t\t\t\tkwargs.pop('''sampling_rate'''\t\t\t\t\t, a__ )\r\r if text is None and audios is None:\r raise ValueError('''You have to specify either text or audios. 
Both cannot be none.''' )\r\r if text is not None:\r __magic_name__\t\t =\t\t\t\t\t\t\tself.tokenizer(a__\t\t\t\t\t, return_tensors=a__\t\t\t\t\t, **a__ )\r\r if audios is not None:\r __magic_name__\t\t =\t\t\t\t\t\t\tself.feature_extractor(\r a__\t\t\t\t\t, sampling_rate=a__\t\t\t\t\t, return_tensors=a__\t\t\t\t\t, **a__ )\r\r if text is not None and audios is not None:\r __magic_name__\t\t =\t\t\t\t\t\t\taudio_features.input_features\r return encoding\r elif text is not None:\r return encoding\r else:\r return BatchEncoding(data=dict(**a__ )\t\t\t\t\t, tensor_type=a__ )\r\r\r\r def \t\t\t\t\tsnake_case__ ( self\t\t\t\t\t\t: List[Any]\t\t\t\t\t, *a__\t\t\t\t\t\t: str\t\t\t\t\t, **a__\t\t\t\t\t\t: List[str] ):\r return self.tokenizer.batch_decode(*a__\t\t\t\t\t, **a__ )\r\r\r\r def \t\t\t\t\tsnake_case__ ( self\t\t\t\t\t\t: int\t\t\t\t\t, *a__\t\t\t\t\t\t: Tuple\t\t\t\t\t, **a__\t\t\t\t\t\t: Tuple ):\r return self.tokenizer.decode(*a__\t\t\t\t\t, **a__ )\r\r\r\r @property\r def \t\t\t\t\tsnake_case__ ( self\t\t\t\t\t\t: Any ):\r __magic_name__\t\t =\t\t\t\t\t\t\tself.tokenizer.model_input_names\r __magic_name__\t\t =\t\t\t\t\t\t\tself.feature_extractor.model_input_names\r return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )\r\r\r\r\r\r\r"},"style_context_codestyle":{"kind":"number","value":432,"string":"432"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":581,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\n\r\nimport json\r\nimport os\r\nimport tempfile\r\n\r\nfrom transformers.testing_utils import check_json_file_has_correct_format\r\n\r\n\r\n\r\n\r\nclass __A :\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n UpperCamelCase__\t\t\t\t: str \t\t\t\t=None\r\n\r\n\r\n def __lowercase\t\t\t\t\t\t\t(\t\t\t\tself\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n __UpperCamelCase : List[Any] \t=self.feature_extraction_class(**self.feat_extract_dict\t\t\t\t\t)\r\n __UpperCamelCase : List[str] \t=json.loads(feat_extract.to_json_string()\t\t\t\t\t)\r\n for key, value in self.feat_extract_dict.items():\r\n self.assertEqual(obj[key]\t\t\t\t\t\t\t,\t\t\t\t\t\tlowerCamelCase__\t\t\t\t\t)\r\n\r\n\r\n def __lowercase\t\t\t\t\t\t\t(\t\t\t\tself\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n __UpperCamelCase : int \t=self.feature_extraction_class(**self.feat_extract_dict\t\t\t\t\t)\r\n\r\n with tempfile.TemporaryDirectory() as tmpdirname:\r\n __UpperCamelCase : Optional[int] \t=os.path.join(lowerCamelCase__\t\t\t\t\t\t\t,\t\t\t\t\t\t'feat_extract.json'\t\t\t\t\t)\r\n feat_extract_first.to_json_file(lowerCamelCase__\t\t\t\t\t)\r\n __UpperCamelCase : List[str] \t=self.feature_extraction_class.from_json_file(lowerCamelCase__\t\t\t\t\t)\r\n\r\n self.assertEqual(feat_extract_second.to_dict()\t\t\t\t\t\t\t,\t\t\t\t\t\tfeat_extract_first.to_dict()\t\t\t\t\t)\r\n\r\n\r\n def __lowercase\t\t\t\t\t\t\t(\t\t\t\tself\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n __UpperCamelCase : List[str] \t=self.feature_extraction_class(**self.feat_extract_dict\t\t\t\t\t)\r\n\r\n with tempfile.TemporaryDirectory() as tmpdirname:\r\n __UpperCamelCase : Union[str, Any] \t=feat_extract_first.save_pretrained(lowerCamelCase__\t\t\t\t\t)[0]\r\n check_json_file_has_correct_format(lowerCamelCase__\t\t\t\t\t)\r\n __UpperCamelCase : Optional[int] \t=self.feature_extraction_class.from_pretrained(lowerCamelCase__\t\t\t\t\t)\r\n\r\n 
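The feature-extractor tests above all check one serialization contract: whatever goes out through to_json_string, to_json_file or save_pretrained must come back through the matching from_* call with an identical to_dict(). A minimal round-trip sketch of that contract; Wav2Vec2FeatureExtractor is only an illustrative concrete class here, not the fixture these tests actually parametrize:

import json
import os
import tempfile

from transformers import Wav2Vec2FeatureExtractor

feat = Wav2Vec2FeatureExtractor()  # default configuration

# JSON string round-trip: every init attribute shows up in the serialized dict
assert json.loads(feat.to_json_string())["feature_size"] == feat.feature_size

with tempfile.TemporaryDirectory() as tmpdirname:
    # JSON file round-trip
    json_path = os.path.join(tmpdirname, "feat_extract.json")
    feat.to_json_file(json_path)
    assert Wav2Vec2FeatureExtractor.from_json_file(json_path).to_dict() == feat.to_dict()

    # save_pretrained / from_pretrained round-trip
    feat.save_pretrained(tmpdirname)
    assert Wav2Vec2FeatureExtractor.from_pretrained(tmpdirname).to_dict() == feat.to_dict()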
self.assertEqual(feat_extract_second.to_dict()\t\t\t\t\t\t\t,\t\t\t\t\t\tfeat_extract_first.to_dict()\t\t\t\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __lowercase\t\t\t\t\t\t\t(\t\t\t\tself\t\t\t\t\t):\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n __UpperCamelCase : Optional[Any] \t=self.feature_extraction_class()\r\n self.assertIsNotNone(lowerCamelCase__\t\t\t\t\t)\r\n\r\n"},"code_codestyle":{"kind":"number","value":721,"string":"721"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\n\r\nfrom __future__ import annotations\r\n\r\nimport csv\r\n\r\nimport requests\r\nfrom bsa import BeautifulSoup\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t\tA ( a_ = \"\" ) ->\t\tdict[str, float]:\r\n\t\t\t\t\t__UpperCamelCase : Tuple \t=url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'\r\n\t\t\t\t\t__UpperCamelCase : Optional[int] \t=BeautifulSoup(requests.get(a_ ).text\t\t\t\t\t\t\t,'html.parser' )\r\n\t\t\t\t\t__UpperCamelCase : Union[str, Any] \t=soup.find_all('td'\t\t\t\t\t\t\t,attrs='titleColumn' )\r\n\t\t\t\t\t__UpperCamelCase : Any \t=soup.find_all('td'\t\t\t\t\t\t\t,class_='ratingColumn imdbRating' )\r\n\t\t\t\t\treturn {\r\n\t\t\t\t\t title.a.text: float(rating.strong.text )\r\n\t\t\t\t\t for title, rating in zip(a_\t\t\t\t\t\t\t,a_ )\r\n\t\t\t\t\t}\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t\t\t\t\t\t\tA ( a_ = \"IMDb_Top_250_Movies.csv\" ) ->\t\tNone:\r\n\t\t\t\t\t__UpperCamelCase : Dict \t=get_imdb_top_aaa_movies()\r\n\t\t\t\t\twith open(a_\t\t\t\t\t\t\t,'w'\t\t\t\t\t\t\t,newline='' ) as out_file:\r\n\t\t\t\t\t\t\t\t\t\t__UpperCamelCase : Any \t=csv.writer(a_ )\r\n\t\t\t\t\t\t\t\t\t\twriter.writerow(['Movie title', 'IMDb rating'] )\r\n\t\t\t\t\t\t\t\t\t\tfor title, rating in movies.items():\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\twriter.writerow([title, rating] )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\t\t\t\t\t\t\twrite_movies()\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":154,"string":"154"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":582,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\nimport inspect\r\nimport unittest\r\nimport warnings\r\n\r\nfrom transformers import DeiTConfig\r\nfrom transformers.models.auto import get_values\r\nfrom transformers.testing_utils import (\r\n require_accelerate,\r\n require_torch,\r\n require_torch_gpu,\r\n require_vision,\r\n slow,\r\n torch_device,\r\n)\r\nfrom transformers.utils import cached_property, is_torch_available, is_vision_available\r\n\r\nfrom ...test_configuration_common import ConfigTester\r\nfrom ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor\r\nfrom ...test_pipeline_mixin import PipelineTesterMixin\r\n\r\n\r\nif is_torch_available():\r\n import torch\r\n from torch import nn\r\n\r\n from transformers import (\r\n MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,\r\n MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,\r\n MODEL_MAPPING,\r\n DeiTForImageClassification,\r\n DeiTForImageClassificationWithTeacher,\r\n DeiTForMaskedImageModeling,\r\n DeiTModel,\r\n )\r\n from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST\r\n\r\n\r\nif is_vision_available():\r\n from PIL import Image\r\n\r\n from transformers import DeiTImageProcessor\r\nclass \t\t_A :\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __init__( self\t\t\t\t\t\t: Tuple ,\t\t_A\t\t\t\t\t\t: int ,\t\t_A\t\t\t\t\t\t: int=13 ,\t\t_A\t\t\t\t\t\t: Any=30 ,\t\t_A\t\t\t\t\t\t: int=2 ,\t\t_A\t\t\t\t\t\t: str=3 ,\t\t_A\t\t\t\t\t\t: Tuple=True ,\t\t_A\t\t\t\t\t\t: Union[str, Any]=True 
,\t\t_A\t\t\t\t\t\t: int=32 ,\t\t_A\t\t\t\t\t\t: Union[str, Any]=5 ,\t\t_A\t\t\t\t\t\t: Union[str, Any]=4 ,\t\t_A\t\t\t\t\t\t: List[Any]=37 ,\t\t_A\t\t\t\t\t\t: Tuple=\"gelu\" ,\t\t_A\t\t\t\t\t\t: Union[str, Any]=0.1 ,\t\t_A\t\t\t\t\t\t: Any=0.1 ,\t\t_A\t\t\t\t\t\t: Optional[Any]=10 ,\t\t_A\t\t\t\t\t\t: List[str]=0.02 ,\t\t_A\t\t\t\t\t\t: Dict=3 ,\t\t_A\t\t\t\t\t\t: Tuple=None ,\t\t_A\t\t\t\t\t\t: str=2 ,\t\t)\t\t\t\t\t\t-> Optional[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase : Dict =\t\t\t\t\tparent\r\n lowercase : Optional[int] =\t\t\t\t\tbatch_size\r\n lowercase : Any =\t\t\t\t\timage_size\r\n lowercase : Any =\t\t\t\t\tpatch_size\r\n lowercase : List[str] =\t\t\t\t\tnum_channels\r\n lowercase : Optional[Any] =\t\t\t\t\tis_training\r\n lowercase : List[Any] =\t\t\t\t\tuse_labels\r\n lowercase : Optional[Any] =\t\t\t\t\thidden_size\r\n lowercase : int =\t\t\t\t\tnum_hidden_layers\r\n lowercase : Union[str, Any] =\t\t\t\t\tnum_attention_heads\r\n lowercase : Union[str, Any] =\t\t\t\t\tintermediate_size\r\n lowercase : Union[str, Any] =\t\t\t\t\thidden_act\r\n lowercase : Optional[Any] =\t\t\t\t\thidden_dropout_prob\r\n lowercase : List[Any] =\t\t\t\t\tattention_probs_dropout_prob\r\n lowercase : Tuple =\t\t\t\t\ttype_sequence_label_size\r\n lowercase : str =\t\t\t\t\tinitializer_range\r\n lowercase : Union[str, Any] =\t\t\t\t\tscope\r\n lowercase : int =\t\t\t\t\tencoder_stride\r\n\r\n # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)\r\n lowercase : List[str] =\t\t\t\t\t(image_size // patch_size) ** 2\r\n lowercase : Union[str, Any] =\t\t\t\t\tnum_patches + 2\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: Any )\t\t\t\t\t\t-> Union[str, Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase : List[Any] =\t\t\t\t\tfloats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )\r\n\r\n lowercase : List[Any] =\t\t\t\t\tNone\r\n if self.use_labels:\r\n lowercase : List[str] =\t\t\t\t\tids_tensor([self.batch_size] ,\t\tself.type_sequence_label_size )\r\n\r\n lowercase : str =\t\t\t\t\tself.get_config()\r\n\r\n return config, pixel_values, labels\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: Tuple )\t\t\t\t\t\t-> Tuple:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n return DeiTConfig(\r\n image_size=self.image_size ,\t\tpatch_size=self.patch_size ,\t\tnum_channels=self.num_channels ,\t\thidden_size=self.hidden_size ,\t\tnum_hidden_layers=self.num_hidden_layers ,\t\tnum_attention_heads=self.num_attention_heads ,\t\tintermediate_size=self.intermediate_size ,\t\thidden_act=self.hidden_act ,\t\thidden_dropout_prob=self.hidden_dropout_prob ,\t\tattention_probs_dropout_prob=self.attention_probs_dropout_prob ,\t\tis_decoder=_A ,\t\tinitializer_range=self.initializer_range ,\t\tencoder_stride=self.encoder_stride ,\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: List[str] ,\t\t_A\t\t\t\t\t\t: Optional[int] ,\t\t_A\t\t\t\t\t\t: Optional[Any] ,\t\t_A\t\t\t\t\t\t: List[Any] )\t\t\t\t\t\t-> Dict:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase : Any =\t\t\t\t\tDeiTModel(config=_A )\r\n model.to(_A )\r\n model.eval()\r\n lowercase : Union[str, Any] =\t\t\t\t\tmodel(_A )\r\n self.parent.assertEqual(result.last_hidden_state.shape ,\t\t(self.batch_size, self.seq_length, self.hidden_size) 
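With the tester defaults visible above (image_size=30, patch_size=2), the sequence length the DeiT assertions depend on works out as follows; the two extra positions are the [CLS] and distillation tokens:

num_patches = (30 // 2) ** 2   # 15 * 15 = 225 patch tokens
seq_length = num_patches + 2   # 227 positions once [CLS] and the distillation token are added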
)\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: Union[str, Any] ,\t\t_A\t\t\t\t\t\t: Any ,\t\t_A\t\t\t\t\t\t: Optional[int] ,\t\t_A\t\t\t\t\t\t: List[str] )\t\t\t\t\t\t-> Union[str, Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase : Optional[int] =\t\t\t\t\tDeiTForMaskedImageModeling(config=_A )\r\n model.to(_A )\r\n model.eval()\r\n lowercase : Any =\t\t\t\t\tmodel(_A )\r\n self.parent.assertEqual(\r\n result.reconstruction.shape ,\t\t(self.batch_size, self.num_channels, self.image_size, self.image_size) )\r\n\r\n # test greyscale images\r\n lowercase : List[Any] =\t\t\t\t\t1\r\n lowercase : Union[str, Any] =\t\t\t\t\tDeiTForMaskedImageModeling(_A )\r\n model.to(_A )\r\n model.eval()\r\n\r\n lowercase : List[str] =\t\t\t\t\tfloats_tensor([self.batch_size, 1, self.image_size, self.image_size] )\r\n lowercase : Optional[Any] =\t\t\t\t\tmodel(_A )\r\n self.parent.assertEqual(result.reconstruction.shape ,\t\t(self.batch_size, 1, self.image_size, self.image_size) )\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: List[str] ,\t\t_A\t\t\t\t\t\t: Optional[int] ,\t\t_A\t\t\t\t\t\t: Optional[Any] ,\t\t_A\t\t\t\t\t\t: List[str] )\t\t\t\t\t\t-> Any:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase : Union[str, Any] =\t\t\t\t\tself.type_sequence_label_size\r\n lowercase : List[Any] =\t\t\t\t\tDeiTForImageClassification(_A )\r\n model.to(_A )\r\n model.eval()\r\n lowercase : Tuple =\t\t\t\t\tmodel(_A ,\t\tlabels=_A )\r\n self.parent.assertEqual(result.logits.shape ,\t\t(self.batch_size, self.type_sequence_label_size) )\r\n\r\n # test greyscale images\r\n lowercase : Any =\t\t\t\t\t1\r\n lowercase : Optional[Any] =\t\t\t\t\tDeiTForImageClassification(_A )\r\n model.to(_A )\r\n model.eval()\r\n\r\n lowercase : Optional[Any] =\t\t\t\t\tfloats_tensor([self.batch_size, 1, self.image_size, self.image_size] )\r\n lowercase : Optional[int] =\t\t\t\t\tmodel(_A ,\t\tlabels=_A )\r\n self.parent.assertEqual(result.logits.shape ,\t\t(self.batch_size, self.type_sequence_label_size) )\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: Optional[int] )\t\t\t\t\t\t-> Tuple:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase : str =\t\t\t\t\tself.prepare_config_and_inputs()\r\n (\r\n (\r\n lowercase\r\n ) ,\t\t\t(\r\n lowercase\r\n ) ,\t\t\t(\r\n lowercase\r\n ) ,\t\t\t\r\n ) : Union[str, Any] =\t\t\t\t\tconfig_and_inputs\r\n lowercase : List[str] =\t\t\t\t\t{'''pixel_values''': pixel_values}\r\n return config, inputs_dict\r\n\r\n\r\n@require_torch\r\nclass \t\t_A ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):\r\n _UpperCamelCase : int\t\t\t\t\t= (\r\n (\r\n DeiTModel,\r\n DeiTForImageClassification,\r\n DeiTForImageClassificationWithTeacher,\r\n DeiTForMaskedImageModeling,\r\n )\r\n if is_torch_available()\r\n else ()\r\n )\r\n _UpperCamelCase : Tuple\t\t\t\t\t= (\r\n {\r\n '''feature-extraction''': DeiTModel,\r\n '''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),\r\n }\r\n if is_torch_available()\r\n else {}\r\n )\r\n\r\n _UpperCamelCase : Optional[int]\t\t\t\t\t= False\r\n _UpperCamelCase : Any\t\t\t\t\t= False\r\n _UpperCamelCase : Any\t\t\t\t\t= False\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: Union[str, Any] )\t\t\t\t\t\t-> Dict:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase : Optional[int] 
=\t\t\t\t\tDeiTModelTester(self )\r\n lowercase : Optional[Any] =\t\t\t\t\tConfigTester(self ,\t\tconfig_class=_A ,\t\thas_text_modality=_A ,\t\thidden_size=37 )\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: List[Any] )\t\t\t\t\t\t-> Tuple:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n self.config_tester.run_common_tests()\r\n\r\n\r\n\r\n\r\n\r\n\r\n @unittest.skip(reason='''DeiT does not use inputs_embeds''' )\r\n def __a ( self\t\t\t\t\t\t: Optional[Any] )\t\t\t\t\t\t-> List[str]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n pass\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: int )\t\t\t\t\t\t-> str:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase ,\t\t\tlowercase : List[str] =\t\t\t\t\tself.model_tester.prepare_config_and_inputs_for_common()\r\n\r\n for model_class in self.all_model_classes:\r\n lowercase : int =\t\t\t\t\tmodel_class(_A )\r\n self.assertIsInstance(model.get_input_embeddings() ,\t\t(nn.Module) )\r\n lowercase : Tuple =\t\t\t\t\tmodel.get_output_embeddings()\r\n self.assertTrue(x is None or isinstance(_A ,\t\tnn.Linear ) )\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: Tuple )\t\t\t\t\t\t-> Union[str, Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase ,\t\t\tlowercase : Dict =\t\t\t\t\tself.model_tester.prepare_config_and_inputs_for_common()\r\n\r\n for model_class in self.all_model_classes:\r\n lowercase : Union[str, Any] =\t\t\t\t\tmodel_class(_A )\r\n lowercase : List[Any] =\t\t\t\t\tinspect.signature(model.forward )\r\n # signature.parameters is an OrderedDict => so arg_names order is deterministic\r\n lowercase : List[Any] =\t\t\t\t\t[*signature.parameters.keys()]\r\n\r\n lowercase : Tuple =\t\t\t\t\t['''pixel_values''']\r\n self.assertListEqual(arg_names[:1] ,\t\t_A )\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: Union[str, Any] )\t\t\t\t\t\t-> Dict:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase : Tuple =\t\t\t\t\tself.model_tester.prepare_config_and_inputs()\r\n self.model_tester.create_and_check_model(*_A )\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: int )\t\t\t\t\t\t-> int:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase : int =\t\t\t\t\tself.model_tester.prepare_config_and_inputs()\r\n self.model_tester.create_and_check_for_masked_image_modeling(*_A )\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: List[Any] )\t\t\t\t\t\t-> int:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase : Optional[Any] =\t\t\t\t\tself.model_tester.prepare_config_and_inputs()\r\n self.model_tester.create_and_check_for_image_classification(*_A )\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: Union[str, Any] ,\t\t_A\t\t\t\t\t\t: List[Any] ,\t\t_A\t\t\t\t\t\t: List[str] ,\t\t_A\t\t\t\t\t\t: str=False )\t\t\t\t\t\t-> int:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase : Optional[int] =\t\t\t\t\tsuper()._prepare_for_class(_A ,\t\t_A ,\t\treturn_labels=_A )\r\n\r\n if return_labels:\r\n if model_class.__name__ == \"DeiTForImageClassificationWithTeacher\":\r\n del inputs_dict[\"labels\"]\r\n\r\n return inputs_dict\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: int )\t\t\t\t\t\t-> Dict:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple 
docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n if not self.model_tester.is_training:\r\n return\r\n\r\n lowercase ,\t\t\tlowercase : str =\t\t\t\t\tself.model_tester.prepare_config_and_inputs_for_common()\r\n lowercase : Union[str, Any] =\t\t\t\t\tTrue\r\n\r\n for model_class in self.all_model_classes:\r\n # DeiTForImageClassificationWithTeacher supports inference-only\r\n if (\r\n model_class in get_values(_A )\r\n or model_class.__name__ == \"DeiTForImageClassificationWithTeacher\"\r\n ):\r\n continue\r\n lowercase : Optional[Any] =\t\t\t\t\tmodel_class(_A )\r\n model.to(_A )\r\n model.train()\r\n lowercase : Optional[int] =\t\t\t\t\tself._prepare_for_class(_A ,\t\t_A ,\t\treturn_labels=_A )\r\n lowercase : List[Any] =\t\t\t\t\tmodel(**_A ).loss\r\n loss.backward()\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: Optional[int] )\t\t\t\t\t\t-> Union[str, Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase ,\t\t\tlowercase : Dict =\t\t\t\t\tself.model_tester.prepare_config_and_inputs_for_common()\r\n if not self.model_tester.is_training:\r\n return\r\n\r\n lowercase : List[Any] =\t\t\t\t\tFalse\r\n lowercase : List[str] =\t\t\t\t\tTrue\r\n\r\n for model_class in self.all_model_classes:\r\n if model_class in get_values(_A ) or not model_class.supports_gradient_checkpointing:\r\n continue\r\n # DeiTForImageClassificationWithTeacher supports inference-only\r\n if model_class.__name__ == \"DeiTForImageClassificationWithTeacher\":\r\n continue\r\n lowercase : List[str] =\t\t\t\t\tmodel_class(_A )\r\n model.gradient_checkpointing_enable()\r\n model.to(_A )\r\n model.train()\r\n lowercase : Optional[Any] =\t\t\t\t\tself._prepare_for_class(_A ,\t\t_A ,\t\treturn_labels=_A )\r\n lowercase : Tuple =\t\t\t\t\tmodel(**_A ).loss\r\n loss.backward()\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: Optional[int] )\t\t\t\t\t\t-> Dict:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase ,\t\t\tlowercase : Optional[int] =\t\t\t\t\tself.model_tester.prepare_config_and_inputs_for_common()\r\n\r\n lowercase : Union[str, Any] =\t\t\t\t\t[\r\n {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},\r\n {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},\r\n {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},\r\n ]\r\n\r\n for model_class in self.all_model_classes:\r\n if (\r\n model_class\r\n not in [\r\n *get_values(_A ),\r\n *get_values(_A ),\r\n ]\r\n or model_class.__name__ == \"DeiTForImageClassificationWithTeacher\"\r\n ):\r\n continue\r\n\r\n for problem_type in problem_types:\r\n with self.subTest(msg=f\"\"\"Testing {model_class} with {problem_type['title']}\"\"\" ):\r\n lowercase : List[Any] =\t\t\t\t\tproblem_type['''title''']\r\n lowercase : List[Any] =\t\t\t\t\tproblem_type['''num_labels''']\r\n\r\n lowercase : Optional[int] =\t\t\t\t\tmodel_class(_A )\r\n model.to(_A )\r\n model.train()\r\n\r\n lowercase : List[Any] =\t\t\t\t\tself._prepare_for_class(_A ,\t\t_A ,\t\treturn_labels=_A )\r\n\r\n if problem_type[\"num_labels\"] > 1:\r\n lowercase : Optional[Any] =\t\t\t\t\tinputs['''labels'''].unsqueeze(1 ).repeat(1 ,\t\tproblem_type['''num_labels'''] )\r\n\r\n lowercase : Union[str, Any] =\t\t\t\t\tinputs['''labels'''].to(problem_type['''dtype'''] )\r\n\r\n # This tests that we do not trigger the warning form PyTorch \"Using a target size that is different\r\n # to the input 
size. This will likely lead to incorrect results due to broadcasting. Please ensure\r\n # they have the same size.\" which is a symptom something in wrong for the regression problem.\r\n # See https://github.com/huggingface/transformers/issues/11780\r\n with warnings.catch_warnings(record=_A ) as warning_list:\r\n lowercase : List[str] =\t\t\t\t\tmodel(**_A ).loss\r\n for w in warning_list:\r\n if \"Using a target size that is different to the input size\" in str(w.message ):\r\n raise ValueError(\r\n f\"\"\"Something is going wrong in the regression problem: intercepted {w.message}\"\"\" )\r\n\r\n loss.backward()\r\n\r\n\r\n\r\n\r\n\r\n\r\n @slow\r\n def __a ( self\t\t\t\t\t\t: Any )\t\t\t\t\t\t-> Optional[int]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\r\n lowercase : str =\t\t\t\t\tDeiTModel.from_pretrained(_A )\r\n self.assertIsNotNone(_A )\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef snake_case(\t\t\t\t\t\t\t)\t\t\t\t\t->\t\t\t\tUnion[str, Any]:\r\n\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n\r\n\r\n lowercase : str =\t\t\t\t\tImage.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )\r\n return image\r\n\r\n\r\n\r\n@require_torch\r\n@require_vision\r\nclass \t\t_A ( unittest.TestCase ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n @cached_property\r\n def __a ( self\t\t\t\t\t\t: Optional[int] )\t\t\t\t\t\t-> Optional[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n return (\r\n DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )\r\n if is_vision_available()\r\n else None\r\n )\r\n\r\n\r\n\r\n\r\n\r\n\r\n @slow\r\n def __a ( self\t\t\t\t\t\t: Union[str, Any] )\t\t\t\t\t\t-> Tuple:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase : str =\t\t\t\t\tDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to(\r\n _A )\r\n\r\n lowercase : Any =\t\t\t\t\tself.default_image_processor\r\n lowercase : List[str] =\t\t\t\t\tprepare_img()\r\n lowercase : str =\t\t\t\t\timage_processor(images=_A ,\t\treturn_tensors='''pt''' ).to(_A )\r\n\r\n # forward pass\r\n with torch.no_grad():\r\n lowercase : int =\t\t\t\t\tmodel(**_A )\r\n\r\n # verify the logits\r\n lowercase : Any =\t\t\t\t\ttorch.Size((1, 1_000) )\r\n self.assertEqual(outputs.logits.shape ,\t\t_A )\r\n\r\n lowercase : Dict =\t\t\t\t\ttorch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(_A )\r\n\r\n self.assertTrue(torch.allclose(outputs.logits[0, :3] ,\t\t_A ,\t\tatol=1E-4 ) )\r\n\r\n\r\n\r\n\r\n\r\n\r\n @slow\r\n @require_accelerate\r\n @require_torch_gpu\r\n def __a ( self\t\t\t\t\t\t: List[Any] )\t\t\t\t\t\t-> str:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase : Any =\t\t\t\t\tDeiTModel.from_pretrained(\r\n '''facebook/deit-base-distilled-patch16-224''' ,\t\ttorch_dtype=torch.floataa ,\t\tdevice_map='''auto''' )\r\n lowercase : str =\t\t\t\t\tself.default_image_processor\r\n\r\n lowercase : List[str] =\t\t\t\t\tprepare_img()\r\n lowercase : List[Any] =\t\t\t\t\timage_processor(images=_A ,\t\treturn_tensors='''pt''' )\r\n lowercase : List[str] =\t\t\t\t\tinputs.pixel_values.to(_A )\r\n\r\n # forward pass to make sure inference works in fp16\r\n with torch.no_grad():\r\n lowercase : List[Any] =\t\t\t\t\tmodel(_A 
)"},"code_codestyle":{"kind":"number","value":217,"string":"217"},"style_context":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\nimport unittest\r\n\r\nfrom transformers import AlbertTokenizer, AlbertTokenizerFast\r\nfrom transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow\r\n\r\nfrom ...test_tokenization_common import TokenizerTesterMixin\r\n\r\n\r\nlowerCAmelCase_\t\t\t\t\t\t =\t\t\t\t\tget_tests_dir('fixtures/spiece.model')\r\n@require_sentencepiece\r\n@require_tokenizers\r\nclass \t\t_A ( _lowerCamelCase , unittest.TestCase ):\r\n _UpperCamelCase : Tuple\t\t\t\t\t= AlbertTokenizer\r\n _UpperCamelCase : Dict\t\t\t\t\t= AlbertTokenizerFast\r\n _UpperCamelCase : Optional[Any]\t\t\t\t\t= True\r\n _UpperCamelCase : Union[str, Any]\t\t\t\t\t= True\r\n _UpperCamelCase : List[str]\t\t\t\t\t= True\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: int )\t\t\t\t\t\t-> int:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n super().setUp()\r\n\r\n # We have a SentencePiece fixture for testing\r\n lowercase : List[Any] =\t\t\t\t\tAlbertTokenizer(_A )\r\n tokenizer.save_pretrained(self.tmpdirname )\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: List[Any] ,\t\t_A\t\t\t\t\t\t: Tuple )\t\t\t\t\t\t-> Any:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase : int =\t\t\t\t\t'''this is a test'''\r\n lowercase : List[str] =\t\t\t\t\t'''this is a test'''\r\n return input_text, output_text\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: str )\t\t\t\t\t\t-> List[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase : Optional[int] =\t\t\t\t\t''''''\r\n lowercase : Any =\t\t\t\t\t0\r\n\r\n self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) ,\t\t_A )\r\n self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) ,\t\t_A )\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: int )\t\t\t\t\t\t-> List[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase : str =\t\t\t\t\tlist(self.get_tokenizer().get_vocab().keys() )\r\n\r\n self.assertEqual(vocab_keys[0] ,\t\t'''''' )\r\n self.assertEqual(vocab_keys[1] ,\t\t'''''' )\r\n self.assertEqual(vocab_keys[-1] ,\t\t'''▁eloquent''' )\r\n self.assertEqual(len(_A ) ,\t\t30_000 )\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: List[str] )\t\t\t\t\t\t-> Optional[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n self.assertEqual(self.get_tokenizer().vocab_size ,\t\t30_000 )\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: Optional[int] )\t\t\t\t\t\t-> int:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n if not self.test_rust_tokenizer:\r\n return\r\n\r\n lowercase : List[Any] =\t\t\t\t\tself.get_tokenizer()\r\n lowercase : str =\t\t\t\t\tself.get_rust_tokenizer()\r\n\r\n lowercase : List[str] =\t\t\t\t\t'''I was born in 92000, and this is falsé.'''\r\n\r\n lowercase : int =\t\t\t\t\ttokenizer.tokenize(_A )\r\n lowercase : List[Any] =\t\t\t\t\trust_tokenizer.tokenize(_A )\r\n self.assertListEqual(_A ,\t\t_A )\r\n\r\n lowercase : List[str] =\t\t\t\t\ttokenizer.encode(_A ,\t\tadd_special_tokens=_A )\r\n lowercase : Tuple =\t\t\t\t\trust_tokenizer.encode(_A ,\t\tadd_special_tokens=_A )\r\n self.assertListEqual(_A ,\t\t_A )\r\n\r\n lowercase : List[Any] =\t\t\t\t\tself.get_rust_tokenizer()\r\n lowercase : Dict 
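The sequence-builder assertions in these tokenizer tests (the DebertaV2 ones earlier and the ALBERT ones just below) all encode the same special-token layout. A minimal sketch of that layout; the cls/sep ids 2 and 3 are read off the ALBERT fixture and treated as illustrative only:

def build_inputs_with_special_tokens(token_ids_a, token_ids_b=None, cls_id=2, sep_id=3):
    # Single sequence: [CLS] A [SEP]; sequence pair: [CLS] A [SEP] B [SEP]
    if token_ids_b is None:
        return [cls_id] + token_ids_a + [sep_id]
    return [cls_id] + token_ids_a + [sep_id] + token_ids_b + [sep_id]


assert build_inputs_with_special_tokens([7, 8]) == [2, 7, 8, 3]
assert build_inputs_with_special_tokens([7, 8], [9]) == [2, 7, 8, 3, 9, 3]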
=\t\t\t\t\ttokenizer.encode(_A )\r\n lowercase : List[Any] =\t\t\t\t\trust_tokenizer.encode(_A )\r\n self.assertListEqual(_A ,\t\t_A )\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: List[str] )\t\t\t\t\t\t-> Any:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase : Union[str, Any] =\t\t\t\t\tAlbertTokenizer(_A ,\t\tkeep_accents=_A )\r\n\r\n lowercase : Union[str, Any] =\t\t\t\t\ttokenizer.tokenize('''This is a test''' )\r\n self.assertListEqual(_A ,\t\t['''▁this''', '''▁is''', '''▁a''', '''▁test'''] )\r\n\r\n self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) ,\t\t[48, 25, 21, 1_289] )\r\n\r\n lowercase : Dict =\t\t\t\t\ttokenizer.tokenize('''I was born in 92000, and this is falsé.''' )\r\n self.assertListEqual(\r\n _A ,\t\t['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''] )\r\n lowercase : Union[str, Any] =\t\t\t\t\ttokenizer.convert_tokens_to_ids(_A )\r\n self.assertListEqual(_A ,\t\t[31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )\r\n\r\n lowercase : Any =\t\t\t\t\ttokenizer.convert_ids_to_tokens(_A )\r\n self.assertListEqual(\r\n _A ,\t\t['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''''', '''.'''] ,\t\t)\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __a ( self\t\t\t\t\t\t: Dict )\t\t\t\t\t\t-> Union[str, Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase : Tuple =\t\t\t\t\tAlbertTokenizer(_A )\r\n\r\n lowercase : List[str] =\t\t\t\t\ttokenizer.encode('''sequence builders''' )\r\n lowercase : int =\t\t\t\t\ttokenizer.encode('''multi-sequence build''' )\r\n\r\n lowercase : Union[str, Any] =\t\t\t\t\ttokenizer.build_inputs_with_special_tokens(_A )\r\n lowercase : Dict =\t\t\t\t\ttokenizer.build_inputs_with_special_tokens(_A ,\t\t_A )\r\n\r\n assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]\r\n assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [\r\n tokenizer.sep_token_id\r\n ]\r\n\r\n\r\n\r\n\r\n\r\n\r\n @slow\r\n def __a ( self\t\t\t\t\t\t: List[Any] )\t\t\t\t\t\t-> Optional[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n lowercase : Any =\t\t\t\t\t{'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 
4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501\r\n # fmt: on\r\n\r\n self.tokenizer_integration_test_util(\r\n expected_encoding=_A ,\t\tmodel_name='''albert-base-v2''' ,\t\trevision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''' ,\t\t)"},"style_context_codestyle":{"kind":"number","value":217,"string":"217"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":583,"cells":{"code":{"kind":"string","value":"\r\"\"\"simple docstring\"\"\"\r\r\r\r\r\r\rimport unittest\r\rfrom diffusers.pipelines.pipeline_utils import is_safetensors_compatible\r\r\r\r\rclass \t\t\t\tlowercase__(\t\t\t\tunittest.TestCase ):\r\t'''simple docstring'''\r\r\tdef __lowerCAmelCase (\t\t\t\t\t\tself :int\t\t\t\t\t)\t\t\t\t\t\t\t->\t\t\tTuple:\r\r\r\r\r\r\r\r\t\t\t'''simple docstring'''\r\r\r\r\r\t\t\tSCREAMING_SNAKE_CASE : Any\t\t\t\t= [\r\t\t\t '''safety_checker/pytorch_model.bin''',\r\t\t\t '''safety_checker/model.safetensors''',\r\t\t\t '''vae/diffusion_pytorch_model.bin''',\r\t\t\t '''vae/diffusion_pytorch_model.safetensors''',\r\t\t\t '''text_encoder/pytorch_model.bin''',\r\t\t\t '''text_encoder/model.safetensors''',\r\t\t\t '''unet/diffusion_pytorch_model.bin''',\r\t\t\t '''unet/diffusion_pytorch_model.safetensors''',\r\t\t\t]\r\t\t\tself.assertTrue(is_safetensors_compatible(lowerCamelCase_\t\t\t\t\t)\t\t\t\t\t)\r\r\r\r\r\r\tdef __lowerCAmelCase (\t\t\t\t\t\tself :Union[str, Any]\t\t\t\t\t)\t\t\t\t\t\t\t->\t\t\tList[Any]:\r\r\r\r\r\r\r\r\t\t\t'''simple docstring'''\r\r\r\r\r\t\t\tSCREAMING_SNAKE_CASE : Dict\t\t\t\t= [\r\t\t\t 
'''unet/diffusion_pytorch_model.bin''',\r\t\t\t '''unet/diffusion_pytorch_model.safetensors''',\r\t\t\t]\r\t\t\tself.assertTrue(is_safetensors_compatible(lowerCamelCase_\t\t\t\t\t)\t\t\t\t\t)\r\r\r\r\r\r\tdef __lowerCAmelCase (\t\t\t\t\t\tself :Dict\t\t\t\t\t)\t\t\t\t\t\t\t->\t\t\tList[str]:\r\r\r\r\r\r\r\r\t\t\t'''simple docstring'''\r\r\r\r\r\t\t\tSCREAMING_SNAKE_CASE : List[Any]\t\t\t\t= [\r\t\t\t '''safety_checker/pytorch_model.bin''',\r\t\t\t '''safety_checker/model.safetensors''',\r\t\t\t '''vae/diffusion_pytorch_model.bin''',\r\t\t\t '''vae/diffusion_pytorch_model.safetensors''',\r\t\t\t '''text_encoder/pytorch_model.bin''',\r\t\t\t '''text_encoder/model.safetensors''',\r\t\t\t '''unet/diffusion_pytorch_model.bin''',\r\t\t\t # Removed: 'unet/diffusion_pytorch_model.safetensors',\r\t\t\t]\r\t\t\tself.assertFalse(is_safetensors_compatible(lowerCamelCase_\t\t\t\t\t)\t\t\t\t\t)\r\r\r\r\r\r\tdef __lowerCAmelCase (\t\t\t\t\t\tself :List[str]\t\t\t\t\t)\t\t\t\t\t\t\t->\t\t\tTuple:\r\r\r\r\r\r\r\r\t\t\t'''simple docstring'''\r\r\r\r\r\t\t\tSCREAMING_SNAKE_CASE : Optional[int]\t\t\t\t= [\r\t\t\t '''text_encoder/pytorch_model.bin''',\r\t\t\t '''text_encoder/model.safetensors''',\r\t\t\t]\r\t\t\tself.assertTrue(is_safetensors_compatible(lowerCamelCase_\t\t\t\t\t)\t\t\t\t\t)\r\r\r\r\r\r\tdef __lowerCAmelCase (\t\t\t\t\t\tself :Any\t\t\t\t\t)\t\t\t\t\t\t\t->\t\t\tOptional[Any]:\r\r\r\r\r\r\r\r\t\t\t'''simple docstring'''\r\r\r\r\r\t\t\tSCREAMING_SNAKE_CASE : List[str]\t\t\t\t= [\r\t\t\t '''safety_checker/pytorch_model.bin''',\r\t\t\t '''safety_checker/model.safetensors''',\r\t\t\t '''vae/diffusion_pytorch_model.bin''',\r\t\t\t '''vae/diffusion_pytorch_model.safetensors''',\r\t\t\t '''text_encoder/pytorch_model.bin''',\r\t\t\t # Removed: 'text_encoder/model.safetensors',\r\t\t\t '''unet/diffusion_pytorch_model.bin''',\r\t\t\t '''unet/diffusion_pytorch_model.safetensors''',\r\t\t\t]\r\t\t\tself.assertFalse(is_safetensors_compatible(lowerCamelCase_\t\t\t\t\t)\t\t\t\t\t)\r\r\r\r\r\r\tdef __lowerCAmelCase (\t\t\t\t\t\tself :Optional[int]\t\t\t\t\t)\t\t\t\t\t\t\t->\t\t\tTuple:\r\r\r\r\r\r\r\r\t\t\t'''simple docstring'''\r\r\r\r\r\t\t\tSCREAMING_SNAKE_CASE : str\t\t\t\t= [\r\t\t\t '''safety_checker/pytorch_model.fp16.bin''',\r\t\t\t '''safety_checker/model.fp16.safetensors''',\r\t\t\t '''vae/diffusion_pytorch_model.fp16.bin''',\r\t\t\t '''vae/diffusion_pytorch_model.fp16.safetensors''',\r\t\t\t '''text_encoder/pytorch_model.fp16.bin''',\r\t\t\t '''text_encoder/model.fp16.safetensors''',\r\t\t\t '''unet/diffusion_pytorch_model.fp16.bin''',\r\t\t\t '''unet/diffusion_pytorch_model.fp16.safetensors''',\r\t\t\t]\r\t\t\tSCREAMING_SNAKE_CASE : Union[str, Any]\t\t\t\t= '''fp16'''\r\t\t\tself.assertTrue(is_safetensors_compatible(lowerCamelCase_\t\t, variant=lowerCamelCase_\t\t\t\t\t)\t\t\t\t\t)\r\r\r\r\r\r\tdef __lowerCAmelCase (\t\t\t\t\t\tself :str\t\t\t\t\t)\t\t\t\t\t\t\t->\t\t\tTuple:\r\r\r\r\r\r\r\r\t\t\t'''simple docstring'''\r\r\r\r\r\t\t\tSCREAMING_SNAKE_CASE : Tuple\t\t\t\t= [\r\t\t\t '''unet/diffusion_pytorch_model.fp16.bin''',\r\t\t\t '''unet/diffusion_pytorch_model.fp16.safetensors''',\r\t\t\t]\r\t\t\tSCREAMING_SNAKE_CASE : Dict\t\t\t\t= '''fp16'''\r\t\t\tself.assertTrue(is_safetensors_compatible(lowerCamelCase_\t\t, variant=lowerCamelCase_\t\t\t\t\t)\t\t\t\t\t)\r\r\r\r\r\r\tdef __lowerCAmelCase (\t\t\t\t\t\tself :int\t\t\t\t\t)\t\t\t\t\t\t\t->\t\t\tint:\r\r\r\r\r\r\r\r\t\t\t'''simple docstring'''\r\r\r\r\r\t\t\tSCREAMING_SNAKE_CASE : int\t\t\t\t= [\r\t\t\t 
'''unet/diffusion_pytorch_model.bin''',\r\t\t\t '''unet/diffusion_pytorch_model.safetensors''',\r\t\t\t]\r\t\t\tSCREAMING_SNAKE_CASE : Union[str, Any]\t\t\t\t= '''fp16'''\r\t\t\tself.assertTrue(is_safetensors_compatible(lowerCamelCase_\t\t, variant=lowerCamelCase_\t\t\t\t\t)\t\t\t\t\t)\r\r\r\r\r\r\tdef __lowerCAmelCase (\t\t\t\t\t\tself :Tuple\t\t\t\t\t)\t\t\t\t\t\t\t->\t\t\tList[str]:\r\r\r\r\r\r\r\r\t\t\t'''simple docstring'''\r\r\r\r\r\t\t\tSCREAMING_SNAKE_CASE : Dict\t\t\t\t= [\r\t\t\t '''safety_checker/pytorch_model.fp16.bin''',\r\t\t\t '''safety_checker/model.fp16.safetensors''',\r\t\t\t '''vae/diffusion_pytorch_model.fp16.bin''',\r\t\t\t '''vae/diffusion_pytorch_model.fp16.safetensors''',\r\t\t\t '''text_encoder/pytorch_model.fp16.bin''',\r\t\t\t '''text_encoder/model.fp16.safetensors''',\r\t\t\t '''unet/diffusion_pytorch_model.fp16.bin''',\r\t\t\t # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',\r\t\t\t]\r\t\t\tSCREAMING_SNAKE_CASE : Dict\t\t\t\t= '''fp16'''\r\t\t\tself.assertFalse(is_safetensors_compatible(lowerCamelCase_\t\t, variant=lowerCamelCase_\t\t\t\t\t)\t\t\t\t\t)\r\r\r\r\r\r\tdef __lowerCAmelCase (\t\t\t\t\t\tself :Optional[int]\t\t\t\t\t)\t\t\t\t\t\t\t->\t\t\tstr:\r\r\r\r\r\r\r\r\t\t\t'''simple docstring'''\r\r\r\r\r\t\t\tSCREAMING_SNAKE_CASE : Union[str, Any]\t\t\t\t= [\r\t\t\t '''text_encoder/pytorch_model.fp16.bin''',\r\t\t\t '''text_encoder/model.fp16.safetensors''',\r\t\t\t]\r\t\t\tSCREAMING_SNAKE_CASE : Optional[Any]\t\t\t\t= '''fp16'''\r\t\t\tself.assertTrue(is_safetensors_compatible(lowerCamelCase_\t\t, variant=lowerCamelCase_\t\t\t\t\t)\t\t\t\t\t)\r\r\r\r\r\r\tdef __lowerCAmelCase (\t\t\t\t\t\tself :Dict\t\t\t\t\t)\t\t\t\t\t\t\t->\t\t\tOptional[int]:\r\r\r\r\r\r\r\r\t\t\t'''simple docstring'''\r\r\r\r\r\t\t\tSCREAMING_SNAKE_CASE : List[Any]\t\t\t\t= [\r\t\t\t '''text_encoder/pytorch_model.bin''',\r\t\t\t '''text_encoder/model.safetensors''',\r\t\t\t]\r\t\t\tSCREAMING_SNAKE_CASE : str\t\t\t\t= '''fp16'''\r\t\t\tself.assertTrue(is_safetensors_compatible(lowerCamelCase_\t\t, variant=lowerCamelCase_\t\t\t\t\t)\t\t\t\t\t)\r\r\r\r\r\r\r\r\tdef __lowerCAmelCase (\t\t\t\t\t\tself :Optional[Any]\t\t\t\t\t)\t\t\t\t\t\t\t->\t\t\tOptional[Any]:\r\r\r\r\r\r\r\r\t\t\t'''simple docstring'''\r\r\r\r\r\t\t\tSCREAMING_SNAKE_CASE : int\t\t\t\t= [\r\t\t\t '''safety_checker/pytorch_model.fp16.bin''',\r\t\t\t '''safety_checker/model.fp16.safetensors''',\r\t\t\t '''vae/diffusion_pytorch_model.fp16.bin''',\r\t\t\t '''vae/diffusion_pytorch_model.fp16.safetensors''',\r\t\t\t '''text_encoder/pytorch_model.fp16.bin''',\r\t\t\t # 'text_encoder/model.fp16.safetensors',\r\t\t\t '''unet/diffusion_pytorch_model.fp16.bin''',\r\t\t\t '''unet/diffusion_pytorch_model.fp16.safetensors''',\r\t\t\t]\r\t\t\tSCREAMING_SNAKE_CASE : Optional[Any]\t\t\t\t= '''fp16'''\r\t\t\tself.assertFalse(is_safetensors_compatible(lowerCamelCase_\t\t, variant=lowerCamelCase_\t\t\t\t\t)\t\t\t\t\t)\r\r\r\r\r\r"},"code_codestyle":{"kind":"number","value":18,"string":"18"},"style_context":{"kind":"string","value":"\r\"\"\"simple docstring\"\"\"\r\r\r\r\r\r\rfrom __future__ import annotations\r\rfrom numpy import array, cos, cross, floataa, radians, sin\rfrom numpy.typing import NDArray\r\r\r\r\r\rdef __A\t\t\t\t\t\t( a_ :\t\t\t\t\t\t\tfloat\t\t,\t\ta_ :\t\t\t\t\t\t\tfloat\t\t,\t\ta_ :\t\t\t\t\t\t\tbool = False )->\t\t\t\t\t\t\tlist[float]:\r\r\r\r\r\t\t'''simple docstring'''\r\r\r\r\r\r\r\t\tif radian_mode:\r\t\t\t\treturn [magnitude * cos(a_ ), magnitude * sin(a_ )]\r\t\treturn [magnitude * cos(radians(a_ ) ), 
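The is_safetensors_compatible expectations above reduce to one rule: every torch .bin weight file must have a .safetensors counterpart, where transformers components store torch weights as pytorch_model.bin but safetensors weights as model.safetensors. A simplified sketch of that rule, ignoring the fp16-variant handling some of the cases above exercise and making no claim about diffusers' actual implementation:

def has_safetensors_counterparts(filenames):
    # Hypothetical helper illustrating the rule checked by the tests above.
    safetensors_stems = {
        name[: -len(".safetensors")] for name in filenames if name.endswith(".safetensors")
    }
    for name in filenames:
        if not name.endswith(".bin"):
            continue
        stem = name[: -len(".bin")]
        renamed = stem.replace("pytorch_model", "model")  # pytorch_model.bin <-> model.safetensors
        if stem not in safetensors_stems and renamed not in safetensors_stems:
            return False
    return True


# Mirrors two of the expectations above:
assert has_safetensors_counterparts(
    ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
)
assert not has_safetensors_counterparts(["unet/diffusion_pytorch_model.bin"])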
magnitude * sin(radians(a_ ) )]\r\r\r\r\r\rdef __A\t\t\t\t\t\t( a_ :\t\t\t\t\t\t\tNDArray[floataa]\t\t,\t\ta_ :\t\t\t\t\t\t\tNDArray[floataa]\t\t,\t\ta_ :\t\t\t\t\t\t\tfloat = 10**-1 )->\t\t\t\t\t\t\tbool:\r\r\r\r\r\t\t'''simple docstring'''\r\r\r\r\r\r\r\t\tSCREAMING_SNAKE_CASE : NDArray[floataa]\t\t\t\t= cross(a_\t\t,\t\ta_ )\r\t\tSCREAMING_SNAKE_CASE : float\t\t\t\t= sum(a_ )\r\t\treturn abs(a_ ) < eps\r\r\rif __name__ == \"__main__\":\r\t\t\t\t\t# Test to check if it works\r\t\t\t\t\tlowerCamelCase__ : Optional[Any]\t\t\t\t\t\t =\t\t\tarray(\r\t\t\t\t\t [\r\t\t\t\t\t polar_force(7_1_8.4, 180 - 30),\r\t\t\t\t\t polar_force(8_7_9.5_4, 45),\r\t\t\t\t\t polar_force(100, -90),\r\t\t\t\t\t ]\r\t\t\t\t\t)\r\r\t\t\t\t\tlowerCamelCase__ : NDArray[floataa]\t\t\t\t\t\t =\t\t\tarray([[0, 0], [0, 0], [0, 0]])\r\r\t\t\t\t\tassert in_static_equilibrium(forces, location)\r\r\t\t\t\t\t# Problem 1 in image_data/2D_problems.jpg\r\t\t\t\t\tlowerCamelCase__ : Union[str, Any]\t\t\t\t\t\t =\t\t\tarray(\r\t\t\t\t\t [\r\t\t\t\t\t polar_force(30 * 9.8_1, 15),\r\t\t\t\t\t polar_force(215, 180 - 45),\r\t\t\t\t\t polar_force(264, 90 - 30),\r\t\t\t\t\t ]\r\t\t\t\t\t)\r\r\t\t\t\t\tlowerCamelCase__ : Any\t\t\t\t\t\t =\t\t\tarray([[0, 0], [0, 0], [0, 0]])\r\r\t\t\t\t\tassert in_static_equilibrium(forces, location)\r\r\t\t\t\t\t# Problem in image_data/2D_problems_1.jpg\r\t\t\t\t\tlowerCamelCase__ : Union[str, Any]\t\t\t\t\t\t =\t\t\tarray([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])\r\r\t\t\t\t\tlowerCamelCase__ : Optional[int]\t\t\t\t\t\t =\t\t\tarray([[0, 0], [6, 0], [10, 0], [12, 0]])\r\r\t\t\t\t\tassert in_static_equilibrium(forces, location)\r\r\t\t\t\t\timport doctest\r\r\t\t\t\t\tdoctest.testmod()\r\r\r\r\r\r"},"style_context_codestyle":{"kind":"number","value":18,"string":"18"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":584,"cells":{"code":{"kind":"string","value":"from collections import OrderedDict\nfrom typing import Any, Mapping, Optional\n\nfrom ... 
Row 584 (code_codestyle 524)
- code: an obfuscated Marian configuration module. A PretrainedConfig subclass (model_type "marian", keys_to_ignore_at_inference ["past_key_values"], attribute map num_attention_heads -> encoder_attention_heads and hidden_size -> d_model) stores vocab_size, decoder_vocab_size, max_position_embeddings, d_model, encoder/decoder layer counts, attention heads and ffn dims, dropouts, activation function, init_std, layerdrop, use_cache, scale_embedding and share_encoder_decoder_embeddings, then calls super().__init__ with the pad/eos/decoder_start/forced_eos token ids. A companion seq2seq ONNX export config declares batch/encoder_sequence/decoder_sequence input and output axes (switching the decoder axis to "past_decoder_sequence + sequence" when use_past is set), fills per-layer past_key_values axes, and provides dummy-input generators: a default/seq2seq-lm path that merges encoder inputs with decoder_* inputs and appends zero-filled past key/value tensors for every layer, a causal-lm path that extends the attention mask and builds one (key, value) pair per layer, and a tokenizer-based path that feeds " ".join([unk_token]) * seq_length batches through compute_effective_axis_dimension. A dispatcher picks the generator by task, _flatten_past_key_values_ is overridden for the non-seq2seq case, and atol_for_validation returns 1e-4.
- style_context (style_context_codestyle 35, label 0): a depth-estimation pipeline test module. It hashes output images with hashlib, builds a DepthEstimationPipeline over MODEL_FOR_DEPTH_ESTIMATION_MAPPING, runs COCO fixture images and hf-internal-testing/fixtures_image_utils samples through the pipeline and asserts every result has a predicted_depth tensor and a PIL depth image, skips the TF variant ("Depth estimation is not implemented in TF"), and in a slow test loads Intel/dpt-large and checks predicted_depth max 29.304 and min 2.662; the small-model test is skipped because no tiny GLPN or DPT checkpoint exists.

Row 585 (code_codestyle 413, style_context_codestyle 413, label 1)
- code: a one-layer classification head, an nn.Module whose __init__ records class_size and embed_size and creates nn.Linear(embed_size, class_size), and whose forward returns self.mlp(hidden_state); commented-out lines sketch an unused two-layer variant.
- style_context: a Project-Euler-style one-liner that returns a * b * (1000 - a - b) for the Pythagorean triplet with a + b + c = 1000, printed under __main__.

Row 586 (code_codestyle 27)
- code: a Segment Anything (SAM) checkpoint conversion script. A KEYS_TO_MODIFY_MAPPING table rewrites original keys (image_encoder -> vision_encoder, mask_downscaling.* -> mask_embed.*, iou_prediction_head and mask-decoder upscaling renames, neck.* and patch_embed.proj renames, .norm -> .layer_norm, blocks -> layers), and replace_keys() applies it, drops pixel_mean/pixel_std and re-indexes the output_hypernetworks_mlps layers. The converter downloads checkpoints/{model_name}.pth from the ybelkada/segment-anything repo, selects a SamConfig for the vit_b, vit_l (hidden 1024, 24 layers, global attention at 5/11/17/23) or vit_h (hidden 1280, 32 layers, global attention at 7/15/23/31) variant, loads the renamed state dict into SamModel alongside a SamImageProcessor/SamProcessor, runs a car test image on CUDA with point, box and two-point prompts, and asserts the expected IoU scores (0.5798..., 0.9712..., 0.8686..., 0.9936...). An argparse block exposes model_name, pytorch_dump_folder_path, push_to_hub and model_hub_id and calls convert_sam_checkpoint.
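Both conversion scripts in this row follow the same pattern: walk a checkpoint's state dict and rewrite key names through a substitution table. A generic sketch of that pattern is given below; the mapping is a small excerpt of the SAM table shown in the dump, and the helper name is an invention for illustration.

# Excerpt of the substitution table from the dump; the full table also renames
# the IoU prediction head, neck, patch embedding and mask-decoder upscaling keys.
KEY_RENAMES = {
    "image_encoder": "vision_encoder",
    "mask_downscaling.0": "mask_embed.conv1",
    "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
    "patch_embed.proj": "patch_embed.projection",
    "blocks": "layers",
}


def rename_state_dict(state_dict: dict, renames: dict = KEY_RENAMES) -> dict:
    # Return a copy of the state dict with matching substrings rewritten,
    # dropping normalization buffers the target model does not expect.
    converted = {}
    for key, value in state_dict.items():
        if key in ("pixel_mean", "pixel_std"):
            continue
        new_key = key
        for old, new in renames.items():
            if old in new_key:
                new_key = new_key.replace(old, new)
        converted[new_key] = value
    return converted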
- style_context (style_context_codestyle 114, label 0): a GroupViT checkpoint conversion script. rename_key() maps the original names onto the Hugging Face layout (img_encoder.* -> vision_model.*, blocks -> layers, attn -> self_attn, norm1/norm2 -> layer_norm1/layer_norm2, text_encoder.transformer.resblocks -> text_model.encoder.layers, ln_1/ln_2/c_fc/c_proj/ln_final renames, img_projector/text_projector -> visual_projection/text_projection). convert_state_dict() splits fused qkv and in_proj weights into query/key/value slices of the hidden size and squeezes the projection layers. convert_groupvit_checkpoint() loads the checkpoint, checks that the only missing key is text_model.embeddings.position_ids and the only unexpected one is multi_label_logit_scale, runs CLIPProcessor openai/clip-vit-base-patch32 on a COCO cat image with two captions, compares logits_per_image against [[13.3523, 6.3629]] for groupvit-gcc-yfcc and [[16.1873, 8.6230]] for groupvit-gcc-redcaps, saves the processor and model, and optionally pushes both to the hub under the nielsr organization; an argparse block wires up the checkpoint path, dump folder, model name and push_to_hub flag.

Row 587 (code_codestyle 27, style_context_codestyle 27, label 1)
- code: a bouncy-number solver: check_bouncy(n) flags integers whose digits are neither sorted ascending nor descending, and solution(percent=99) counts upward until the proportion of bouncy numbers reaches the requested percentage; a __main__ block runs doctest and prints solution(99).
- style_context: two one-line encode/decode helpers around the standard-library base64 codecs (rendered as baseaa.aaaencode / aaadecode in the dump), converting a str to bytes and back to UTF-8, plus doctest.testmod().
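The bouncy-number cell is short enough to restate directly. The sketch below keeps its logic and its error messages; only the mangled local names are replaced with readable ones.

def check_bouncy(n: int) -> bool:
    # A number is bouncy when its digits are neither entirely non-decreasing
    # nor entirely non-increasing.
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    digits = str(n)
    ascending = "".join(sorted(digits))
    return ascending != digits and ascending[::-1] != digits


def solution(percent: float = 99) -> int:
    # Smallest number at which the proportion of bouncy numbers reaches `percent`%.
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_count = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_count += 1
        if (bouncy_count / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    print(f"{solution(99) = }")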
Remove at your own risks.\ncheck_min_version(\"4.31.0\")\n\nrequire_version(\"datasets>=1.8.0\", \"To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt\")\n\n_lowerCAmelCase\t\t\t\t\t\t\t: Union[str, Any]\t\t\t\t=\t\t\t\tlist(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())\n_lowerCAmelCase\t\t\t\t\t\t\t: Tuple\t\t\t\t=\t\t\t\ttuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n\n\n\n\n\n\n@dataclass\nclass UpperCAmelCase_ :\n __SCREAMING_SNAKE_CASE\t\t: Optional[str]\t\t\t\t\t\t=\t\t\t\t\tfield(\n default='cifar10'\t\t\t, metadata={'help': 'Name of a dataset from the datasets package'}\t\t)\n __SCREAMING_SNAKE_CASE\t\t: Optional[str]\t\t\t\t\t\t=\t\t\t\t\tfield(\n default=_UpperCamelCase\t\t\t, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}\t\t)\n __SCREAMING_SNAKE_CASE\t\t: Optional[str]\t\t\t\t\t\t=\t\t\t\t\tfield(\n default=_UpperCamelCase\t\t\t, metadata={'help': 'The column name of the images in the files. If not set, will try to use \\'image\\' or \\'img\\'.'}\t\t\t, )\n __SCREAMING_SNAKE_CASE\t\t: Optional[str]\t\t\t\t\t\t=\t\t\t\t\tfield(default=_UpperCamelCase\t\t\t, metadata={'help': 'A folder containing the training data.'}\t\t)\n __SCREAMING_SNAKE_CASE\t\t: Optional[str]\t\t\t\t\t\t=\t\t\t\t\tfield(default=_UpperCamelCase\t\t\t, metadata={'help': 'A folder containing the validation data.'}\t\t)\n __SCREAMING_SNAKE_CASE\t\t: Optional[float]\t\t\t\t\t\t=\t\t\t\t\tfield(\n default=0.15\t\t\t, metadata={'help': 'Percent to split off of train for validation.'}\t\t)\n __SCREAMING_SNAKE_CASE\t\t: int\t\t\t\t\t\t=\t\t\t\t\tfield(default=3_2\t\t\t, metadata={'help': 'The size of the square patches to use for masking.'}\t\t)\n __SCREAMING_SNAKE_CASE\t\t: float\t\t\t\t\t\t=\t\t\t\t\tfield(\n default=0.6\t\t\t, metadata={'help': 'Percentage of patches to mask.'}\t\t\t, )\n __SCREAMING_SNAKE_CASE\t\t: Optional[int]\t\t\t\t\t\t=\t\t\t\t\tfield(\n default=_UpperCamelCase\t\t\t, metadata={\n 'help': (\n 'For debugging purposes or quicker training, truncate the number of training examples to this '\n 'value if set.'\n )\n }\t\t\t, )\n __SCREAMING_SNAKE_CASE\t\t: Optional[int]\t\t\t\t\t\t=\t\t\t\t\tfield(\n default=_UpperCamelCase\t\t\t, metadata={\n 'help': (\n 'For debugging purposes or quicker training, truncate the number of evaluation examples to this '\n 'value if set.'\n )\n }\t\t\t, )\n\n\n\n def \t\tsnake_case_ (\tself\t: str\t\t):\n _UpperCAmelCase : List[str]\t\t\t\t\t\t\t\t\t\t\t\t\t\t= {}\n if self.train_dir is not None:\n _UpperCAmelCase : Optional[int]\t\t\t\t\t\t\t\t\t\t\t\t\t\t= self.train_dir\n if self.validation_dir is not None:\n _UpperCAmelCase : Union[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t\t\t= self.validation_dir\n _UpperCAmelCase : str\t\t\t\t\t\t\t\t\t\t\t\t\t\t= data_files if data_files else None\n\n\n\n\n\n\n\n@dataclass\nclass UpperCAmelCase_ :\n __SCREAMING_SNAKE_CASE\t\t: str\t\t\t\t\t\t=\t\t\t\t\tfield(\n default=_UpperCamelCase\t\t\t, metadata={\n 'help': (\n 'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '\n 'checkpoint identifier on the hub. 
'\n 'Don\\'t set if you want to train a model from scratch.'\n )\n }\t\t\t, )\n __SCREAMING_SNAKE_CASE\t\t: Optional[str]\t\t\t\t\t\t=\t\t\t\t\tfield(\n default=_UpperCamelCase\t\t\t, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_UpperCamelCase\t\t)}\t\t\t, )\n __SCREAMING_SNAKE_CASE\t\t: Optional[str]\t\t\t\t\t\t=\t\t\t\t\tfield(\n default=_UpperCamelCase\t\t\t, metadata={'help': 'Pretrained config name or path if not the same as model_name'}\t\t)\n __SCREAMING_SNAKE_CASE\t\t: Optional[str]\t\t\t\t\t\t=\t\t\t\t\tfield(\n default=_UpperCamelCase\t\t\t, metadata={\n 'help': (\n 'Override some existing default config settings when a model is trained from scratch. Example: '\n 'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'\n )\n }\t\t\t, )\n __SCREAMING_SNAKE_CASE\t\t: Optional[str]\t\t\t\t\t\t=\t\t\t\t\tfield(\n default=_UpperCamelCase\t\t\t, metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'}\t\t\t, )\n __SCREAMING_SNAKE_CASE\t\t: str\t\t\t\t\t\t=\t\t\t\t\tfield(\n default='main'\t\t\t, metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}\t\t\t, )\n __SCREAMING_SNAKE_CASE\t\t: str\t\t\t\t\t\t=\t\t\t\t\tfield(default=_UpperCamelCase\t\t\t, metadata={'help': 'Name or path of preprocessor config.'}\t\t)\n __SCREAMING_SNAKE_CASE\t\t: bool\t\t\t\t\t\t=\t\t\t\t\tfield(\n default=_UpperCamelCase\t\t\t, metadata={\n 'help': (\n 'Will use the token generated when running `huggingface-cli login` (necessary to use this script '\n 'with private models).'\n )\n }\t\t\t, )\n __SCREAMING_SNAKE_CASE\t\t: Optional[int]\t\t\t\t\t\t=\t\t\t\t\tfield(\n default=_UpperCamelCase\t\t\t, metadata={\n 'help': (\n 'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'\n )\n }\t\t\t, )\n __SCREAMING_SNAKE_CASE\t\t: Optional[int]\t\t\t\t\t\t=\t\t\t\t\tfield(\n default=_UpperCamelCase\t\t\t, metadata={\n 'help': (\n 'The size (resolution) of each patch. 
If not specified, will use `patch_size` of the configuration.'\n )\n }\t\t\t, )\n __SCREAMING_SNAKE_CASE\t\t: Optional[int]\t\t\t\t\t\t=\t\t\t\t\tfield(\n default=_UpperCamelCase\t\t\t, metadata={'help': 'Stride to use for the encoder.'}\t\t\t, )\n\n\n\n\n\n\n\nclass UpperCAmelCase_ :\n\n\n\n def __init__(\tself\t: Tuple\t, A\t: Dict=1_9_2\t, A\t: Tuple=3_2\t, A\t: str=4\t, A\t: Any=0.6\t\t):\n _UpperCAmelCase : Union[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t\t\t= input_size\n _UpperCAmelCase : str\t\t\t\t\t\t\t\t\t\t\t\t\t\t= mask_patch_size\n _UpperCAmelCase : Any\t\t\t\t\t\t\t\t\t\t\t\t\t\t= model_patch_size\n _UpperCAmelCase : List[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t\t= mask_ratio\n\n if self.input_size % self.mask_patch_size != 0:\n raise ValueError(\"Input size must be divisible by mask patch size\"\t\t)\n if self.mask_patch_size % self.model_patch_size != 0:\n raise ValueError(\"Mask patch size must be divisible by model patch size\"\t\t)\n\n _UpperCAmelCase : int\t\t\t\t\t\t\t\t\t\t\t\t\t\t= self.input_size // self.mask_patch_size\n _UpperCAmelCase : int\t\t\t\t\t\t\t\t\t\t\t\t\t\t= self.mask_patch_size // self.model_patch_size\n\n _UpperCAmelCase : List[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t\t= self.rand_size**2\n _UpperCAmelCase : Any\t\t\t\t\t\t\t\t\t\t\t\t\t\t= int(np.ceil(self.token_count * self.mask_ratio\t\t)\t\t)\n\n\n\n def __call__(\tself\t: List[Any]\t\t):\n _UpperCAmelCase : Optional[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t\t= np.random.permutation(self.token_count\t\t)[: self.mask_count]\n _UpperCAmelCase : List[str]\t\t\t\t\t\t\t\t\t\t\t\t\t\t= np.zeros(self.token_count\t, dtype=A\t\t)\n _UpperCAmelCase : Union[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t\t\t= 1\n\n _UpperCAmelCase : Union[str, Any]\t\t\t\t\t\t\t\t\t\t\t\t\t\t= mask.reshape((self.rand_size, self.rand_size)\t\t)\n _UpperCAmelCase : Any\t\t\t\t\t\t\t\t\t\t\t\t\t\t= mask.repeat(self.scale\t, axis=0\t\t).repeat(self.scale\t, axis=1\t\t)\n\n return torch.tensor(mask.flatten()\t\t)\n\n\n\n\n\n\n\ndef \t\t__snake_case\t\t( SCREAMING_SNAKE_CASE__ :\t\tstr\t\t\t\t\t\t\t) -> int:\n\n\n\n\n\n '''simple docstring'''\n\n _UpperCAmelCase : Dict\t\t\t\t\t\t\t\t\t\t\t\t\t\t= torch.stack([example[\"pixel_values\"] for example in examples]\t\t\t\t\t\t\t)\n _UpperCAmelCase : int\t\t\t\t\t\t\t\t\t\t\t\t\t\t= torch.stack([example[\"mask\"] for example in examples]\t\t\t\t\t\t\t)\n return {\"pixel_values\": pixel_values, \"bool_masked_pos\": mask}\n\ndef \t\t__snake_case\t\t( ) -> Union[str, Any]:\n\n\n\n\n\n '''simple docstring'''\n\n _UpperCAmelCase : List[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t\t= HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)\t\t\t\t\t\t\t)\n if len(sys.argv\t\t\t\t\t\t\t) == 2 and sys.argv[1].endswith(\".json\"\t\t\t\t\t\t\t):\n # If we pass only one argument to the script and it's the path to a json file,\n # let's parse it to get our arguments.\n _UpperCAmelCase\t\t\t\t\t\t\t,\t\t\t_UpperCAmelCase\t\t\t\t\t\t\t,\t\t\t_UpperCAmelCase : Dict\t\t\t\t\t\t\t\t\t\t\t\t\t\t= parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]\t\t\t\t\t\t\t)\t\t\t\t\t\t\t)\n else:\n _UpperCAmelCase\t\t\t\t\t\t\t,\t\t\t_UpperCAmelCase\t\t\t\t\t\t\t,\t\t\t_UpperCAmelCase : Optional[int]\t\t\t\t\t\t\t\t\t\t\t\t\t\t= parser.parse_args_into_dataclasses()\n\n # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. 
The\n # information sent is the one passed as arguments along with your Python/PyTorch versions.\n send_example_telemetry(\"run_mim\"\t\t, SCREAMING_SNAKE_CASE__\t\t, SCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t)\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\"\t\t, datefmt=\"%m/%d/%Y %H:%M:%S\"\t\t, handlers=[logging.StreamHandler(sys.stdout\t\t\t\t\t\t\t)]\t\t, )\n\n if training_args.should_log:\n # The default of training_args.log_level is passive, so we set log level at info here to have that default.\n transformers.utils.logging.set_verbosity_info()\n\n _UpperCAmelCase : List[str]\t\t\t\t\t\t\t\t\t\t\t\t\t\t= training_args.get_process_log_level()\n logger.setLevel(SCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t)\n transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__\t\t\t\t\t\t\t)\n transformers.utils.logging.enable_default_handler()\n transformers.utils.logging.enable_explicit_format()\n\n # Log on each process the small summary:\n logger.warning(\n f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'\n + f'distributed training: {bool(training_args.local_rank != -1\t\t\t\t\t\t\t)}, 16-bits training: {training_args.fpaa}'\t\t\t\t\t\t\t)\n logger.info(f'Training/evaluation parameters {training_args}'\t\t\t\t\t\t\t)\n\n # Detecting last checkpoint.\n _UpperCAmelCase : Optional[int]\t\t\t\t\t\t\t\t\t\t\t\t\t\t= None\n if os.path.isdir(training_args.output_dir\t\t\t\t\t\t\t) and training_args.do_train and not training_args.overwrite_output_dir:\n _UpperCAmelCase : List[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t\t= get_last_checkpoint(training_args.output_dir\t\t\t\t\t\t\t)\n if last_checkpoint is None and len(os.listdir(training_args.output_dir\t\t\t\t\t\t\t)\t\t\t\t\t\t\t) > 0:\n raise ValueError(\n f'Output directory ({training_args.output_dir}) already exists and is not empty. '\n \"Use --overwrite_output_dir to overcome.\"\t\t\t\t\t\t\t)\n elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:\n logger.info(\n f'Checkpoint detected, resuming training at {last_checkpoint}. 
The row's code field continues with the remainder of a masked-image-modeling (SimMIM) pretraining script whose identifiers have been machine-obfuscated (code_codestyle 289, label 1). The script loads the image dataset and splits off a validation set when none exists, builds the model config (forcing the "simmim" decoder type and updating image_size, patch_size and encoder_stride, with optional config overrides), creates an image processor and an AutoModelForMaskedImageModeling (from a pretrained checkpoint or from scratch), applies SimMIM-style transforms (RGB conversion, random resized crop, horizontal flip, tensor conversion, normalization) together with a patch mask generator, and finally runs Trainer-based training with checkpoint resumption, evaluation, metric logging, and an optional push to the Hub. The paired style_context field holds the LiLT package's lazy import table (LiltConfig plus the Lilt model classes behind a _LazyModule).
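A minimal sketch of the patch-mask generation step described above, assuming a square input divided into non-overlapping patches; the class name, default sizes and the uniform-random masking policy are illustrative rather than taken from the row.

# A minimal SimMIM-style mask generator: produces a binary patch mask at a
# target mask ratio for a square image split into fixed-size patches.
# Parameter names and defaults are illustrative, not taken from the script above.
import numpy as np


class RandomPatchMaskGenerator:
    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        if input_size % mask_patch_size != 0 or mask_patch_size % model_patch_size != 0:
            raise ValueError("input_size / mask_patch_size / model_patch_size must divide evenly")
        self.rand_size = input_size // mask_patch_size       # mask grid resolution
        self.scale = mask_patch_size // model_patch_size     # upsampling factor to model patches
        self.token_count = self.rand_size ** 2
        self.mask_count = int(np.ceil(self.token_count * mask_ratio))

    def __call__(self):
        # choose which coarse patches to mask, then upsample to the model patch grid
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1
        mask = mask.reshape(self.rand_size, self.rand_size)
        return mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)


if __name__ == "__main__":
    gen = RandomPatchMaskGenerator()
    m = gen()
    print(m.shape, m.mean())  # (48, 48), roughly the requested mask ratio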
Dataset row 589 (code_codestyle 171, style_context_codestyle 171, label 1). Code: a torch data collator for entity-aware token classification in the LUKE style. A padding helper right- or left-pads variable-length per-example tensors to the batch sequence length, a second helper tests whether a character is punctuation, and the collator pads labels with the ignore index, pads the "ner_tags" and "original_entity_spans" features to fixed shapes, and converts the batch to integer tensors. Style context: the VAN (Visual-Attention-Network) configuration class (image size, patch sizes, strides, hidden sizes, depths, MLP ratios, layer-scale and drop-path settings).
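A small sketch of the fixed-length padding the collator performs, assuming right padding with -100 as the ignore value (both are common defaults, not read off the row above).

# Right/left padding of variable-length label sequences to a fixed length,
# as a data collator typically does before batching.
import torch


def pad_sequences(sequences, sequence_length, pad_value=-100, padding_side="right"):
    padded = []
    for seq in sequences:
        seq = list(seq)[:sequence_length]                 # truncate if too long
        pad = [pad_value] * (sequence_length - len(seq))  # fill the rest
        padded.append(seq + pad if padding_side == "right" else pad + seq)
    return torch.tensor(padded, dtype=torch.int64)


if __name__ == "__main__":
    batch = pad_sequences([[1, 2, 3], [4]], sequence_length=5)
    print(batch)
    # tensor([[   1,    2,    3, -100, -100],
    #         [   4, -100, -100, -100, -100]])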
Dataset row 590 (code_codestyle 488, label 1). Code: a Flax/JAX ControlNet implementation. It defines a FlaxControlNetOutput dataclass, a conditioning-embedding module (a conv-in followed by strided downsampling convolutions and a zero-initialized conv-out), and a FlaxControlNetModel whose setup builds the timestep projection and embedding, the conditioning embedding, cross-attention and plain down blocks each paired with zero-initialized 1x1 "controlnet" convolutions, a cross-attention mid block, and a zero-initialized mid-block convolution. Its __call__ flips BGR inputs to RGB when needed, embeds the timestep, adds the conditioning embedding to the latent sample, runs the down and mid blocks, passes every residual through its zero convolution, scales the results by conditioning_scale, and returns them as a FlaxControlNetOutput (or a plain tuple). Style context: a greedy knapsack example built around a small Things class with name, value and weight accessors.
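A hedged sketch of the zero-initialized convolution idea that the ControlNet blocks rely on, written with plain flax.linen; feature counts and shapes are illustrative.

# A "zero convolution" as used by ControlNet-style adapters: a 1x1 convolution
# whose kernel and bias start at zero, so the control branch initially
# contributes nothing to the base model.
import jax
import jax.numpy as jnp
import flax.linen as nn


class ZeroConv(nn.Module):
    features: int

    @nn.compact
    def __call__(self, x):
        return nn.Conv(
            self.features,
            kernel_size=(1, 1),
            kernel_init=nn.initializers.zeros,
            bias_init=nn.initializers.zeros,
        )(x)


if __name__ == "__main__":
    x = jnp.ones((1, 64, 64, 320))                      # NHWC feature map
    params = ZeroConv(features=320).init(jax.random.PRNGKey(0), x)
    y = ZeroConv(features=320).apply(params, x)
    print(bool(jnp.all(y == 0)))                        # True at initialization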
The greedy knapsack style context concludes (style_context_codestyle 488, label 1): items are sorted by a caller-supplied key, added while the cumulative weight stays within the budget, and the accumulated value is returned, with a doctest entry point at the bottom. Dataset row 591 then begins (code_codestyle 716): an EfficientNet model configuration with width and depth coefficients, depth divisor, kernel sizes, input and output channel lists, depthwise padding and stride settings.
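A sketch of that greedy selection pattern: sort by a caller-supplied key and keep adding items while the weight budget allows. Item names, the value-per-weight key and the budget below are made up for illustration.

# Greedy selection under a weight budget. This is a heuristic, not an exact
# 0/1 knapsack solver.
from typing import Callable, List, Tuple


def greedy_select(items: List[Tuple[str, float, float]], max_weight: float,
                  key: Callable[[Tuple[str, float, float]], float]) -> Tuple[List[str], float]:
    chosen, total_weight, total_value = [], 0.0, 0.0
    for name, value, weight in sorted(items, key=key, reverse=True):
        if total_weight + weight <= max_weight:
            chosen.append(name)
            total_weight += weight
            total_value += value
    return chosen, total_value


if __name__ == "__main__":
    menu = [("burger", 80.0, 40.0), ("pizza", 100.0, 100.0), ("salad", 70.0, 65.0)]
    print(greedy_select(menu, max_weight=120.0, key=lambda item: item[1] / item[2]))
    # (['burger', 'salad'], 150.0)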
The EfficientNet configuration continues with block repeat counts, expansion ratios, the squeeze-excitation ratio, hidden activation and hidden dimension, pooling type, initializer range, batch-norm epsilon and momentum, dropout and drop-connect rates, and a derived layer count; an accompanying ONNX config declares a pixel_values input (batch, channels, height, width) and a validation tolerance of 1e-5. The row's style context begins: a version-gated deprecation helper in the diffusers style.
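A sketch of the channel-rounding rule that EfficientNet-style width scaling typically uses with a depth divisor; the helper name and constants are assumptions, not part of the configuration class above.

# EfficientNet-style channel scaling: multiply a base channel count by the
# width coefficient and round to a multiple of the depth divisor, never
# dropping more than 10% of the scaled value.
def round_filters(filters: int, width_coefficient: float, depth_divisor: int = 8) -> int:
    scaled = filters * width_coefficient
    new_filters = max(depth_divisor, int(scaled + depth_divisor / 2) // depth_divisor * depth_divisor)
    if new_filters < 0.9 * scaled:  # guard against rounding down too aggressively
        new_filters += depth_divisor
    return int(new_filters)


if __name__ == "__main__":
    # e.g. a width coefficient of 2.0 roughly doubles each stage's channels
    print([round_filters(c, 2.0) for c in (32, 16, 24, 40)])  # [64, 32, 48, 80]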
The deprecation helper takes (attribute, version, message) tuples, raises if the library version has already passed the removal version, pops deprecated keyword arguments or reads deprecated attributes while emitting FutureWarning messages, and reports unexpected keyword arguments with the caller's file, line and function; the row closes with style_context_codestyle 298 and label 0. Dataset row 592 then begins (code_codestyle 22): unit tests for optimizers and learning-rate schedulers, with helpers that step a scheduler while recording learning rates (optionally saving and reloading its state mid-run) and a test that AdamW drives a tiny least-squares problem to the target weights.
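A generic sketch of a version-gated deprecation warning in the same spirit; the function name and signature are invented for illustration and are not the library's API.

# Warn about a deprecated name, and fail loudly if the removal version has
# already been reached.
import warnings
from packaging import version


def warn_deprecated(name: str, removed_in: str, current_version: str, message: str = "") -> None:
    if version.parse(current_version) >= version.parse(removed_in):
        raise ValueError(f"`{name}` should already have been removed in {removed_in} (current: {current_version})")
    warnings.warn(f"`{name}` is deprecated and will be removed in version {removed_in}. {message}".strip(), FutureWarning)


if __name__ == "__main__":
    warn_deprecated("old_argument", removed_in="2.0.0", current_version="1.4.1")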
The optimizer tests continue: an Adafactor configuration is checked on the same least-squares problem, and a scheduler test maps each factory (constant, constant with warmup, linear, cosine, cosine with hard restarts, polynomial decay, inverse square root) to its expected ten-step learning-rate trajectory, asserts the values to two decimals, and re-checks them after saving and reloading the schedule, wrapping the schedule lambdas so they stay picklable. Style context (style_context_codestyle 694, label 0): the start of a maximum-flow implementation, a FlowNetwork class that normalizes multiple sources and sinks into a single fake source and sink with matching capacities.
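A standalone sketch of the linear warmup-then-decay multiplier whose trajectory the scheduler test expects (warmup over 2 steps, decay to zero by step 10); it is not the test suite's scheduler factory.

# Linear warmup followed by linear decay, expressed as a learning-rate
# multiplier over discrete steps.
def linear_warmup_decay(step: int, num_warmup_steps: int, num_training_steps: int) -> float:
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))


if __name__ == "__main__":
    base_lr = 10.0
    lrs = [round(base_lr * linear_warmup_decay(s, 2, 10), 2) for s in range(10)]
    print(lrs)  # [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]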
_a :\t\t\t\t\tOptional[int]\t =[sinks]\r\n\r\n if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0:\r\n return\r\n\r\n _a :\t\t\t\t\tUnion[str, Any]\t =sources[0]\r\n _a :\t\t\t\t\tTuple\t =sinks[0]\r\n\r\n # make fake vertex if there are more\r\n # than one source or sink\r\n if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1:\r\n _a :\t\t\t\t\tTuple\t =0\r\n for i in sources:\r\n max_input_flow += sum(self.graph[i] )\r\n\r\n _a :\t\t\t\t\tList[Any]\t =len(self.graph ) + 1\r\n for room in self.graph:\r\n room.insert(0\t,\t\t0 )\r\n self.graph.insert(0\t,\t\t[0] * size )\r\n for i in sources:\r\n _a :\t\t\t\t\tAny\t =max_input_flow\r\n _a :\t\t\t\t\tList[str]\t =0\r\n\r\n _a :\t\t\t\t\tList[str]\t =len(self.graph ) + 1\r\n for room in self.graph:\r\n room.append(0 )\r\n self.graph.append([0] * size )\r\n for i in sinks:\r\n _a :\t\t\t\t\tstr\t =max_input_flow\r\n _a :\t\t\t\t\tOptional[Any]\t =size - 1\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __UpperCAmelCase\t\t\t(\t\t\t\t\tself :Optional[int] )\t\t\t-> Tuple:\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n if self.maximum_flow_algorithm is None:\r\n raise Exception(\"\"\"You need to set maximum flow algorithm before.\"\"\" )\r\n if self.source_index is None or self.sink_index is None:\r\n return 0\r\n\r\n self.maximum_flow_algorithm.execute()\r\n return self.maximum_flow_algorithm.getMaximumFlow()\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __UpperCAmelCase\t\t\t(\t\t\t\t\tself :Any\t,\t\tSCREAMING_SNAKE_CASE :Dict )\t\t\t-> int:\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n _a :\t\t\t\t\tTuple\t =algorithm(self )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass A__\t\t\t:\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __init__(\t\t\t\t\tself :Tuple\t,\t\tSCREAMING_SNAKE_CASE :Optional[Any] )\t\t\t-> Dict:\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n _a :\t\t\t\t\tList[str]\t =flow_network\r\n _a :\t\t\t\t\tList[Any]\t =flow_network.verticesCount\r\n _a :\t\t\t\t\tstr\t =flow_network.sourceIndex\r\n _a :\t\t\t\t\tstr\t =flow_network.sinkIndex\r\n # it's just a reference, so you shouldn't change\r\n # it in your algorithms, use deep copy before doing that\r\n _a :\t\t\t\t\tList[Any]\t =flow_network.graph\r\n _a :\t\t\t\t\tOptional[int]\t =False\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __UpperCAmelCase\t\t\t(\t\t\t\t\tself :List[Any] )\t\t\t-> List[str]:\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n if not self.executed:\r\n self._algorithm()\r\n _a :\t\t\t\t\tAny\t =True\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __UpperCAmelCase\t\t\t(\t\t\t\t\tself :Union[str, Any] )\t\t\t-> Optional[int]:\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n pass\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass A__\t\t\t(\t\tUpperCAmelCase__ ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __init__(\t\t\t\t\tself :int\t,\t\tSCREAMING_SNAKE_CASE :str )\t\t\t-> int:\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n super().__init__(SCREAMING_SNAKE_CASE )\r\n # use this to save your result\r\n _a :\t\t\t\t\tList[Any]\t =-1\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __UpperCAmelCase\t\t\t(\t\t\t\t\tself :Dict )\t\t\t-> Tuple:\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n if not self.executed:\r\n raise Exception(\"\"\"You should execute algorithm before using its result!\"\"\" )\r\n\r\n return self.maximum_flow\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass A__\t\t\t(\t\tUpperCAmelCase__ ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __init__(\t\t\t\t\tself :str\t,\t\tSCREAMING_SNAKE_CASE :Tuple )\t\t\t-> str:\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n super().__init__(SCREAMING_SNAKE_CASE )\r\n\r\n _a :\t\t\t\t\tint\t =[[0] * self.verticies_count 
for i in range(self.verticies_count )]\r\n\r\n _a :\t\t\t\t\tUnion[str, Any]\t =[0] * self.verticies_count\r\n _a :\t\t\t\t\tOptional[Any]\t =[0] * self.verticies_count\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __UpperCAmelCase\t\t\t(\t\t\t\t\tself :Optional[int] )\t\t\t-> Optional[Any]:\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n _a :\t\t\t\t\tint\t =self.verticies_count\r\n\r\n # push some substance to graph\r\n for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):\r\n self.preflow[self.source_index][nextvertex_index] += bandwidth\r\n self.preflow[nextvertex_index][self.source_index] -= bandwidth\r\n self.excesses[nextvertex_index] += bandwidth\r\n\r\n # Relabel-to-front selection rule\r\n _a :\t\t\t\t\tTuple\t =[\r\n i\r\n for i in range(self.verticies_count )\r\n if i != self.source_index and i != self.sink_index\r\n ]\r\n\r\n # move through list\r\n _a :\t\t\t\t\tList[Any]\t =0\r\n while i < len(SCREAMING_SNAKE_CASE ):\r\n _a :\t\t\t\t\tAny\t =vertices_list[i]\r\n _a :\t\t\t\t\tstr\t =self.heights[vertex_index]\r\n self.process_vertex(SCREAMING_SNAKE_CASE )\r\n if self.heights[vertex_index] > previous_height:\r\n # if it was relabeled, swap elements\r\n # and start from 0 index\r\n vertices_list.insert(0\t,\t\tvertices_list.pop(SCREAMING_SNAKE_CASE ) )\r\n _a :\t\t\t\t\tList[str]\t =0\r\n else:\r\n i += 1\r\n\r\n _a :\t\t\t\t\tOptional[int]\t =sum(self.preflow[self.source_index] )\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __UpperCAmelCase\t\t\t(\t\t\t\t\tself :Optional[int]\t,\t\tSCREAMING_SNAKE_CASE :Union[str, Any] )\t\t\t-> List[str]:\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n while self.excesses[vertex_index] > 0:\r\n for neighbour_index in range(self.verticies_count ):\r\n # if it's neighbour and current vertex is higher\r\n if (\r\n self.graph[vertex_index][neighbour_index]\r\n - self.preflow[vertex_index][neighbour_index]\r\n > 0\r\n and self.heights[vertex_index] > self.heights[neighbour_index]\r\n ):\r\n self.push(SCREAMING_SNAKE_CASE\t,\t\tSCREAMING_SNAKE_CASE )\r\n\r\n self.relabel(SCREAMING_SNAKE_CASE )\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __UpperCAmelCase\t\t\t(\t\t\t\t\tself :int\t,\t\tSCREAMING_SNAKE_CASE :Optional[Any]\t,\t\tSCREAMING_SNAKE_CASE :str )\t\t\t-> List[str]:\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n _a :\t\t\t\t\tList[str]\t =min(\r\n self.excesses[from_index]\t,\t\tself.graph[from_index][to_index] - self.preflow[from_index][to_index]\t,\t\t)\r\n self.preflow[from_index][to_index] += preflow_delta\r\n self.preflow[to_index][from_index] -= preflow_delta\r\n self.excesses[from_index] -= preflow_delta\r\n self.excesses[to_index] += preflow_delta\r\n\r\n\r\n\r\n\r\n\r\n\r\n def __UpperCAmelCase\t\t\t(\t\t\t\t\tself :Any\t,\t\tSCREAMING_SNAKE_CASE :Any )\t\t\t-> List[Any]:\r\n\r\n\r\n\r\n\r\n '''simple docstring'''\r\n _a :\t\t\t\t\tint\t =None\r\n for to_index in range(self.verticies_count ):\r\n if (\r\n self.graph[vertex_index][to_index]\r\n - self.preflow[vertex_index][to_index]\r\n > 0\r\n ) and (min_height is None or self.heights[to_index] < min_height):\r\n _a :\t\t\t\t\tOptional[Any]\t =self.heights[to_index]\r\n\r\n if min_height is not None:\r\n _a :\t\t\t\t\tAny\t =min_height + 1\r\n\r\n\r\nif __name__ == \"__main__\":\r\n A__: str =\t\t\t[0]\r\n A__: Optional[Any] =\t\t\t[3]\r\n # graph = [\r\n # [0, 0, 4, 6, 0, 0],\r\n # [0, 0, 5, 2, 0, 0],\r\n # [0, 0, 0, 0, 4, 4],\r\n # [0, 0, 0, 0, 6, 6],\r\n # [0, 0, 0, 0, 0, 0],\r\n # [0, 0, 0, 0, 0, 0],\r\n # ]\r\n A__: Tuple =\t\t\t[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]\r\n\r\n # 
prepare our network
    A__: Union[str, Any] = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    A__: List[str] = flow_network.find_maximum_flow()

    print(F"maximum flow is {maximum_flow}")


# --- end of dataset row (style_context_codestyle = 694, label = 0); row 593 "code" field follows ---

"""simple docstring"""
from math import pow, sqrt


def lowerCAmelCase__(*_UpperCamelCase: List[str]) -> Tuple:
    """simple docstring"""
    snake_case = len(lowerCAmelCase_) > 0 and all(value > 0.0 for value in values)
    return result


def lowerCAmelCase__(_UpperCamelCase: Any, _UpperCamelCase: List[str]) -> Union[str, Any]:
    """simple docstring"""
    return (
        round(sqrt(molar_mass_a / molar_mass_a), 6)
        if validate(lowerCAmelCase_, lowerCAmelCase_)
        else ValueError('Input Error: Molar mass values must greater than 0.')
    )


def lowerCAmelCase__(_UpperCamelCase: Tuple, _UpperCamelCase: List[Any], _UpperCamelCase: Any) -> str:
    """simple docstring"""
    return (
        round(effusion_rate * sqrt(molar_mass_a / molar_mass_a), 6)
        if validate(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_)
        else ValueError('Input Error: Molar mass and effusion rate values must greater than 0.')
    )


def lowerCAmelCase__(_UpperCamelCase: Dict, _UpperCamelCase: str, _UpperCamelCase: Dict) -> int:
    """simple docstring"""
    return (
        round(effusion_rate / sqrt(molar_mass_a / molar_mass_a), 6)
        if validate(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_)
        else ValueError('Input Error: Molar mass and effusion rate values must greater than 0.')
    )


def lowerCAmelCase__(_UpperCamelCase: Any, _UpperCamelCase: Any, _UpperCamelCase: Dict) -> List[str]:
    """simple docstring"""
    return (
        round(molar_mass / pow(effusion_rate_a / effusion_rate_a, 2), 6)
        if validate(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_)
        else ValueError('Input Error: Molar mass and effusion rate values must greater than 0.')
    )


def lowerCAmelCase__(_UpperCamelCase: List[Any], _UpperCamelCase: List[str], _UpperCamelCase: List[str]) -> Optional[int]:
    """simple docstring"""
    return (
        round(pow(effusion_rate_a / effusion_rate_a, 2) / molar_mass, 6)
        if validate(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_)
        else ValueError('Input Error: Molar mass and effusion rate values must greater than 0.')
    )
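# The row above is a deliberately obfuscated copy of Graham's-law effusion helpers: every
# function shares the same mangled name, so later definitions shadow earlier ones, and the
# bodies still reference parameters under their pre-obfuscation names. As a reading aid only
# (not part of the dataset row), here is a minimal de-obfuscated sketch of the underlying
# relation rate_1 / rate_2 = sqrt(M_2 / M_1); the function and variable names are illustrative.
from math import sqrt


def effusion_rate_ratio(molar_mass_1: float, molar_mass_2: float) -> float:
    """Return rate_1 / rate_2 for two gases given their molar masses (Graham's law)."""
    if molar_mass_1 <= 0 or molar_mass_2 <= 0:
        raise ValueError("Molar masses must be greater than 0.")
    return round(sqrt(molar_mass_2 / molar_mass_1), 6)


# Example: hydrogen (2.016 g/mol) effuses roughly 3.98x faster than oxygen (31.998 g/mol).
print(effusion_rate_ratio(2.016, 31.998))  # ~3.98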
# --- row 593: code_codestyle = 703; the row's "style_context" field follows ---

"""simple docstring"""
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def lowerCAmelCase__(_UpperCamelCase: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """simple docstring"""
    snake_case = []
    snake_case = []
    snake_case = []

    for rt in rc.restypes:
        snake_case = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        snake_case = {name: i for i, name in enumerate(_UpperCamelCase)}
        restype_atomaa_to_atomaa_list.append(
            [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types])
        restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atomaa_to_atomaa_list.append([0] * 1_4)
    restype_atomaa_to_atomaa_list.append([0] * 3_7)
    restype_atomaa_mask_list.append([0.0] * 1_4)

    snake_case = torch.tensor(_UpperCamelCase, dtype=torch.intaa, device=protein['aatype'].device)
    snake_case = torch.tensor(_UpperCamelCase, dtype=torch.intaa, device=protein['aatype'].device)
    snake_case = torch.tensor(_UpperCamelCase, dtype=torch.floataa, device=protein['aatype'].device)
    snake_case = protein['aatype'].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    snake_case = restype_atomaa_to_atomaa[protein_aatype]
    snake_case = restype_atomaa_mask[protein_aatype]

    snake_case = residx_atomaa_mask
    snake_case = residx_atomaa_to_atomaa.long()

    # create the gather indices for mapping back
    snake_case = restype_atomaa_to_atomaa[protein_aatype]
    snake_case = residx_atomaa_to_atomaa.long()

    # create the corresponding mask
    snake_case = torch.zeros([2_1, 3_7], dtype=torch.floataa, device=protein['aatype'].device)
    for restype, restype_letter in enumerate(rc.restypes):
        snake_case = rc.restype_atoa[restype_letter]
        snake_case = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            snake_case = rc.atom_order[atom_name]
            snake_case = 1

    snake_case = restype_atomaa_mask[protein_aatype]
    snake_case = residx_atomaa_mask

    return protein


def lowerCAmelCase__(_UpperCamelCase: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    """simple docstring"""
    snake_case = tree_map(lambda _UpperCamelCase: torch.tensor(_UpperCamelCase, device=batch['aatype'].device), _UpperCamelCase, np.ndarray)
    snake_case = tensor_tree_map(lambda _UpperCamelCase: np.array(_UpperCamelCase), make_atomaa_masks(_UpperCamelCase))
    return out
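# The row above is OpenFold-style atom14/atom37 bookkeeping: a per-residue-type lookup table
# is indexed with the per-position `aatype` tensor to produce per-residue gather indices and
# masks. Below is a minimal, self-contained sketch of that indexing step in plain torch; the
# toy table, sizes and names are illustrative assumptions, not the real residue_constants data.
import torch

# hypothetical lookup: 3 residue types, 4 "atom14"-like slots mapping into "atom37"-like slots
restype_atom14_to_atom37 = torch.tensor(
    [
        [0, 1, 2, 0],
        [0, 1, 3, 4],
        [0, 2, 4, 5],
    ]
)
aatype = torch.tensor([2, 0, 1])  # residue type of each of 3 positions

# advanced indexing broadcasts the lookup per position: result has shape [num_res, 4]
residx_atom14_to_atom37 = restype_atom14_to_atom37[aatype]
print(residx_atom14_to_atom37)  # rows 2, 0 and 1 of the lookup table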
# --- end of row 593 (style_context_codestyle = 104, label = 0); row 594 "code" field follows ---

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTConfig,
    MobileViTForImageClassification,
    MobileViTForSemanticSegmentation,
    MobileViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
a_ = logging.get_logger(__name__)


def __lowerCAmelCase(A_: List[str]) -> Optional[Any]:
    __UpperCAmelCase = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        __UpperCAmelCase = [1_44, 1_92, 2_40]
        __UpperCAmelCase = [16, 32, 64, 96, 1_28, 1_60, 6_40]
    elif "mobilevit_xs" in mobilevit_name:
        __UpperCAmelCase = [96, 1_20, 1_44]
        __UpperCAmelCase = [16, 32, 48, 64, 80, 96, 3_84]
    elif "mobilevit_xxs" in mobilevit_name:
        __UpperCAmelCase = [64, 80, 96]
        __UpperCAmelCase = [16, 16, 24, 48, 64, 80, 3_20]
        __UpperCAmelCase = 0.05
        __UpperCAmelCase = 2.0

    if 
mobilevit_name.startswith(\"deeplabv3_\" ):\r __UpperCAmelCase =\t\t\t\t\t\t\t5_12\r __UpperCAmelCase =\t\t\t\t\t\t\t16\r __UpperCAmelCase =\t\t\t\t\t\t\t21\r __UpperCAmelCase =\t\t\t\t\t\t\t\"pascal-voc-id2label.json\"\r else:\r __UpperCAmelCase =\t\t\t\t\t\t\t10_00\r __UpperCAmelCase =\t\t\t\t\t\t\t\"imagenet-1k-id2label.json\"\r\r __UpperCAmelCase =\t\t\t\t\t\t\t\"huggingface/label-files\"\r __UpperCAmelCase =\t\t\t\t\t\t\tjson.load(open(hf_hub_download(_A ,\t\t\t\t\t\t_A ,\t\t\t\t\t\trepo_type=\"dataset\" ) ,\t\t\t\t\t\t\"r\" ) )\r __UpperCAmelCase =\t\t\t\t\t\t\t{int(_A ): v for k, v in idalabel.items()}\r __UpperCAmelCase =\t\t\t\t\t\t\tidalabel\r __UpperCAmelCase =\t\t\t\t\t\t\t{v: k for k, v in idalabel.items()}\r\r return config\r\r\r\r\rdef __lowerCAmelCase ( A_ :\t\t\tOptional[int] ,\t\t\t\t\t\tA_ :\t\t\tDict=False ) ->\tDict:\r\r\r for i in range(1 ,\t\t\t\t\t\t6 ):\r if F'''layer_{i}.''' in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(F'''layer_{i}.''' ,\t\t\t\t\t\tF'''encoder.layer.{i - 1}.''' )\r\r if \"conv_1.\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\"conv_1.\" ,\t\t\t\t\t\t\"conv_stem.\" )\r if \".block.\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\".block.\" ,\t\t\t\t\t\t\".\" )\r\r if \"exp_1x1\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\"exp_1x1\" ,\t\t\t\t\t\t\"expand_1x1\" )\r if \"red_1x1\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\"red_1x1\" ,\t\t\t\t\t\t\"reduce_1x1\" )\r if \".local_rep.conv_3x3.\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\".local_rep.conv_3x3.\" ,\t\t\t\t\t\t\".conv_kxk.\" )\r if \".local_rep.conv_1x1.\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\".local_rep.conv_1x1.\" ,\t\t\t\t\t\t\".conv_1x1.\" )\r if \".norm.\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\".norm.\" ,\t\t\t\t\t\t\".normalization.\" )\r if \".conv.\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\".conv.\" ,\t\t\t\t\t\t\".convolution.\" )\r if \".conv_proj.\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\".conv_proj.\" ,\t\t\t\t\t\t\".conv_projection.\" )\r\r for i in range(0 ,\t\t\t\t\t\t2 ):\r for j in range(0 ,\t\t\t\t\t\t4 ):\r if F'''.{i}.{j}.''' in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(F'''.{i}.{j}.''' ,\t\t\t\t\t\tF'''.{i}.layer.{j}.''' )\r\r for i in range(2 ,\t\t\t\t\t\t6 ):\r for j in range(0 ,\t\t\t\t\t\t4 ):\r if F'''.{i}.{j}.''' in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(F'''.{i}.{j}.''' ,\t\t\t\t\t\tF'''.{i}.''' )\r if \"expand_1x1\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\"expand_1x1\" ,\t\t\t\t\t\t\"downsampling_layer.expand_1x1\" )\r if \"conv_3x3\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\"conv_3x3\" ,\t\t\t\t\t\t\"downsampling_layer.conv_3x3\" )\r if \"reduce_1x1\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\"reduce_1x1\" ,\t\t\t\t\t\t\"downsampling_layer.reduce_1x1\" )\r\r for i in range(2 ,\t\t\t\t\t\t5 ):\r if F'''.global_rep.{i}.weight''' in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(F'''.global_rep.{i}.weight''' ,\t\t\t\t\t\t\".layernorm.weight\" )\r if F'''.global_rep.{i}.bias''' in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(F'''.global_rep.{i}.bias''' ,\t\t\t\t\t\t\".layernorm.bias\" )\r\r if \".global_rep.\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\".global_rep.\" ,\t\t\t\t\t\t\".transformer.\" )\r if \".pre_norm_mha.0.\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\".pre_norm_mha.0.\" 
,\t\t\t\t\t\t\".layernorm_before.\" )\r if \".pre_norm_mha.1.out_proj.\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\".pre_norm_mha.1.out_proj.\" ,\t\t\t\t\t\t\".attention.output.dense.\" )\r if \".pre_norm_ffn.0.\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\".pre_norm_ffn.0.\" ,\t\t\t\t\t\t\".layernorm_after.\" )\r if \".pre_norm_ffn.1.\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\".pre_norm_ffn.1.\" ,\t\t\t\t\t\t\".intermediate.dense.\" )\r if \".pre_norm_ffn.4.\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\".pre_norm_ffn.4.\" ,\t\t\t\t\t\t\".output.dense.\" )\r if \".transformer.\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\".transformer.\" ,\t\t\t\t\t\t\".transformer.layer.\" )\r\r if \".aspp_layer.\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\".aspp_layer.\" ,\t\t\t\t\t\t\".\" )\r if \".aspp_pool.\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\".aspp_pool.\" ,\t\t\t\t\t\t\".\" )\r if \"seg_head.\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\"seg_head.\" ,\t\t\t\t\t\t\"segmentation_head.\" )\r if \"segmentation_head.classifier.classifier.\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\"segmentation_head.classifier.classifier.\" ,\t\t\t\t\t\t\"segmentation_head.classifier.\" )\r\r if \"classifier.fc.\" in name:\r __UpperCAmelCase =\t\t\t\t\t\t\tname.replace(\"classifier.fc.\" ,\t\t\t\t\t\t\"classifier.\" )\r elif (not base_model) and (\"segmentation_head.\" not in name):\r __UpperCAmelCase =\t\t\t\t\t\t\t\"mobilevit.\" + name\r\r return name\r\r\r\r\rdef __lowerCAmelCase ( A_ :\t\t\tAny ,\t\t\t\t\t\tA_ :\t\t\tUnion[str, Any] ,\t\t\t\t\t\tA_ :\t\t\tOptional[Any]=False ) ->\tTuple:\r\r\r if base_model:\r __UpperCAmelCase =\t\t\t\t\t\t\t\"\"\r else:\r __UpperCAmelCase =\t\t\t\t\t\t\t\"mobilevit.\"\r\r for key in orig_state_dict.copy().keys():\r __UpperCAmelCase =\t\t\t\t\t\t\torig_state_dict.pop(_A )\r\r if key[:8] == \"encoder.\":\r __UpperCAmelCase =\t\t\t\t\t\t\tkey[8:]\r\r if \"qkv\" in key:\r __UpperCAmelCase =\t\t\t\t\t\t\tkey.split(\".\" )\r __UpperCAmelCase =\t\t\t\t\t\t\tint(key_split[0][6:] ) - 1\r __UpperCAmelCase =\t\t\t\t\t\t\tint(key_split[3] )\r __UpperCAmelCase =\t\t\t\t\t\t\tmodel.get_submodule(F'''{model_prefix}encoder.layer.{layer_num}''' )\r __UpperCAmelCase =\t\t\t\t\t\t\tlayer.transformer.layer[transformer_num].attention.attention.all_head_size\r __UpperCAmelCase =\t\t\t\t\t\t\t(\r F'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''\r )\r if \"weight\" in key:\r __UpperCAmelCase =\t\t\t\t\t\t\tval[:dim, :]\r __UpperCAmelCase =\t\t\t\t\t\t\tval[dim : dim * 2, :]\r __UpperCAmelCase =\t\t\t\t\t\t\tval[-dim:, :]\r else:\r __UpperCAmelCase =\t\t\t\t\t\t\tval[:dim]\r __UpperCAmelCase =\t\t\t\t\t\t\tval[dim : dim * 2]\r __UpperCAmelCase =\t\t\t\t\t\t\tval[-dim:]\r else:\r __UpperCAmelCase =\t\t\t\t\t\t\tval\r\r return orig_state_dict\r\r\r\r\rdef __lowerCAmelCase ( ) ->\tUnion[str, Any]:\r __UpperCAmelCase =\t\t\t\t\t\t\t\"http://images.cocodataset.org/val2017/000000039769.jpg\"\r __UpperCAmelCase =\t\t\t\t\t\t\tImage.open(requests.get(_A ,\t\t\t\t\t\tstream=_A ).raw )\r return im\r\r\r\r\r@torch.no_grad()\rdef __lowerCAmelCase ( A_ :\t\t\tstr ,\t\t\t\t\t\tA_ :\t\t\tint ,\t\t\t\t\t\tA_ :\t\t\tint ,\t\t\t\t\t\tA_ :\t\t\tstr=False ) ->\tUnion[str, Any]:\r __UpperCAmelCase =\t\t\t\t\t\t\tget_mobilevit_config(_A )\r\r # load original state_dict\r __UpperCAmelCase =\t\t\t\t\t\t\ttorch.load(_A 
,\t\t\t\t\t\tmap_location=\"cpu\" )\r\r # load 🤗 model\r if mobilevit_name.startswith(\"deeplabv3_\" ):\r __UpperCAmelCase =\t\t\t\t\t\t\tMobileViTForSemanticSegmentation(_A ).eval()\r else:\r __UpperCAmelCase =\t\t\t\t\t\t\tMobileViTForImageClassification(_A ).eval()\r\r __UpperCAmelCase =\t\t\t\t\t\t\tconvert_state_dict(_A ,\t\t\t\t\t\t_A )\r model.load_state_dict(_A )\r\r # Check outputs on an image, prepared by MobileViTImageProcessor\r __UpperCAmelCase =\t\t\t\t\t\t\tMobileViTImageProcessor(crop_size=config.image_size ,\t\t\t\t\t\tsize=config.image_size + 32 )\r __UpperCAmelCase =\t\t\t\t\t\t\timage_processor(images=prepare_img() ,\t\t\t\t\t\treturn_tensors=\"pt\" )\r __UpperCAmelCase =\t\t\t\t\t\t\tmodel(**_A )\r __UpperCAmelCase =\t\t\t\t\t\t\toutputs.logits\r\r if mobilevit_name.startswith(\"deeplabv3_\" ):\r assert logits.shape == (1, 21, 32, 32)\r\r if mobilevit_name == \"deeplabv3_mobilevit_s\":\r __UpperCAmelCase =\t\t\t\t\t\t\ttorch.tensor(\r [\r [[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],\r [[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],\r [[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],\r ] )\r elif mobilevit_name == \"deeplabv3_mobilevit_xs\":\r __UpperCAmelCase =\t\t\t\t\t\t\ttorch.tensor(\r [\r [[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],\r [[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],\r [[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],\r ] )\r elif mobilevit_name == \"deeplabv3_mobilevit_xxs\":\r __UpperCAmelCase =\t\t\t\t\t\t\ttorch.tensor(\r [\r [[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],\r [[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],\r [[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],\r ] )\r else:\r raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )\r\r assert torch.allclose(logits[0, :3, :3, :3] ,\t\t\t\t\t\t_A ,\t\t\t\t\t\tatol=1e-4 )\r else:\r assert logits.shape == (1, 10_00)\r\r if mobilevit_name == \"mobilevit_s\":\r __UpperCAmelCase =\t\t\t\t\t\t\ttorch.tensor([-0.98_66, 0.23_92, -1.12_41] )\r elif mobilevit_name == \"mobilevit_xs\":\r __UpperCAmelCase =\t\t\t\t\t\t\ttorch.tensor([-2.47_61, -0.93_99, -1.95_87] )\r elif mobilevit_name == \"mobilevit_xxs\":\r __UpperCAmelCase =\t\t\t\t\t\t\ttorch.tensor([-1.93_64, -1.23_27, -0.46_53] )\r else:\r raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )\r\r assert torch.allclose(logits[0, :3] ,\t\t\t\t\t\t_A ,\t\t\t\t\t\tatol=1e-4 )\r\r Path(_A ).mkdir(exist_ok=_A )\r print(F'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' )\r model.save_pretrained(_A )\r print(F'''Saving image processor to {pytorch_dump_folder_path}''' )\r image_processor.save_pretrained(_A )\r\r if push_to_hub:\r __UpperCAmelCase =\t\t\t\t\t\t\t{\r \"mobilevit_s\": \"mobilevit-small\",\r \"mobilevit_xs\": \"mobilevit-x-small\",\r \"mobilevit_xxs\": \"mobilevit-xx-small\",\r \"deeplabv3_mobilevit_s\": \"deeplabv3-mobilevit-small\",\r \"deeplabv3_mobilevit_xs\": \"deeplabv3-mobilevit-x-small\",\r \"deeplabv3_mobilevit_xxs\": \"deeplabv3-mobilevit-xx-small\",\r }\r\r print(\"Pushing to the hub...\" )\r __UpperCAmelCase =\t\t\t\t\t\t\tmodel_mapping[mobilevit_name]\r 
image_processor.push_to_hub(_A ,\t\t\t\t\t\torganization=\"apple\" )\r model.push_to_hub(_A ,\t\t\t\t\t\torganization=\"apple\" )\r\r\rif __name__ == \"__main__\":\r a_\t\t = argparse.ArgumentParser()\r # Required parameters\r parser.add_argument(\r \"\"\"--mobilevit_name\"\"\",\r default=\"\"\"mobilevit_s\"\"\",\r type=str,\r help=(\r \"\"\"Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',\"\"\"\r \"\"\" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'.\"\"\"\r ),\r )\r parser.add_argument(\r \"\"\"--checkpoint_path\"\"\", required=True, type=str, help=\"\"\"Path to the original state dict (.pt file).\"\"\"\r )\r parser.add_argument(\r \"\"\"--pytorch_dump_folder_path\"\"\", required=True, type=str, help=\"\"\"Path to the output PyTorch model directory.\"\"\"\r )\r parser.add_argument(\r \"\"\"--push_to_hub\"\"\", action=\"\"\"store_true\"\"\", help=\"\"\"Whether or not to push the converted model to the 🤗 hub.\"\"\"\r )\r\r a_\t\t = parser.parse_args()\r convert_movilevit_checkpoint(\r args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub\r )\r\r\r\r"},"code_codestyle":{"kind":"number","value":221,"string":"221"},"style_context":{"kind":"string","value":"import enum\r\nimport os\r\nfrom hashlib import shaaaa\r\nfrom typing import Optional\r\n\r\nfrom .. import config\r\nfrom .logging import get_logger\r\n\r\n\r\nSCREAMING_SNAKE_CASE\t\t\t:\t\t\t\t\t\tList[Any]\t\t\t\t =\t\tget_logger(__name__)\r\nclass A_\t\t\t(\t\t\t\t\t\t\tenum.Enum\t\t\t):\r\n _SCREAMING_SNAKE_CASE \t\t\t\t\t= \"\"\"all_checks\"\"\"\r\n _SCREAMING_SNAKE_CASE \t\t\t\t\t= \"\"\"basic_checks\"\"\"\r\n _SCREAMING_SNAKE_CASE \t\t\t\t\t= \"\"\"no_checks\"\"\"\r\nclass A_\t\t\t(\t\t\t\t\t\t\ta_\t\t\t):\r\n pass\r\nclass A_\t\t\t(\t\t\t\t\t\t\ta_\t\t\t):\r\n pass\r\nclass A_\t\t\t(\t\t\t\t\t\t\ta_\t\t\t):\r\n pass\r\nclass A_\t\t\t(\t\t\t\t\t\t\ta_\t\t\t):\r\n pass\r\n\r\n\r\n\r\ndef \t__A\t\t( _A\t\t\t\t,\t\t\t\t\t_A\t\t\t\t,\t\t\t\t\t_A=None ):\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n if expected_checksums is None:\r\n logger.info(\"Unable to verify checksums.\" )\r\n return\r\n if len(set(_A ) - set(_A ) ) > 0:\r\n raise ExpectedMoreDownloadedFiles(str(set(_A ) - set(_A ) ) )\r\n if len(set(_A ) - set(_A ) ) > 0:\r\n raise UnexpectedDownloadedFile(str(set(_A ) - set(_A ) ) )\r\n __a\t\t\t\t\t\t\t=\t[url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]\r\n __a\t\t\t\t\t\t\t=\t\" for \" + verification_name if verification_name is not None else \"\"\r\n if len(_A ) > 0:\r\n raise NonMatchingChecksumError(\r\n f\"\"\"Checksums didn't match{for_verification_name}:\\n\"\"\"\r\n f\"\"\"{bad_urls}\\n\"\"\"\r\n \"Set `verification_mode='no_checks'` to skip checksums verification and ignore this error\" )\r\n logger.info(\"All the checksums matched successfully\" + for_verification_name )\r\nclass A_\t\t\t(\t\t\t\t\t\t\ta_\t\t\t):\r\n pass\r\nclass A_\t\t\t(\t\t\t\t\t\t\ta_\t\t\t):\r\n pass\r\nclass A_\t\t\t(\t\t\t\t\t\t\ta_\t\t\t):\r\n pass\r\nclass A_\t\t\t(\t\t\t\t\t\t\ta_\t\t\t):\r\n pass\r\n\r\n\r\n\r\ndef \t__A\t\t( _A\t\t\t\t,\t\t\t\t\t_A ):\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n if expected_splits is None:\r\n logger.info(\"Unable to verify splits sizes.\" )\r\n return\r\n if len(set(_A ) - set(_A ) ) > 0:\r\n raise ExpectedMoreSplits(str(set(_A ) - set(_A ) ) )\r\n if len(set(_A ) - set(_A ) ) > 0:\r\n raise 
UnexpectedSplits(str(set(_A ) - set(_A ) ) )\r\n __a\t\t\t\t\t\t\t=\t[\r\n {\"expected\": expected_splits[name], \"recorded\": recorded_splits[name]}\r\n for name in expected_splits\r\n if expected_splits[name].num_examples != recorded_splits[name].num_examples\r\n ]\r\n if len(_A ) > 0:\r\n raise NonMatchingSplitsSizesError(str(_A ) )\r\n logger.info(\"All the splits matched successfully.\" )\r\n\r\n\r\n\r\ndef \t__A\t\t( _A\t\t\t\t,\t\t\t\t\t_A = True ):\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n if record_checksum:\r\n __a\t\t\t\t\t\t\t=\tshaaaa()\r\n with open(_A\t\t\t\t,\t\t\t\t\t\"rb\" ) as f:\r\n for chunk in iter(lambda: f.read(1 << 20 )\t\t\t\t,\t\t\t\t\tB\"\" ):\r\n m.update(_A )\r\n __a\t\t\t\t\t\t\t=\tm.hexdigest()\r\n else:\r\n __a\t\t\t\t\t\t\t=\tNone\r\n return {\"num_bytes\": os.path.getsize(_A ), \"checksum\": checksum}\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef \t__A\t\t( _A ):\r\n\r\n\r\n\r\n \"\"\"simple docstring\"\"\"\r\n\r\n\r\n if dataset_size and config.IN_MEMORY_MAX_SIZE:\r\n return dataset_size < config.IN_MEMORY_MAX_SIZE\r\n else:\r\n return False\r\n\r\n\r\n"},"style_context_codestyle":{"kind":"number","value":197,"string":"197"},"label":{"kind":"number","value":0,"string":"0"}}},{"rowIdx":595,"cells":{"code":{"kind":"string","value":"\n\n\n\nlowercase_\t\t\t\t\t\t\t\t= tuple[float, float, float]\nlowercase_\t\t\t\t\t\t\t\t= tuple[float, float, float]\n\n\n\ndef __lowerCAmelCase ( SCREAMING_SNAKE_CASE_\t\t\t\t\t\t, SCREAMING_SNAKE_CASE_ ):\n\t\t\tlowercase__\t\t\t\t\t =\t\t\t\tend_pointa[0] - end_pointa[0]\n\t\t\tlowercase__\t\t\t\t\t =\t\t\t\tend_pointa[1] - end_pointa[1]\n\t\t\tlowercase__\t\t\t\t\t =\t\t\t\tend_pointa[2] - end_pointa[2]\n\t\t\treturn (x, y, z)\n\n\n\ndef __lowerCAmelCase ( SCREAMING_SNAKE_CASE_\t\t\t\t\t\t, SCREAMING_SNAKE_CASE_ ):\n\t\t\tlowercase__\t\t\t\t\t =\t\t\t\tab[1] * ac[2] - ab[2] * ac[1] # *i\n\t\t\tlowercase__\t\t\t\t\t =\t\t\t\t(ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j\n\t\t\tlowercase__\t\t\t\t\t =\t\t\t\tab[0] * ac[1] - ab[1] * ac[0] # *k\n\t\t\treturn (x, y, z)\n\n\n\ndef __lowerCAmelCase ( SCREAMING_SNAKE_CASE_\t\t\t\t\t\t, SCREAMING_SNAKE_CASE_ ):\n\t\t\treturn tuple(round(SCREAMING_SNAKE_CASE_\t\t\t\t\t\t, SCREAMING_SNAKE_CASE_ ) for x in vector ) == (0, 0, 0)\n\n\n\n\ndef __lowerCAmelCase ( SCREAMING_SNAKE_CASE_\t\t\t\t\t\t, SCREAMING_SNAKE_CASE_\t\t\t\t\t\t, SCREAMING_SNAKE_CASE_\t\t\t\t\t\t, SCREAMING_SNAKE_CASE_ = 10 ):\n\t\t\tlowercase__\t\t\t\t\t =\t\t\t\tcreate_vector(SCREAMING_SNAKE_CASE_\t\t\t\t\t\t, SCREAMING_SNAKE_CASE_ )\n\t\t\tlowercase__\t\t\t\t\t =\t\t\t\tcreate_vector(SCREAMING_SNAKE_CASE_\t\t\t\t\t\t, SCREAMING_SNAKE_CASE_ )\n\t\t\treturn is_zero_vector(get_ad_vectors_cross(SCREAMING_SNAKE_CASE_\t\t\t\t\t\t, SCREAMING_SNAKE_CASE_ )\t\t\t\t\t\t, SCREAMING_SNAKE_CASE_ )\n\n\n\n\n"},"code_codestyle":{"kind":"number","value":37,"string":"37"},"style_context":{"kind":"string","value":"\n\n\n\n# Usage:\n# ./gen-card-facebook-wmt19.py\n\nimport os\nfrom pathlib import Path\n\n\n\ndef __lowerCAmelCase ( SCREAMING_SNAKE_CASE_\t\t\t\t\t\t, SCREAMING_SNAKE_CASE_\t\t\t\t\t\t, SCREAMING_SNAKE_CASE_ ):\n\t\t\tlowercase__\t\t\t\t\t =\t\t\t\t{\n\t\t\t \"en\": \"Machine learning is great, isn't it?\",\n\t\t\t \"ru\": \"Машинное обучение - это здорово, не так ли?\",\n\t\t\t \"de\": \"Maschinelles Lernen ist großartig, oder?\",\n\t\t\t}\n\n\t\t\t# BLUE scores as follows:\n\t\t\t# \"pair\": [fairseq, transformers]\n\t\t\tlowercase__\t\t\t\t\t =\t\t\t\t{\n\t\t\t \"ru-en\": 
[\"[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)\", \"39.20\"],\n\t\t\t \"en-ru\": [\"[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)\", \"33.47\"],\n\t\t\t \"en-de\": [\"[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)\", \"42.83\"],\n\t\t\t \"de-en\": [\"[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)\", \"41.35\"],\n\t\t\t}\n\t\t\tlowercase__\t\t\t\t\t =\t\t\t\tf'''{src_lang}-{tgt_lang}'''\n\n\t\t\tlowercase__\t\t\t\t\t =\t\t\t\tf'''\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. 
For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'''\n\t\t\tos.makedirs(SCREAMING_SNAKE_CASE_\t\t\t\t\t\t, exist_ok=SCREAMING_SNAKE_CASE_ )\n\t\t\tlowercase__\t\t\t\t\t =\t\t\t\tos.path.join(SCREAMING_SNAKE_CASE_\t\t\t\t\t\t, \"README.md\" )\n\t\t\tprint(f'''Generating {path}''' )\n\t\t\twith open(SCREAMING_SNAKE_CASE_\t\t\t\t\t\t, \"w\"\t\t\t\t\t\t, encoding=\"utf-8\" ) as f:\n\t\t\t\t\t\tf.write(SCREAMING_SNAKE_CASE_ )\n\n\n# make sure we are under the root of the project\nlowercase_\t\t\t\t\t\t\t\t= Path(__file__).resolve().parent.parent.parent\nlowercase_\t\t\t\t\t\t\t\t= repo_dir / \"\"\"model_cards\"\"\"\n\nfor model_name in [\"wmt19-ru-en\", \"wmt19-en-ru\", \"wmt19-en-de\", \"wmt19-de-en\"]:\n\t\t\t\t\t\t\tlowercase_\t\t\t\t, lowercase_\t\t\t\t, lowercase_\t\t\t\t\t\t\t\t= model_name.split(\"\"\"-\"\"\")\n\t\t\t\t\t\t\tlowercase_\t\t\t\t\t\t\t\t= model_cards_dir / \"\"\"facebook\"\"\" / model_name\n\t\t\t\t\t\t\twrite_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)\n\n\n\n\n"},"style_context_codestyle":{"kind":"number","value":37,"string":"37"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":596,"cells":{"code":{"kind":"string","value":"\n\n\n\n'''simple docstring'''\n\n\n\n\n\nimport tempfile\n\nimport numpy as np\nimport torch\nfrom transformers import AutoTokenizer, TaEncoderModel\n\nfrom diffusers import DDPMScheduler, UNetaDConditionModel\nfrom diffusers.models.attention_processor import AttnAddedKVProcessor\nfrom diffusers.pipelines.deepfloyd_if import IFWatermarker\nfrom diffusers.utils.testing_utils import torch_device\n\nfrom ..test_pipelines_common import to_np\n\n\n\n\n\nclass \t\t\t\tlowerCAmelCase_\t\t\t\t\t\t:\n\n\n\n\n\n def \t__snake_case (\t\t\t\t\t\t\tself\t\t\t\t\t\t\t:\t\t\t\t\t\t\tList[Any] ):\n\n\n\n '''simple docstring'''\n\n\n\n\n\n\n\n torch.manual_seed(0 
)\n snake_case :\t\t\t\tList[Any]\t\t =TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )\n\n torch.manual_seed(0 )\n snake_case :\t\t\t\tDict\t\t =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )\n\n torch.manual_seed(0 )\n snake_case :\t\t\t\tint\t\t =UNetaDConditionModel(\n sample_size=32,\t\t\t\t\t\t\tlayers_per_block=1,\t\t\t\t\t\t\tblock_out_channels=[32, 64],\t\t\t\t\t\t\tdown_block_types=[\n '''ResnetDownsampleBlock2D''',\n '''SimpleCrossAttnDownBlock2D''',\n ],\t\t\t\t\t\t\tmid_block_type='''UNetMidBlock2DSimpleCrossAttn''',\t\t\t\t\t\t\tup_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''],\t\t\t\t\t\t\tin_channels=3,\t\t\t\t\t\t\tout_channels=6,\t\t\t\t\t\t\tcross_attention_dim=32,\t\t\t\t\t\t\tencoder_hid_dim=32,\t\t\t\t\t\t\tattention_head_dim=8,\t\t\t\t\t\t\taddition_embed_type='''text''',\t\t\t\t\t\t\taddition_embed_type_num_heads=2,\t\t\t\t\t\t\tcross_attention_norm='''group_norm''',\t\t\t\t\t\t\tresnet_time_scale_shift='''scale_shift''',\t\t\t\t\t\t\tact_fn='''gelu''',\t\t\t\t\t\t\t)\n unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests\n\n torch.manual_seed(0 )\n snake_case :\t\t\t\tAny\t\t =DDPMScheduler(\n num_train_timesteps=1_000,\t\t\t\t\t\t\tbeta_schedule='''squaredcos_cap_v2''',\t\t\t\t\t\t\tbeta_start=0.0001,\t\t\t\t\t\t\tbeta_end=0.02,\t\t\t\t\t\t\tthresholding=_snake_case,\t\t\t\t\t\t\tdynamic_thresholding_ratio=0.95,\t\t\t\t\t\t\tsample_max_value=1.0,\t\t\t\t\t\t\tprediction_type='''epsilon''',\t\t\t\t\t\t\tvariance_type='''learned_range''',\t\t\t\t\t\t\t)\n\n torch.manual_seed(0 )\n snake_case :\t\t\t\tList[str]\t\t =IFWatermarker()\n\n return {\n \"text_encoder\": text_encoder,\n \"tokenizer\": tokenizer,\n \"unet\": unet,\n \"scheduler\": scheduler,\n \"watermarker\": watermarker,\n \"safety_checker\": None,\n \"feature_extractor\": None,\n }\n\n\n\n\n\n def \t__snake_case (\t\t\t\t\t\t\tself\t\t\t\t\t\t\t:\t\t\t\t\t\t\tstr ):\n\n\n\n '''simple docstring'''\n\n\n\n\n\n\n\n torch.manual_seed(0 )\n snake_case :\t\t\t\tOptional[Any]\t\t =TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )\n\n torch.manual_seed(0 )\n snake_case :\t\t\t\tint\t\t =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )\n\n torch.manual_seed(0 )\n snake_case :\t\t\t\tOptional[int]\t\t =UNetaDConditionModel(\n sample_size=32,\t\t\t\t\t\t\tlayers_per_block=[1, 2],\t\t\t\t\t\t\tblock_out_channels=[32, 64],\t\t\t\t\t\t\tdown_block_types=[\n '''ResnetDownsampleBlock2D''',\n '''SimpleCrossAttnDownBlock2D''',\n ],\t\t\t\t\t\t\tmid_block_type='''UNetMidBlock2DSimpleCrossAttn''',\t\t\t\t\t\t\tup_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''],\t\t\t\t\t\t\tin_channels=6,\t\t\t\t\t\t\tout_channels=6,\t\t\t\t\t\t\tcross_attention_dim=32,\t\t\t\t\t\t\tencoder_hid_dim=32,\t\t\t\t\t\t\tattention_head_dim=8,\t\t\t\t\t\t\taddition_embed_type='''text''',\t\t\t\t\t\t\taddition_embed_type_num_heads=2,\t\t\t\t\t\t\tcross_attention_norm='''group_norm''',\t\t\t\t\t\t\tresnet_time_scale_shift='''scale_shift''',\t\t\t\t\t\t\tact_fn='''gelu''',\t\t\t\t\t\t\tclass_embed_type='''timestep''',\t\t\t\t\t\t\tmid_block_scale_factor=1.414,\t\t\t\t\t\t\ttime_embedding_act_fn='''gelu''',\t\t\t\t\t\t\ttime_embedding_dim=32,\t\t\t\t\t\t\t)\n unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests\n\n torch.manual_seed(0 )\n snake_case :\t\t\t\tDict\t\t =DDPMScheduler(\n 
num_train_timesteps=1_000,\t\t\t\t\t\t\tbeta_schedule='''squaredcos_cap_v2''',\t\t\t\t\t\t\tbeta_start=0.0001,\t\t\t\t\t\t\tbeta_end=0.02,\t\t\t\t\t\t\tthresholding=_snake_case,\t\t\t\t\t\t\tdynamic_thresholding_ratio=0.95,\t\t\t\t\t\t\tsample_max_value=1.0,\t\t\t\t\t\t\tprediction_type='''epsilon''',\t\t\t\t\t\t\tvariance_type='''learned_range''',\t\t\t\t\t\t\t)\n\n torch.manual_seed(0 )\n snake_case :\t\t\t\tTuple\t\t =DDPMScheduler(\n num_train_timesteps=1_000,\t\t\t\t\t\t\tbeta_schedule='''squaredcos_cap_v2''',\t\t\t\t\t\t\tbeta_start=0.0001,\t\t\t\t\t\t\tbeta_end=0.02,\t\t\t\t\t\t\t)\n\n torch.manual_seed(0 )\n snake_case :\t\t\t\tDict\t\t =IFWatermarker()\n\n return {\n \"text_encoder\": text_encoder,\n \"tokenizer\": tokenizer,\n \"unet\": unet,\n \"scheduler\": scheduler,\n \"image_noising_scheduler\": image_noising_scheduler,\n \"watermarker\": watermarker,\n \"safety_checker\": None,\n \"feature_extractor\": None,\n }\n\n\n\n\n\n def \t__snake_case (\t\t\t\t\t\t\tself\t\t\t\t\t\t\t:\t\t\t\t\t\t\tint ):\n\n\n\n '''simple docstring'''\n\n\n\n\n\n\n\n snake_case :\t\t\t\tList[Any]\t\t =self.get_dummy_components()\n snake_case :\t\t\t\tDict\t\t =self.pipeline_class(**_snake_case )\n pipe.to(_snake_case )\n pipe.set_progress_bar_config(disable=_snake_case )\n\n snake_case :\t\t\t\tOptional[int]\t\t =self.get_dummy_inputs(_snake_case )\n\n snake_case :\t\t\t\tAny\t\t =inputs['''prompt''']\n snake_case :\t\t\t\tstr\t\t =inputs['''generator''']\n snake_case :\t\t\t\tUnion[str, Any]\t\t =inputs['''num_inference_steps''']\n snake_case :\t\t\t\tDict\t\t =inputs['''output_type''']\n\n if \"image\" in inputs:\n snake_case :\t\t\t\tUnion[str, Any]\t\t =inputs['''image''']\n else:\n snake_case :\t\t\t\tList[str]\t\t =None\n\n if \"mask_image\" in inputs:\n snake_case :\t\t\t\tOptional[int]\t\t =inputs['''mask_image''']\n else:\n snake_case :\t\t\t\tUnion[str, Any]\t\t =None\n\n if \"original_image\" in inputs:\n snake_case :\t\t\t\tAny\t\t =inputs['''original_image''']\n else:\n snake_case :\t\t\t\tList[Any]\t\t =None\n\n snake_case ,\t\t\t\t\t\t\tsnake_case :\t\t\t\tDict\t\t =pipe.encode_prompt(_snake_case )\n\n # inputs with prompt converted to embeddings\n snake_case :\t\t\t\tUnion[str, Any]\t\t ={\n '''prompt_embeds''': prompt_embeds,\n '''negative_prompt_embeds''': negative_prompt_embeds,\n '''generator''': generator,\n '''num_inference_steps''': num_inference_steps,\n '''output_type''': output_type,\n }\n\n if image is not None:\n snake_case :\t\t\t\tint\t\t =image\n\n if mask_image is not None:\n snake_case :\t\t\t\tList[str]\t\t =mask_image\n\n if original_image is not None:\n snake_case :\t\t\t\tAny\t\t =original_image\n\n # set all optional components to None\n for optional_component in pipe._optional_components:\n setattr(_snake_case,\t\t\t\t\t\t\t_snake_case,\t\t\t\t\t\t\t_snake_case )\n\n snake_case :\t\t\t\tAny\t\t =pipe(**_snake_case )[0]\n\n with tempfile.TemporaryDirectory() as tmpdir:\n pipe.save_pretrained(_snake_case )\n snake_case :\t\t\t\tint\t\t =self.pipeline_class.from_pretrained(_snake_case )\n pipe_loaded.to(_snake_case )\n pipe_loaded.set_progress_bar_config(disable=_snake_case )\n\n pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests\n\n for optional_component in pipe._optional_components:\n self.assertTrue(\n getattr(_snake_case,\t\t\t\t\t\t\t_snake_case ) is None,\t\t\t\t\t\t\tf'''`{optional_component}` did not stay set to None after loading.''',\t\t\t\t\t\t\t)\n\n snake_case :\t\t\t\tList[Any]\t\t 
=self.get_dummy_inputs(_snake_case )\n\n snake_case :\t\t\t\tstr\t\t =inputs['''generator''']\n snake_case :\t\t\t\tList[str]\t\t =inputs['''num_inference_steps''']\n snake_case :\t\t\t\tTuple\t\t =inputs['''output_type''']\n\n # inputs with prompt converted to embeddings\n snake_case :\t\t\t\tOptional[int]\t\t ={\n '''prompt_embeds''': prompt_embeds,\n '''negative_prompt_embeds''': negative_prompt_embeds,\n '''generator''': generator,\n '''num_inference_steps''': num_inference_steps,\n '''output_type''': output_type,\n }\n\n if image is not None:\n snake_case :\t\t\t\tOptional[Any]\t\t =image\n\n if mask_image is not None:\n snake_case :\t\t\t\tTuple\t\t =mask_image\n\n if original_image is not None:\n snake_case :\t\t\t\tAny\t\t =original_image\n\n snake_case :\t\t\t\tOptional[int]\t\t =pipe_loaded(**_snake_case )[0]\n\n snake_case :\t\t\t\tstr\t\t =np.abs(to_np(_snake_case ) - to_np(_snake_case ) ).max()\n self.assertLess(_snake_case,\t\t\t\t\t\t\t1E-4 )\n\n\n\n\n\n\n def \t__snake_case (\t\t\t\t\t\t\tself\t\t\t\t\t\t\t:\t\t\t\t\t\t\tUnion[str, Any] ):\n\n\n\n '''simple docstring'''\n\n\n\n\n\n\n\n snake_case :\t\t\t\tList[str]\t\t =self.get_dummy_components()\n snake_case :\t\t\t\tOptional[Any]\t\t =self.pipeline_class(**_snake_case )\n pipe.to(_snake_case )\n pipe.set_progress_bar_config(disable=_snake_case )\n\n snake_case :\t\t\t\tstr\t\t =self.get_dummy_inputs(_snake_case )\n snake_case :\t\t\t\tOptional[int]\t\t =pipe(**_snake_case )[0]\n\n with tempfile.TemporaryDirectory() as tmpdir:\n pipe.save_pretrained(_snake_case )\n snake_case :\t\t\t\tUnion[str, Any]\t\t =self.pipeline_class.from_pretrained(_snake_case )\n pipe_loaded.to(_snake_case )\n pipe_loaded.set_progress_bar_config(disable=_snake_case )\n\n pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests\n\n snake_case :\t\t\t\tTuple\t\t =self.get_dummy_inputs(_snake_case )\n snake_case :\t\t\t\tint\t\t =pipe_loaded(**_snake_case )[0]\n\n snake_case :\t\t\t\tOptional[Any]\t\t =np.abs(to_np(_snake_case ) - to_np(_snake_case ) ).max()\n self.assertLess(_snake_case,\t\t\t\t\t\t\t1E-4 )\n\n\n\n\n\n"},"code_codestyle":{"kind":"number","value":349,"string":"349"},"style_context":{"kind":"string","value":"\n\n\n\n'''simple docstring'''\n\n\n\n\n\nimport math\n\nimport qiskit\ndef \t\t\t\t\t\t_a ( lowerCamelCase_ = 1\t,\t\tlowerCamelCase_ = 1\t,\t\tlowerCamelCase_ = 1\t\t\t\t):\n if (\n isinstance(lowerCamelCase_\t,\t\tlowerCamelCase_\t\t\t\t)\n or isinstance(lowerCamelCase_\t,\t\tlowerCamelCase_\t\t\t\t)\n or isinstance(lowerCamelCase_\t,\t\tlowerCamelCase_\t\t\t\t)\n ):\n raise TypeError('''inputs must be integers.'''\t\t\t\t)\n\n if (input_a < 0) or (input_a < 0) or (carry_in < 0):\n raise ValueError('''inputs must be positive.'''\t\t\t\t)\n\n if (\n (math.floor(lowerCamelCase_\t\t\t\t) != input_a)\n or (math.floor(lowerCamelCase_\t\t\t\t) != input_a)\n or (math.floor(lowerCamelCase_\t\t\t\t) != carry_in)\n ):\n raise ValueError('''inputs must be exact integers.'''\t\t\t\t)\n\n if (input_a > 2) or (input_a > 2) or (carry_in > 2):\n raise ValueError('''inputs must be less or equal to 2.'''\t\t\t\t)\n\n # build registers\n snake_case :\t\t\t\tList[Any]\t\t =qiskit.QuantumRegister(4\t,\t\t'''qr'''\t\t\t\t)\n snake_case :\t\t\t\tOptional[Any]\t\t =qiskit.ClassicalRegister(2\t,\t\t'''cr'''\t\t\t\t)\n # list the entries\n snake_case :\t\t\t\tAny\t\t =[input_a, input_a, carry_in]\n\n snake_case :\t\t\t\tList[str]\t\t =qiskit.QuantumCircuit(lowerCamelCase_\t,\t\tlowerCamelCase_\t\t\t\t)\n\n for i in 
range(0\t,\t\t3\t\t\t\t):\n if entry[i] == 2:\n quantum_circuit.h(lowerCamelCase_\t\t\t\t) # for hadamard entries\n elif entry[i] == 1:\n quantum_circuit.x(lowerCamelCase_\t\t\t\t) # for 1 entries\n elif entry[i] == 0:\n quantum_circuit.i(lowerCamelCase_\t\t\t\t) # for 0 entries\n\n # build the circuit\n quantum_circuit.ccx(0\t,\t\t1\t,\t\t3\t\t\t\t) # ccx = toffoli gate\n quantum_circuit.cx(0\t,\t\t1\t\t\t\t)\n quantum_circuit.ccx(1\t,\t\t2\t,\t\t3\t\t\t\t)\n quantum_circuit.cx(1\t,\t\t2\t\t\t\t)\n quantum_circuit.cx(0\t,\t\t1\t\t\t\t)\n\n quantum_circuit.measure([2, 3]\t,\t\tlowerCamelCase_\t\t\t\t) # measure the last two qbits\n\n snake_case :\t\t\t\tList[str]\t\t =qiskit.Aer.get_backend('''aer_simulator'''\t\t\t\t)\n snake_case :\t\t\t\tOptional[int]\t\t =qiskit.execute(lowerCamelCase_\t,\t\tlowerCamelCase_\t,\t\tshots=10_00\t\t\t\t)\n\n return job.result().get_counts(lowerCamelCase_\t\t\t\t)\n\n\nif __name__ == \"__main__\":\n print(f\"Total sum count for state is: {quantum_full_adder(1, 1, 1)}\")\n\n\n\n\n\n"},"style_context_codestyle":{"kind":"number","value":349,"string":"349"},"label":{"kind":"number","value":1,"string":"1"}}},{"rowIdx":597,"cells":{"code":{"kind":"string","value":"\r\n\r\n\r\n\r\n\r\n\r\nfrom collections import OrderedDict\r\nfrom typing import Mapping\r\n\r\nfrom ...configuration_utils import PretrainedConfig\r\nfrom ...onnx import OnnxConfig\r\nfrom ...utils import logging\r\n\r\n\r\n_lowerCamelCase : Optional[Any]\t\t\t\t\t\t\t\t\t\t\t\t\t= logging.get_logger(__name__)\r\n\r\n_lowerCamelCase : Tuple\t\t\t\t\t\t\t\t\t\t\t\t\t= {\r\n '''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',\r\n '''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',\r\n '''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',\r\n '''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',\r\n '''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',\r\n '''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',\r\n '''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',\r\n '''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',\r\n '''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',\r\n}\r\n\r\n\r\nclass \t\tlowerCamelCase (__lowerCamelCase ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\tUpperCAmelCase_ \t\t=\t\t\t\t\t\"xmod\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\tdef __init__( self\t\t\t: Union[str, Any],\t\t\t\t_UpperCAmelCase\t\t\t: str=3_0_5_2_2,\t\t\t\t_UpperCAmelCase\t\t\t: List[str]=7_6_8,\t\t\t\t_UpperCAmelCase\t\t\t: str=1_2,\t\t\t\t_UpperCAmelCase\t\t\t: Tuple=1_2,\t\t\t\t_UpperCAmelCase\t\t\t: Dict=3_0_7_2,\t\t\t\t_UpperCAmelCase\t\t\t: List[Any]=\"gelu\",\t\t\t\t_UpperCAmelCase\t\t\t: List[Any]=0.1,\t\t\t\t_UpperCAmelCase\t\t\t: Optional[Any]=0.1,\t\t\t\t_UpperCAmelCase\t\t\t: int=5_1_2,\t\t\t\t_UpperCAmelCase\t\t\t: Tuple=2,\t\t\t\t_UpperCAmelCase\t\t\t: Tuple=0.02,\t\t\t\t_UpperCAmelCase\t\t\t: Union[str, Any]=1E-12,\t\t\t\t_UpperCAmelCase\t\t\t: Union[str, 
Any]=1,\t\t\t\t_UpperCAmelCase\t\t\t: Any=0,\t\t\t\t_UpperCAmelCase\t\t\t: List[str]=2,\t\t\t\t_UpperCAmelCase\t\t\t: Optional[Any]=\"absolute\",\t\t\t\t_UpperCAmelCase\t\t\t: int=True,\t\t\t\t_UpperCAmelCase\t\t\t: Optional[Any]=None,\t\t\t\t_UpperCAmelCase\t\t\t: Optional[Any]=False,\t\t\t\t_UpperCAmelCase\t\t\t: Any=2,\t\t\t\t_UpperCAmelCase\t\t\t: Optional[int]=False,\t\t\t\t_UpperCAmelCase\t\t\t: List[str]=True,\t\t\t\t_UpperCAmelCase\t\t\t: Optional[int]=True,\t\t\t\t_UpperCAmelCase\t\t\t: int=(\"en_XX\",),\t\t\t\t_UpperCAmelCase\t\t\t: Optional[Any]=None,\t\t\t\t**_UpperCAmelCase\t\t\t: Union[str, Any],\t\t\t\t) ->\t\t\t\tList[Any]:\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsuper().__init__(pad_token_id=_UpperCAmelCase,\t\t\t\tbos_token_id=_UpperCAmelCase,\t\t\t\teos_token_id=_UpperCAmelCase,\t\t\t\t**_UpperCAmelCase\t\t)\r\n\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : Any\t\t\t\t\t\t\t\t\t\t\t=\t\tvocab_size\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : Dict\t\t\t\t\t\t\t\t\t\t\t=\t\thidden_size\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : int\t\t\t\t\t\t\t\t\t\t\t=\t\tnum_hidden_layers\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : Any\t\t\t\t\t\t\t\t\t\t\t=\t\tnum_attention_heads\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : Any\t\t\t\t\t\t\t\t\t\t\t=\t\thidden_act\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : Dict\t\t\t\t\t\t\t\t\t\t\t=\t\tintermediate_size\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : int\t\t\t\t\t\t\t\t\t\t\t=\t\thidden_dropout_prob\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : Dict\t\t\t\t\t\t\t\t\t\t\t=\t\tattention_probs_dropout_prob\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : str\t\t\t\t\t\t\t\t\t\t\t=\t\tmax_position_embeddings\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : Tuple\t\t\t\t\t\t\t\t\t\t\t=\t\ttype_vocab_size\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : Tuple\t\t\t\t\t\t\t\t\t\t\t=\t\tinitializer_range\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : Optional[int]\t\t\t\t\t\t\t\t\t\t\t=\t\tlayer_norm_eps\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : str\t\t\t\t\t\t\t\t\t\t\t=\t\tposition_embedding_type\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : Dict\t\t\t\t\t\t\t\t\t\t\t=\t\tuse_cache\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : Tuple\t\t\t\t\t\t\t\t\t\t\t=\t\tclassifier_dropout\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : int\t\t\t\t\t\t\t\t\t\t\t=\t\tpre_norm\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : List[str]\t\t\t\t\t\t\t\t\t\t\t=\t\tadapter_reduction_factor\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : Dict\t\t\t\t\t\t\t\t\t\t\t=\t\tadapter_layer_norm\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : Tuple\t\t\t\t\t\t\t\t\t\t\t=\t\tadapter_reuse_layer_norm\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : Dict\t\t\t\t\t\t\t\t\t\t\t=\t\tln_before_adapter\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : List[Any]\t\t\t\t\t\t\t\t\t\t\t=\t\tlist(_UpperCAmelCase\t\t)\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tSCREAMING_SNAKE_CASE__ : Any\t\t\t\t\t\t\t\t\t\t\t=\t\tdefault_language\r\n\r\n\r\nclass \t\tlowerCamelCase (__lowerCamelCase ):\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t\"\"\"simple docstring\"\"\"\r\n\r\n\r\n\r\n\r\n\t\t\t\t\t\t\t@property\r\n\t\t\t\t\t\t\tdef A_\t\t\t( self\t\t\t: int\t\t) ->\t\t\t\tMapping[str, Mapping[int, 
[Embedded dataset-viewer payload. The raw JSON behind this preview spills into the page here as escaped string values; only a summary of the recoverable row data is kept below. Each row carries the fields code, code_codestyle, style_context, style_context_codestyle and label.]

Row preceding index 598 (only its tail is visible): the code field ends with an ONNX-config-style inputs property returning an OrderedDict of dynamic axes for "input_ids" and "attention_mask" (with an extra "choice" axis for the multiple-choice task); code_codestyle: 157. Its style_context is a Quine-McCluskey prime-implicant module (compare_string, decimal_to_binary, is_for_table, prime_implicant_chart, selection and a doctest-driven main); style_context_codestyle: 157; label: 1.

Row 598: code is a push-relabel maximum-flow implementation (a FlowNetwork class, flow-network algorithm executors and a PushRelabelExecutor, plus a small demo under if __name__ == "__main__"); code_codestyle: 86. Its style_context is a unit-test module for the Pix2Struct image processor (an image-processing tester class and PIL/NumPy/PyTorch input tests); style_context_codestyle: 114; label: 0.

Row 599: code is a short typing-alias module built on TypeVar("T") (list/tuple, nested and path-like unions); code_codestyle: 149. Its style_context is a perfect_cube(n) check using n ** (1 / 3) with two example prints; style_context_codestyle: 149; label: 1.

[The rest of the payload is page state rather than row data: this view is page 5 at 100 rows per page (offset 500) out of 307,988 rows in total, plus an access token and display/discussion flags.]
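The code and style_context values above appear with \r\n and \t escape sequences only because they are JSON string literals inside the viewer payload; a plain JSON parse is enough to recover readable source. A minimal sketch, in Python, using a made-up stand-in payload (the snippet and field values below are illustrative, not an actual row of this dataset):

import json

# Hypothetical stand-in for one raw viewer row; real rows also carry
# style_context and style_context_codestyle.
payload = (
    '{"code": "def f(x):\\r\\n\\treturn x * 2\\r\\n", '
    '"code_codestyle": 157, "label": 1}'
)

row = json.loads(payload)   # json.loads turns the \r\n and \t escapes back into real newlines and tabs
print(row["code"])          # the snippet prints as ordinary source lines
print(row["code_codestyle"], row["label"])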

Column summary (from the dataset viewer's header row):

    Column                     Type      Values
    code                       string    lengths 82 to 53.2k characters
    code_codestyle             int64     0 to 721
    style_context              string    lengths 91 to 41.9k characters
    style_context_codestyle    int64     0 to 699
    label                      int64     0 or 1
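Each example therefore pairs a code snippet with a style_context snippet and their numeric style-cluster IDs. In the rows visible on this page, label is 1 exactly when code_codestyle equals style_context_codestyle and 0 otherwise; that is an observation from the preview, not documented behaviour. A minimal Python sketch for loading the data and checking that observation — the repository ID and split name are placeholders, not taken from this page:

from datasets import load_dataset

# Placeholder repo ID and split name; substitute the actual path of this dataset.
ds = load_dataset("some-user/python_codestyles", split="train")

print(ds.column_names)
# expected: ['code', 'code_codestyle', 'style_context', 'style_context_codestyle', 'label']

# Count rows that contradict the preview-based guess that
# label == 1 iff the two codestyle IDs match.
disagreements = sum(
    row["label"] != int(row["code_codestyle"] == row["style_context_codestyle"])
    for row in ds
)
print("rows where the guess fails:", disagreements)

The preview rows below keep the same column order as the table above: each code block is followed by its code_codestyle value, then the paired style_context block, its style_context_codestyle value, and finally the label.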
'''simple docstring''' import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer lowerCAmelCase_ : Union[str, Any] = 'bart' lowerCAmelCase_ : int = True @st.cache(allow_output_mutation=_snake_case ) def __A ( ) -> Optional[Any]: '''simple docstring''' if LOAD_DENSE_INDEX: _UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" ) _UpperCamelCase : Any = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" ) _UpperCamelCase : List[Any] = qar_model.eval() else: _UpperCamelCase : Union[str, Any] = (None, None) if MODEL_TYPE == "bart": _UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("yjernite/bart_eli5" ) _UpperCamelCase : Any = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" ) _UpperCamelCase : Tuple = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" ) sas_model.load_state_dict(save_dict["model"] ) _UpperCamelCase : int = sas_model.eval() else: _UpperCamelCase : List[str] = make_qa_sas_model( model_name="t5-small" ,from_file="seq2seq_models/eli5_t5_model_1024_4.pth" ,device="cuda:0" ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=_snake_case ) def __A ( ) -> Dict: '''simple docstring''' if LOAD_DENSE_INDEX: _UpperCamelCase : int = faiss.StandardGpuResources() _UpperCamelCase : Dict = datasets.load_dataset(path="wiki_snippets" ,name="wiki40b_en_100_0" )["""train"""] _UpperCamelCase : Any = np.memmap( "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat" ,dtype="float32" ,mode="r" ,shape=(wikiaab_passages.num_rows, 1_2_8) ,) _UpperCamelCase : Dict = faiss.IndexFlatIP(1_2_8 ) _UpperCamelCase : int = faiss.index_cpu_to_gpu(_snake_case ,1 ,_snake_case ) wikiaab_gpu_index_flat.add(_snake_case ) # TODO fix for larger GPU else: _UpperCamelCase : Optional[int] = (None, None) _UpperCamelCase : Any = Elasticsearch([{"host": "localhost", "port": "9200"}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=_snake_case ) def __A ( ) -> Dict: '''simple docstring''' _UpperCamelCase : List[str] = datasets.load_dataset("eli5" ,name="LFQA_reddit" ) _UpperCamelCase : str = elia["""train_eli5"""] _UpperCamelCase : Any = np.memmap( "eli5_questions_reps.dat" ,dtype="float32" ,mode="r" ,shape=(elia_train.num_rows, 1_2_8) ) _UpperCamelCase : List[str] = faiss.IndexFlatIP(1_2_8 ) eli5_train_q_index.add(_snake_case ) return (elia_train, eli5_train_q_index) lowerCAmelCase_ : Dict = load_indexes() lowerCAmelCase_ : List[str] = load_models() lowerCAmelCase_ : Optional[int] = load_train_data() def __A ( UpperCAmelCase ,UpperCAmelCase=1_0 ) -> Tuple: '''simple docstring''' _UpperCamelCase : List[Any] = embed_questions_for_retrieval([question] ,_snake_case ,_snake_case ) _UpperCamelCase : List[str] = eli5_train_q_index.search(_snake_case ,_snake_case ) _UpperCamelCase : Tuple = [elia_train[int(_snake_case )] for i in I[0]] return nn_examples def __A ( UpperCAmelCase ,UpperCAmelCase="wiki40b" ,UpperCAmelCase="dense" ,UpperCAmelCase=1_0 ) -> Optional[Any]: '''simple docstring''' if source == "none": _UpperCamelCase : Optional[int] = (""" <P> """.join(["" for _ in range(1_1 )] ).strip(), []) else: if method == "dense": _UpperCamelCase : Dict = 
query_qa_dense_index( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ) else: _UpperCamelCase : str = query_es_index( _snake_case ,_snake_case ,index_name="english_wiki40b_snippets_100w" ,n_results=_snake_case ,) _UpperCamelCase : Union[str, Any] = [ (res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst ] _UpperCamelCase : Optional[int] = """question: {} context: {}""".format(_snake_case ,_snake_case ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda UpperCAmelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda UpperCAmelCase : None), } ) def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase=6_4 ,UpperCAmelCase=2_5_6 ,UpperCAmelCase=False ,UpperCAmelCase=2 ,UpperCAmelCase=0.95 ,UpperCAmelCase=0.8 ) -> Union[str, Any]: '''simple docstring''' with torch.no_grad(): _UpperCamelCase : Optional[Any] = qa_sas_generate( _snake_case ,_snake_case ,_snake_case ,num_answers=1 ,num_beams=_snake_case ,min_len=_snake_case ,max_len=_snake_case ,do_sample=_snake_case ,temp=_snake_case ,top_p=_snake_case ,top_k=_snake_case ,max_input_length=1_0_2_4 ,device="cuda:0" ,)[0] return (answer, support_list) st.title("""Long Form Question Answering with ELI5""") # Start sidebar lowerCAmelCase_ : Optional[Any] = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>' lowerCAmelCase_ : str = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia lowerCAmelCase_ : List[Any] = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n' st.sidebar.markdown(description, unsafe_allow_html=True) lowerCAmelCase_ : List[Any] = [ 'Answer the question', 'View the retrieved document only', 'View the most similar ELI5 question and answer', 'Show me everything, please!', ] lowerCAmelCase_ : str = st.sidebar.checkbox("""Demo options""") if demo_options: lowerCAmelCase_ : Optional[int] = st.sidebar.selectbox( """""", action_list, index=3, ) lowerCAmelCase_ : Optional[Any] = action_list.index(action_st) lowerCAmelCase_ : str = st.sidebar.selectbox( """""", ["""Show full text of passages""", """Show passage section titles"""], index=0, ) lowerCAmelCase_ : Any = show_type == 'Show full text of passages' else: lowerCAmelCase_ : Union[str, Any] = 3 lowerCAmelCase_ : Union[str, Any] = True lowerCAmelCase_ : Optional[Any] = st.sidebar.checkbox("""Retrieval options""") if retrieval_options: lowerCAmelCase_ : Any = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n ' 
st.sidebar.markdown(retriever_info) lowerCAmelCase_ : List[str] = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""]) lowerCAmelCase_ : Optional[Any] = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""]) else: lowerCAmelCase_ : Optional[Any] = 'wiki40b' lowerCAmelCase_ : Tuple = 'dense' lowerCAmelCase_ : Tuple = 'beam' lowerCAmelCase_ : int = 2 lowerCAmelCase_ : Tuple = 64 lowerCAmelCase_ : List[str] = 256 lowerCAmelCase_ : Optional[int] = None lowerCAmelCase_ : Optional[int] = None lowerCAmelCase_ : str = st.sidebar.checkbox("""Generation options""") if generate_options: lowerCAmelCase_ : Dict = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n ' st.sidebar.markdown(generate_info) lowerCAmelCase_ : Tuple = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""]) lowerCAmelCase_ : List[str] = st.sidebar.slider( """Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) lowerCAmelCase_ : Optional[Any] = st.sidebar.slider( """Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": lowerCAmelCase_ : str = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: lowerCAmelCase_ : Tuple = st.sidebar.slider( """Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) lowerCAmelCase_ : Tuple = st.sidebar.slider( """Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) lowerCAmelCase_ : Dict = None # start main text lowerCAmelCase_ : Tuple = [ '<MY QUESTION>', 'How do people make chocolate?', 'Why do we get a fever when we are sick?', 'How can different animals perceive different colors?', 'What is natural language processing?', 'What\'s the best way to treat a sunburn?', 'What exactly are vitamins ?', 'How does nuclear energy provide electricity?', 'What\'s the difference between viruses and bacteria?', 'Why are flutes classified as woodwinds when most of them are made out of metal ?', 'Why do people like drinking coffee even though it tastes so bad?', 'What happens when wine ages? How does it make the wine taste better?', 'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?', 'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?', 'How does New Zealand have so many large bird predators?', ] lowerCAmelCase_ : List[Any] = st.selectbox( """What would you like to ask? 
---- select <MY QUESTION> to enter a new query""", questions_list, index=1, ) if question_s == "<MY QUESTION>": lowerCAmelCase_ : str = st.text_input("""Enter your question here:""", """""") else: lowerCAmelCase_ : int = question_s if st.button("""Show me!"""): if action in [0, 1, 3]: if index_type == "mixed": lowerCAmelCase_ : Optional[Any] = make_support(question, source=wiki_source, method="""dense""", n_results=10) lowerCAmelCase_ : Union[str, Any] = make_support(question, source=wiki_source, method="""sparse""", n_results=10) lowerCAmelCase_ : List[Any] = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] lowerCAmelCase_ : List[Any] = support_list[:10] lowerCAmelCase_ : str = '<P> ' + ' <P> '.join([res[-1] for res in support_list]) else: lowerCAmelCase_ : int = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: lowerCAmelCase_ : Tuple = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == """sampled"""), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("""### The model generated answer is:""") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""") for i, res in enumerate(support_list): lowerCAmelCase_ : Optional[Any] = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(""" """, """_""")) lowerCAmelCase_ : int = res[1].strip() if sec_titles == "": lowerCAmelCase_ : List[str] = '[{}]({})'.format(res[0], wiki_url) else: lowerCAmelCase_ : List[Any] = sec_titles.split(""" & """) lowerCAmelCase_ : List[str] = ' & '.join( ["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list] ) st.markdown( """{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( """> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True ) if action in [2, 3]: lowerCAmelCase_ : int = find_nearest_training(question) lowerCAmelCase_ : Any = nn_train_list[0] st.markdown( """--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""]) ) lowerCAmelCase_ : int = [ '{}. {}'.format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""])) for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""])) if i == 0 or sc > 2 ] st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st))) lowerCAmelCase_ : List[str] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
code_codestyle: 435
"""simple docstring""" import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase__ : Dict = {'vocab_file': 'spiece.model'} UpperCAmelCase__ : Tuple = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), } } UpperCAmelCase__ : Optional[int] = { 'google/bigbird-roberta-base': 4_0_9_6, 'google/bigbird-roberta-large': 4_0_9_6, 'google/bigbird-base-trivia-itc': 4_0_9_6, } class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES __UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : List[str] = ['''input_ids''', '''attention_mask'''] __UpperCamelCase : List[int] = [] def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="[SEP]" , SCREAMING_SNAKE_CASE__="[MASK]" , SCREAMING_SNAKE_CASE__="[CLS]" , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else bos_token SCREAMING_SNAKE_CASE__ : Dict = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else eos_token SCREAMING_SNAKE_CASE__ : Tuple = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else unk_token SCREAMING_SNAKE_CASE__ : List[str] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else pad_token SCREAMING_SNAKE_CASE__ : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cls_token SCREAMING_SNAKE_CASE__ : int = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE__ : List[Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token SCREAMING_SNAKE_CASE__ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , ) SCREAMING_SNAKE_CASE__ : List[Any] = vocab_file SCREAMING_SNAKE_CASE__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(SCREAMING_SNAKE_CASE__ ) @property def __magic_name__ (self ) -> Tuple: """simple docstring""" return self.sp_model.get_piece_size() def __magic_name__ (self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__(self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.__dict__.copy() SCREAMING_SNAKE_CASE__ : Tuple = None return state def __setstate__(self , SCREAMING_SNAKE_CASE__ ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): SCREAMING_SNAKE_CASE__ : Optional[Any] = {} SCREAMING_SNAKE_CASE__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[str]: """simple docstring""" return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[str]: """simple docstring""" return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ ) return token def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = [] SCREAMING_SNAKE_CASE__ : List[str] = """""" SCREAMING_SNAKE_CASE__ : Any = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token SCREAMING_SNAKE_CASE__ : Union[str, Any] = True SCREAMING_SNAKE_CASE__ : Optional[Any] = [] else: current_sub_tokens.append(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = False out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) return out_string.strip() def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , **SCREAMING_SNAKE_CASE__ , ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = kwargs.pop("""use_source_tokenizer""" , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[str] = self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 SCREAMING_SNAKE_CASE__ : Optional[int] = [] SCREAMING_SNAKE_CASE__ : str = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] sub_texts.append(SCREAMING_SNAKE_CASE__ ) else: current_sub_text.append(SCREAMING_SNAKE_CASE__ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: SCREAMING_SNAKE_CASE__ : Optional[Any] = re.sub(r""" (\[(MASK|SEP)\])""" , r"""\1""" , """ """.join(SCREAMING_SNAKE_CASE__ ) ) else: SCREAMING_SNAKE_CASE__ : Any = """""".join(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.clean_up_tokenization(SCREAMING_SNAKE_CASE__ ) return clean_text else: return text def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE__ , """wb""" ) as fi: SCREAMING_SNAKE_CASE__ : Optional[int] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE__ ) return (out_vocab_file,) def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.cls_token_id] SCREAMING_SNAKE_CASE__ : Dict = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = [self.sep_token_id] SCREAMING_SNAKE_CASE__ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
style_context_codestyle: 223
label: 0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) SCREAMING_SNAKE_CASE__:Any = { """configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Dict = [ """GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTBigCodeForSequenceClassification""", """GPTBigCodeForTokenClassification""", """GPTBigCodeForCausalLM""", """GPTBigCodeModel""", """GPTBigCodePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__:Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
code_codestyle: 720
"""simple docstring""" from __future__ import annotations def _lowerCamelCase( a , a , a ): if len(a ) == 0: raise ValueError("find_max() arg is an empty sequence" ) if ( left >= len(a ) or left < -len(a ) or right >= len(a ) or right < -len(a ) ): raise IndexError("list index out of range" ) if left == right: return nums[left] __a = (left + right) >> 1 # the middle __a = find_max(a , a , a ) # find max in range[left, mid] __a = find_max(a , mid + 1 , a ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
style_context_codestyle: 67
label: 0
'''simple docstring''' import requests from bsa import BeautifulSoup def a ( UpperCamelCase_ : Union[str, Any] = "https://www.worldometers.info/coronavirus" ) -> dict: snake_case__ =BeautifulSoup(requests.get(_lowercase ).text , 'html.parser' ) snake_case__ =soup.findAll('h1' ) snake_case__ =soup.findAll('div' , {'class': 'maincounter-number'} ) keys += soup.findAll('span' , {'class': 'panel-title'} ) values += soup.findAll('div' , {'class': 'number-table-main'} ) return {key.text.strip(): value.text.strip() for key, value in zip(_lowercase , _lowercase )} if __name__ == "__main__": print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''') for key, value in world_covidaa_stats().items(): print(f"""{key}\n{value}\n""")
code_codestyle: 538
'''simple docstring''' import importlib import torch import yaml from omegaconf import OmegaConf from taming.models.vqgan import VQModel def lowercase_ ( _lowercase , _lowercase=False ) -> Dict: '''simple docstring''' lowerCamelCase_ : Tuple = OmegaConf.load(_lowercase ) if display: print(yaml.dump(OmegaConf.to_container(_lowercase ) ) ) return config def lowercase_ ( _lowercase , _lowercase=None , _lowercase=None ) -> Optional[int]: '''simple docstring''' if conf_path is None: lowerCamelCase_ : int = '''./model_checkpoints/vqgan_only.yaml''' lowerCamelCase_ : Dict = load_config(_lowercase , display=_lowercase ) lowerCamelCase_ : List[str] = VQModel(**config.model.params ) if ckpt_path is None: lowerCamelCase_ : int = '''./model_checkpoints/vqgan_only.pt''' lowerCamelCase_ : Union[str, Any] = torch.load(_lowercase , map_location=_lowercase ) if ".ckpt" in ckpt_path: lowerCamelCase_ : str = sd['''state_dict'''] model.load_state_dict(_lowercase , strict=_lowercase ) model.to(_lowercase ) del sd return model def lowercase_ ( _lowercase , _lowercase ) -> List[str]: '''simple docstring''' lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : Any = model.encode(_lowercase ) print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" ) lowerCamelCase_ : Any = model.decode(_lowercase ) return xrec def lowercase_ ( _lowercase , _lowercase=False ) -> Any: '''simple docstring''' lowerCamelCase_, lowerCamelCase_ : Any = string.rsplit('''.''' , 1 ) if reload: lowerCamelCase_ : int = importlib.import_module(_lowercase ) importlib.reload(_lowercase ) return getattr(importlib.import_module(_lowercase , package=_lowercase ) , cls ) def lowercase_ ( _lowercase ) -> List[str]: '''simple docstring''' if "target" not in config: raise KeyError('''Expected key `target` to instantiate.''' ) return get_obj_from_str(config['''target'''] )(**config.get('''params''' , {} ) ) def lowercase_ ( _lowercase , _lowercase , _lowercase=True , _lowercase=True ) -> Any: '''simple docstring''' lowerCamelCase_ : int = instantiate_from_config(_lowercase ) if sd is not None: model.load_state_dict(_lowercase ) if gpu: model.cuda() if eval_mode: model.eval() return {"model": model} def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase ) -> Tuple: '''simple docstring''' if ckpt: lowerCamelCase_ : List[Any] = torch.load(_lowercase , map_location='''cpu''' ) lowerCamelCase_ : int = pl_sd['''global_step'''] print(F"""loaded model from global step {global_step}.""" ) else: lowerCamelCase_ : Optional[int] = {'''state_dict''': None} lowerCamelCase_ : str = None lowerCamelCase_ : Any = load_model_from_config(config.model , pl_sd['''state_dict'''] , gpu=_lowercase , eval_mode=_lowercase )['''model'''] return model, global_step
style_context_codestyle: 422
label: 0
'''simple docstring''' from math import pi def lowercase (_A , _A ): """simple docstring""" return 2 * pi * radius * (angle / 3_6_0) if __name__ == "__main__": print(arc_length(90, 10))
code_codestyle: 716
'''simple docstring''' import math from datetime import datetime, timedelta def lowercase (_A ): """simple docstring""" _lowerCAmelCase : Optional[Any] = year % 1_9 _lowerCAmelCase : Any = year % 4 _lowerCAmelCase : Optional[int] = year % 7 _lowerCAmelCase : int = math.floor(year / 1_0_0 ) _lowerCAmelCase : Dict = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 ) _lowerCAmelCase : Optional[Any] = leap_day_inhibits / 4 _lowerCAmelCase : Dict = ( 1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number ) % 3_0 _lowerCAmelCase : List[Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7 # days to be added to March 21 _lowerCAmelCase : Dict = (1_9 * metonic_cycle + secular_moon_shift) % 3_0 # PHM -> Paschal Full Moon _lowerCAmelCase : Union[str, Any] = ( 2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point ) % 7 if days_to_add == 2_9 and days_from_phm_to_sunday == 6: return datetime(_A , 4 , 1_9 ) elif days_to_add == 2_8 and days_from_phm_to_sunday == 6: return datetime(_A , 4 , 1_8 ) else: return datetime(_A , 3 , 2_2 ) + timedelta( days=int(days_to_add + days_from_phm_to_sunday ) ) if __name__ == "__main__": for year in (19_94, 20_00, 20_10, 20_21, 20_23): lowerCAmelCase : List[str] = """will be""" if year > datetime.now().year else """was""" print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
style_context_codestyle: 630
label: 0
from datetime import datetime import requests def snake_case (UpperCAmelCase__ ) -> bytes: UpperCamelCase_: Any = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url=' UpperCamelCase_: int = requests.get(base_url + url ).json()[0]['urls'][0]['src'] return requests.get(UpperCAmelCase__ ).content if __name__ == "__main__": A_ : Union[str, Any] = input('Enter Video/IGTV url: ').strip() A_ : int = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4''' with open(file_name, 'wb') as fp: fp.write(download_video(url)) print(F'''Done. Video saved to disk as {file_name}.''')
"""simple docstring""" from collections import namedtuple __snake_case : Optional[int] = namedtuple('from_to', 'from_ to') __snake_case : Union[str, Any] = { 'cubicmeter': from_to(1, 1), 'litre': from_to(0.001, 1_000), 'kilolitre': from_to(1, 1), 'gallon': from_to(0.00_454, 264.172), 'cubicyard': from_to(0.76_455, 1.30_795), 'cubicfoot': from_to(0.028, 35.3_147), 'cup': from_to(0.000_236_588, 4_226.75), } def a_ ( __a , __a , __a ): if from_type not in METRIC_CONVERSION: raise ValueError( f'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n''' + ''', '''.join(__a ) ) if to_type not in METRIC_CONVERSION: raise ValueError( f'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n''' + ''', '''.join(__a ) ) return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to if __name__ == "__main__": import doctest doctest.testmod()
import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : str = logging.get_logger(__name__) __A : Tuple = { """nvidia/segformer-b0-finetuned-ade-512-512""": ( """https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json""" ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class UpperCAmelCase_ ( A ): '''simple docstring''' a__ = '''segformer''' def __init__( self : Optional[Any] , a : Optional[int]=3 , a : int=4 , a : Tuple=[2, 2, 2, 2] , a : str=[8, 4, 2, 1] , a : Union[str, Any]=[32, 64, 160, 256] , a : Dict=[7, 3, 3, 3] , a : Optional[Any]=[4, 2, 2, 2] , a : List[str]=[1, 2, 5, 8] , a : int=[4, 4, 4, 4] , a : Any="gelu" , a : Optional[Any]=0.0 , a : Any=0.0 , a : Any=0.1 , a : Union[str, Any]=0.02 , a : int=0.1 , a : Tuple=1E-6 , a : Optional[int]=256 , a : Dict=255 , **a : Union[str, Any] , ) -> Optional[int]: super().__init__(**a ) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be""" """ removed, as the behaviour will default to that of reshape_last_stage = True.""" , a , ) SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = num_encoder_blocks SCREAMING_SNAKE_CASE = depths SCREAMING_SNAKE_CASE = sr_ratios SCREAMING_SNAKE_CASE = hidden_sizes SCREAMING_SNAKE_CASE = patch_sizes SCREAMING_SNAKE_CASE = strides SCREAMING_SNAKE_CASE = mlp_ratios SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = classifier_dropout_prob SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = drop_path_rate SCREAMING_SNAKE_CASE = layer_norm_eps SCREAMING_SNAKE_CASE = decoder_hidden_size SCREAMING_SNAKE_CASE = kwargs.get("""reshape_last_stage""" , a ) SCREAMING_SNAKE_CASE = semantic_loss_ignore_index class UpperCAmelCase_ ( A ): '''simple docstring''' a__ = version.parse('''1.11''' ) @property def _UpperCAmelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def _UpperCAmelCase ( self : str ) -> float: return 1E-4 @property def _UpperCAmelCase ( self : Union[str, Any] ) -> int: return 12
import os import sys import unittest __A : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __A : List[Any] = os.path.join(git_repo_path, """src""", """diffusers""") class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase ( self : Any ) -> Optional[Any]: SCREAMING_SNAKE_CASE = find_backend(""" if not is_torch_available():""" ) self.assertEqual(a , """torch""" ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") SCREAMING_SNAKE_CASE = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" ) self.assertEqual(a , """torch_and_transformers""" ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") SCREAMING_SNAKE_CASE = find_backend( """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" ) self.assertEqual(a , """torch_and_transformers_and_onnx""" ) def _UpperCAmelCase ( self : List[str] ) -> int: SCREAMING_SNAKE_CASE = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""" , a ) self.assertIn("""torch_and_transformers""" , a ) self.assertIn("""flax_and_transformers""" , a ) self.assertIn("""torch_and_transformers_and_onnx""" , a ) # Likewise, we can't assert on the exact content of a key self.assertIn("""UNet2DModel""" , objects["""torch"""] ) self.assertIn("""FlaxUNet2DConditionModel""" , objects["""flax"""] ) self.assertIn("""StableDiffusionPipeline""" , objects["""torch_and_transformers"""] ) self.assertIn("""FlaxStableDiffusionPipeline""" , objects["""flax_and_transformers"""] ) self.assertIn("""LMSDiscreteScheduler""" , objects["""torch_and_scipy"""] ) self.assertIn("""OnnxStableDiffusionPipeline""" , objects["""torch_and_transformers_and_onnx"""] ) def _UpperCAmelCase ( self : Any ) -> int: SCREAMING_SNAKE_CASE = create_dummy_object("""CONSTANT""" , """'torch'""" ) self.assertEqual(a , """\nCONSTANT = None\n""" ) SCREAMING_SNAKE_CASE = create_dummy_object("""function""" , """'torch'""" ) self.assertEqual( a , """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" ) SCREAMING_SNAKE_CASE = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, 'torch') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, 'torch') """ SCREAMING_SNAKE_CASE = create_dummy_object("""FakeClass""" , """'torch'""" ) self.assertEqual(a , a ) def _UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE = """# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) """ SCREAMING_SNAKE_CASE = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} ) self.assertEqual(dummy_files["""torch"""] , a )
from ..utils import DummyObject, requires_backends class A__ ( metaclass=UpperCAmelCase_ ): lowerCamelCase__ : Any =["torch", "transformers", "onnx"] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Tuple: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> List[Any]: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> Tuple: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class A__ ( metaclass=UpperCAmelCase_ ): lowerCamelCase__ : Any =["torch", "transformers", "onnx"] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> int: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> Tuple: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> Optional[int]: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class A__ ( metaclass=UpperCAmelCase_ ): lowerCamelCase__ : List[Any] =["torch", "transformers", "onnx"] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> List[Any]: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> List[str]: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class A__ ( metaclass=UpperCAmelCase_ ): lowerCamelCase__ : int =["torch", "transformers", "onnx"] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> str: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> Optional[int]: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> Optional[Any]: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class A__ ( metaclass=UpperCAmelCase_ ): lowerCamelCase__ : List[Any] =["torch", "transformers", "onnx"] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> List[Any]: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> List[str]: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> List[Any]: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class A__ ( metaclass=UpperCAmelCase_ ): lowerCamelCase__ : List[str] =["torch", "transformers", "onnx"] def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Any: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> List[str]: 
"""simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> Optional[int]: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL a__ : Tuple = logging.get_logger(__name__) def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str] ) -> List[List[ImageInput]]: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(SCREAMING_SNAKE_CASE_ ): return [[videos]] raise ValueError(f"Could not make batched video from {videos}" ) class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase =["pixel_values"] def __init__( self : int , a__ : bool = True , a__ : Dict[str, int] = None , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : bool = True , a__ : Dict[str, int] = None , a__ : bool = True , a__ : Union[int, float] = 1 / 255 , a__ : bool = True , a__ : bool = True , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , **a__ : Union[str, Any] , ): super().__init__(**a__ ) UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256} UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ ) UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' ) UpperCAmelCase = do_resize UpperCAmelCase = size UpperCAmelCase = do_center_crop UpperCAmelCase = crop_size UpperCAmelCase = resample UpperCAmelCase = do_rescale UpperCAmelCase = rescale_factor UpperCAmelCase = offset UpperCAmelCase = do_normalize UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def __snake_case ( self : Dict , a__ : np.ndarray , a__ : Dict[str, int] , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Optional[int] , ): UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ ) if "shortest_edge" in size: UpperCAmelCase = get_resize_output_image_size(a__ , size['''shortest_edge'''] , default_to_square=a__ ) elif "height" in size and "width" in size: UpperCAmelCase = (size['''height'''], size['''width''']) else: raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" ) return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ ) def __snake_case ( self : Union[str, Any] , a__ : np.ndarray , a__ : Dict[str, int] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ): UpperCAmelCase = get_size_dict(a__ ) if "height" not in size or "width" not in size: raise ValueError(f"Size must have 'height' and 'width' as keys. 
Got {size.keys()}" ) return center_crop(a__ , size=(size['''height'''], size['''width''']) , data_format=a__ , **a__ ) def __snake_case ( self : List[str] , a__ : np.ndarray , a__ : Union[int, float] , a__ : bool = True , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Dict , ): UpperCAmelCase = image.astype(np.floataa ) if offset: UpperCAmelCase = image - (scale / 2) return rescale(a__ , scale=a__ , data_format=a__ , **a__ ) def __snake_case ( self : int , a__ : np.ndarray , a__ : Union[float, List[float]] , a__ : Union[float, List[float]] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ): return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ ) def __snake_case ( self : Any , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ): if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) if offset and not do_rescale: raise ValueError('''For offset, do_rescale must also be set to True.''' ) # All transformations expect numpy arrays. 
UpperCAmelCase = to_numpy_array(a__ ) if do_resize: UpperCAmelCase = self.resize(image=a__ , size=a__ , resample=a__ ) if do_center_crop: UpperCAmelCase = self.center_crop(a__ , size=a__ ) if do_rescale: UpperCAmelCase = self.rescale(image=a__ , scale=a__ , offset=a__ ) if do_normalize: UpperCAmelCase = self.normalize(image=a__ , mean=a__ , std=a__ ) UpperCAmelCase = to_channel_dimension_format(a__ , a__ ) return image def __snake_case ( self : List[Any] , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : ChannelDimension = ChannelDimension.FIRST , **a__ : Any , ): UpperCAmelCase = do_resize if do_resize is not None else self.do_resize UpperCAmelCase = resample if resample is not None else self.resample UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase = offset if offset is not None else self.offset UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase = image_mean if image_mean is not None else self.image_mean UpperCAmelCase = image_std if image_std is not None else self.image_std UpperCAmelCase = size if size is not None else self.size UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ ) UpperCAmelCase = crop_size if crop_size is not None else self.crop_size UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' ) if not valid_images(a__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) UpperCAmelCase = make_batched(a__ ) UpperCAmelCase = [ [ self._preprocess_image( image=a__ , do_resize=a__ , size=a__ , resample=a__ , do_center_crop=a__ , crop_size=a__ , do_rescale=a__ , rescale_factor=a__ , offset=a__ , do_normalize=a__ , image_mean=a__ , image_std=a__ , data_format=a__ , ) for img in video ] for video in videos ] UpperCAmelCase = {'''pixel_values''': videos} return BatchFeature(data=a__ , tensor_type=a__ )
"""simple docstring""" def snake_case ( A__ ,A__ ,A__ ): if principal <= 0: raise Exception("Principal borrowed must be > 0" ) if rate_per_annum < 0: raise Exception("Rate of interest must be >= 0" ) if years_to_repay <= 0 or not isinstance(A__ ,A__ ): raise Exception("Years to repay must be an integer > 0" ) # Yearly rate is divided by 12 to get monthly rate UpperCAmelCase_ : Optional[Any] = rate_per_annum / 12 # Years to repay is multiplied by 12 to get number of payments as payment is monthly UpperCAmelCase_ : str = years_to_repay * 12 return ( principal * rate_per_month * (1 + rate_per_month) ** number_of_payments / ((1 + rate_per_month) ** number_of_payments - 1) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCamelCase_ (metaclass=__A ): __magic_name__ = ['''onnx'''] def __init__( self : List[Any] , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Dict ) -> Dict: requires_backends(self , ["onnx"] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Union[str, Any] ) -> int: requires_backends(cls , ["onnx"] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : str , *lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : str ) -> Optional[Any]: requires_backends(cls , ["onnx"] )
"""simple docstring""" from __future__ import annotations def _SCREAMING_SNAKE_CASE ( _lowercase : list[float] ) ->bool: '''simple docstring''' if len(_lowercase ) < 2: raise ValueError("Monogons and Digons are not polygons in the Euclidean space" ) if any(i <= 0 for i in nums ): raise ValueError("All values must be greater than 0" ) a : List[Any] = nums.copy() copy_nums.sort() return copy_nums[-1] < sum(copy_nums[:-1] ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple ) ->Optional[Any]: '''simple docstring''' a : Any = [] a : List[str] = set({"(", "[", "{"} ) a : int = set({")", "]", "}"} ) a : int = {"{": "}", "[": "]", "(": ")"} for i in range(len(_lowercase ) ): if s[i] in open_brackets: stack.append(s[i] ) elif s[i] in closed_brackets and ( len(_lowercase ) == 0 or (len(_lowercase ) > 0 and open_to_closed[stack.pop()] != s[i]) ): return False return len(_lowercase ) == 0 def _SCREAMING_SNAKE_CASE ( ) ->Tuple: '''simple docstring''' a : Any = input("Enter sequence of brackets: " ) if is_balanced(_lowercase ): print(_lowercase , "is balanced" ) else: print(_lowercase , "is not balanced" ) if __name__ == "__main__": main()
"""simple docstring""" from __future__ import annotations from collections.abc import Sequence from typing import Literal def UpperCAmelCase ( A__: str , A__: str ) -> str | Literal[False]: __lowerCamelCase : str = list(A__ ) __lowerCamelCase : Optional[Any] = list(A__ ) __lowerCamelCase : Union[str, Any] = 0 for i in range(len(A__ ) ): if lista[i] != lista[i]: count += 1 __lowerCamelCase : int = '_' if count > 1: return False else: return "".join(A__ ) def UpperCAmelCase ( A__: list[str] ) -> list[str]: __lowerCamelCase : Union[str, Any] = [] while True: __lowerCamelCase : int = ['$'] * len(A__ ) __lowerCamelCase : Optional[int] = [] for i in range(len(A__ ) ): for j in range(i + 1 , len(A__ ) ): __lowerCamelCase : Union[str, Any] = compare_string(binary[i] , binary[j] ) if k is False: __lowerCamelCase : str = '*' __lowerCamelCase : Dict = '*' temp.append('X' ) for i in range(len(A__ ) ): if checka[i] == "$": pi.append(binary[i] ) if len(A__ ) == 0: return pi __lowerCamelCase : Union[str, Any] = list(set(A__ ) ) def UpperCAmelCase ( A__: int , A__: Sequence[float] ) -> list[str]: __lowerCamelCase : Any = [] for minterm in minterms: __lowerCamelCase : Union[str, Any] = '' for _ in range(A__ ): __lowerCamelCase : Tuple = str(minterm % 2 ) + string minterm //= 2 temp.append(A__ ) return temp def UpperCAmelCase ( A__: str , A__: str , A__: int ) -> bool: __lowerCamelCase : Any = list(A__ ) __lowerCamelCase : int = list(A__ ) __lowerCamelCase : Optional[int] = 0 for i in range(len(A__ ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def UpperCAmelCase ( A__: list[list[int]] , A__: list[str] ) -> list[str]: __lowerCamelCase : Union[str, Any] = [] __lowerCamelCase : List[Any] = [0] * len(A__ ) for i in range(len(chart[0] ) ): __lowerCamelCase : Optional[int] = 0 __lowerCamelCase : int = -1 for j in range(len(A__ ) ): if chart[j][i] == 1: count += 1 __lowerCamelCase : Dict = j if count == 1: __lowerCamelCase : Optional[int] = 1 for i in range(len(A__ ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(A__ ) ): __lowerCamelCase : Optional[int] = 0 temp.append(prime_implicants[i] ) while True: __lowerCamelCase : Any = 0 __lowerCamelCase : Union[str, Any] = -1 __lowerCamelCase : Optional[Any] = 0 for i in range(len(A__ ) ): __lowerCamelCase : Union[str, Any] = chart[i].count(1 ) if count_n > max_n: __lowerCamelCase : Union[str, Any] = count_n __lowerCamelCase : Union[str, Any] = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(A__ ) ): __lowerCamelCase : Union[str, Any] = 0 def UpperCAmelCase ( A__: list[str] , A__: list[str] ) -> list[list[int]]: __lowerCamelCase : List[Any] = [[0 for x in range(len(A__ ) )] for x in range(len(A__ ) )] for i in range(len(A__ ) ): __lowerCamelCase : Tuple = prime_implicants[i].count('_' ) for j in range(len(A__ ) ): if is_for_table(prime_implicants[i] , binary[j] , A__ ): __lowerCamelCase : Any = 1 return chart def UpperCAmelCase ( ) -> None: __lowerCamelCase : Optional[int] = int(input('Enter the no. 
of variables\n' ) ) __lowerCamelCase : List[Any] = [ float(A__ ) for x in input( 'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split() ] __lowerCamelCase : Union[str, Any] = decimal_to_binary(A__ , A__ ) __lowerCamelCase : List[Any] = check(A__ ) print('Prime Implicants are:' ) print(A__ ) __lowerCamelCase : List[str] = prime_implicant_chart(A__ , A__ ) __lowerCamelCase : List[str] = selection(A__ , A__ ) print('Essential Prime Implicants are:' ) print(A__ ) if __name__ == "__main__": import doctest doctest.testmod() main()
"""simple docstring""" import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig a_ : Tuple = logging.get_logger(__name__) # General docstring a_ : List[str] = '''PoolFormerConfig''' # Base docstring a_ : Optional[Any] = '''sail/poolformer_s12''' a_ : List[Any] = [1, 5_12, 7, 7] # Image classification docstring a_ : Any = '''sail/poolformer_s12''' a_ : Optional[int] = '''tabby, tabby cat''' a_ : Optional[Any] = [ '''sail/poolformer_s12''', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def UpperCAmelCase ( A__: Optional[Any] , A__: float = 0.0 , A__: bool = False ) -> Tuple: if drop_prob == 0.0 or not training: return input __lowerCamelCase : Dict = 1 - drop_prob __lowerCamelCase : List[Any] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets __lowerCamelCase : List[Any] = keep_prob + torch.rand(A__ , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize __lowerCamelCase : Any = input.div(A__ ) * random_tensor return output class __lowercase( nn.Module ): '''simple docstring''' def __init__( self , __a = None ): super().__init__() __lowerCamelCase : int = drop_prob def snake_case_ ( self , __a ): return drop_path(__a , self.drop_prob , self.training ) def snake_case_ ( self ): return "p={}".format(self.drop_prob ) class __lowercase( nn.Module ): '''simple docstring''' def __init__( self , __a , __a , __a , __a , __a , __a=None ): super().__init__() __lowerCamelCase : int = patch_size if isinstance(__a , collections.abc.Iterable ) else (patch_size, patch_size) __lowerCamelCase : int = stride if isinstance(__a , collections.abc.Iterable ) else (stride, stride) __lowerCamelCase : Optional[int] = padding if isinstance(__a , collections.abc.Iterable ) else (padding, padding) __lowerCamelCase : Optional[Any] = nn.Convad(__a , __a , kernel_size=__a , stride=__a , padding=__a ) __lowerCamelCase : List[str] = norm_layer(__a ) if norm_layer else nn.Identity() def snake_case_ ( self , __a ): __lowerCamelCase : List[Any] = self.projection(__a ) __lowerCamelCase : Dict = self.norm(__a ) return embeddings class __lowercase( nn.GroupNorm ): '''simple docstring''' def __init__( self , __a , **__a ): super().__init__(1 , __a , **__a ) class __lowercase( nn.Module ): '''simple docstring''' def __init__( self , __a ): super().__init__() __lowerCamelCase : str = nn.AvgPoolad(__a , stride=1 , padding=pool_size // 2 , count_include_pad=__a ) def snake_case_ ( self , __a ): return self.pool(__a ) - hidden_states class __lowercase( nn.Module ): '''simple docstring''' def __init__( self , __a , __a , __a , __a ): super().__init__() __lowerCamelCase : Any = nn.Convad(__a , __a , 1 ) __lowerCamelCase : Dict = nn.Convad(__a , __a , 1 ) __lowerCamelCase : List[Any] = PoolFormerDropPath(__a ) if isinstance(config.hidden_act , __a ): __lowerCamelCase : List[str] = ACTaFN[config.hidden_act] else: __lowerCamelCase : str = config.hidden_act def snake_case_ ( self , __a ): __lowerCamelCase : int = self.conva(__a ) __lowerCamelCase : Dict = 
self.act_fn(__a ) __lowerCamelCase : List[str] = self.drop(__a ) __lowerCamelCase : int = self.conva(__a ) __lowerCamelCase : str = self.drop(__a ) return hidden_states class __lowercase( nn.Module ): '''simple docstring''' def __init__( self , __a , __a , __a , __a , __a , __a ): super().__init__() __lowerCamelCase : Tuple = PoolFormerPooling(__a ) __lowerCamelCase : Union[str, Any] = PoolFormerOutput(__a , __a , __a , __a ) __lowerCamelCase : List[Any] = PoolFormerGroupNorm(__a ) __lowerCamelCase : List[Any] = PoolFormerGroupNorm(__a ) # Useful for training neural nets __lowerCamelCase : Any = PoolFormerDropPath(__a ) if drop_path > 0.0 else nn.Identity() __lowerCamelCase : Tuple = config.use_layer_scale if config.use_layer_scale: __lowerCamelCase : List[str] = nn.Parameter( config.layer_scale_init_value * torch.ones((__a) ) , requires_grad=__a ) __lowerCamelCase : Optional[int] = nn.Parameter( config.layer_scale_init_value * torch.ones((__a) ) , requires_grad=__a ) def snake_case_ ( self , __a ): if self.use_layer_scale: __lowerCamelCase : Union[str, Any] = self.pooling(self.before_norm(__a ) ) __lowerCamelCase : Any = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection __lowerCamelCase : Optional[Any] = hidden_states + self.drop_path(__a ) __lowerCamelCase : Tuple = () __lowerCamelCase : Optional[Any] = self.output(self.after_norm(__a ) ) __lowerCamelCase : List[Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection __lowerCamelCase : List[Any] = hidden_states + self.drop_path(__a ) __lowerCamelCase : Optional[Any] = (output,) + outputs return outputs else: __lowerCamelCase : Tuple = self.drop_path(self.pooling(self.before_norm(__a ) ) ) # First residual connection __lowerCamelCase : List[str] = pooling_output + hidden_states __lowerCamelCase : int = () # Second residual connection inside the PoolFormerOutput block __lowerCamelCase : List[str] = self.drop_path(self.output(self.after_norm(__a ) ) ) __lowerCamelCase : str = hidden_states + layer_output __lowerCamelCase : int = (output,) + outputs return outputs class __lowercase( nn.Module ): '''simple docstring''' def __init__( self , __a ): super().__init__() __lowerCamelCase : int = config # stochastic depth decay rule __lowerCamelCase : int = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings __lowerCamelCase : List[str] = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) __lowerCamelCase : Optional[int] = nn.ModuleList(__a ) # Transformer blocks __lowerCamelCase : Any = [] __lowerCamelCase : int = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers __lowerCamelCase : Optional[int] = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( __a , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(__a ) ) __lowerCamelCase : str = nn.ModuleList(__a ) def snake_case_ ( self , __a , __a=False , __a=True ): __lowerCamelCase : Union[str, Any] = () if output_hidden_states else None __lowerCamelCase : int = 
pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): __lowerCamelCase , __lowerCamelCase : Any = layers # Get patch embeddings from hidden_states __lowerCamelCase : Any = embedding_layer(__a ) # Send the embeddings through the blocks for _, blk in enumerate(__a ): __lowerCamelCase : Optional[int] = blk(__a ) __lowerCamelCase : Tuple = layer_outputs[0] if output_hidden_states: __lowerCamelCase : Union[str, Any] = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=__a , hidden_states=__a ) class __lowercase( lowercase__ ): '''simple docstring''' __a : Tuple = PoolFormerConfig __a : Tuple = 'poolformer' __a : Optional[int] = 'pixel_values' __a : Optional[Any] = True def snake_case_ ( self , __a ): if isinstance(__a , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__a , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def snake_case_ ( self , __a , __a=False ): if isinstance(__a , __a ): __lowerCamelCase : Union[str, Any] = value a_ : Union[str, Any] = R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' a_ : List[str] = R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`PoolFormerImageProcessor.__call__`] for details. ''' @add_start_docstrings( 'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' 
, lowercase__ , ) class __lowercase( lowercase__ ): '''simple docstring''' def __init__( self , __a ): super().__init__(__a ) __lowerCamelCase : Optional[Any] = config __lowerCamelCase : Any = PoolFormerEncoder(__a ) # Initialize weights and apply final processing self.post_init() def snake_case_ ( self ): return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def snake_case_ ( self , __a = None , __a = None , __a = None , ): __lowerCamelCase : Union[str, Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowerCamelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values' ) __lowerCamelCase : Any = self.encoder( __a , output_hidden_states=__a , return_dict=__a , ) __lowerCamelCase : int = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=__a , hidden_states=encoder_outputs.hidden_states , ) class __lowercase( nn.Module ): '''simple docstring''' def __init__( self , __a ): super().__init__() __lowerCamelCase : Optional[Any] = nn.Linear(config.hidden_size , config.hidden_size ) def snake_case_ ( self , __a ): __lowerCamelCase : List[Any] = self.dense(__a ) return output @add_start_docstrings( '\n PoolFormer Model transformer with an image classification head on top\n ' , lowercase__ , ) class __lowercase( lowercase__ ): '''simple docstring''' def __init__( self , __a ): super().__init__(__a ) __lowerCamelCase : str = config.num_labels __lowerCamelCase : Optional[Any] = PoolFormerModel(__a ) # Final norm __lowerCamelCase : str = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head __lowerCamelCase : Optional[Any] = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def snake_case_ ( self , __a = None , __a = None , __a = None , __a = None , ): __lowerCamelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict __lowerCamelCase : Tuple = self.poolformer( __a , output_hidden_states=__a , return_dict=__a , ) __lowerCamelCase : int = outputs[0] __lowerCamelCase : Optional[int] = self.classifier(self.norm(__a ).mean([-2, -1] ) ) __lowerCamelCase : Union[str, Any] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __lowerCamelCase : Any = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __lowerCamelCase : Any = 'single_label_classification' else: __lowerCamelCase : Optional[Any] = 'multi_label_classification' if self.config.problem_type == "regression": __lowerCamelCase : int = MSELoss() if self.num_labels == 1: __lowerCamelCase : Optional[Any] = loss_fct(logits.squeeze() , labels.squeeze() ) else: __lowerCamelCase : Optional[Any] = loss_fct(__a , __a ) elif self.config.problem_type == "single_label_classification": __lowerCamelCase : Tuple = CrossEntropyLoss() __lowerCamelCase : int = 
loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __lowerCamelCase : List[Any] = BCEWithLogitsLoss() __lowerCamelCase : Optional[Any] = loss_fct(__a , __a ) if not return_dict: __lowerCamelCase : Optional[Any] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__a , logits=__a , hidden_states=outputs.hidden_states )
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCamelCase__ ( _A): """simple docstring""" a__ : List[str] = ["image_processor", "tokenizer"] a__ : Optional[Any] = "LayoutLMv3ImageProcessor" a__ : Union[str, Any] = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast") def __init__( self : str , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=None , **__lowerCAmelCase : Optional[int] ) -> Any: _A = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __lowerCAmelCase , ) _A = kwargs.pop('''feature_extractor''' ) _A = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__lowerCAmelCase , __lowerCAmelCase ) def __call__( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __lowerCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , __lowerCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , __lowerCAmelCase : Union[bool, str, TruncationStrategy] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : int = 0 , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , **__lowerCAmelCase : int , ) -> BatchEncoding: # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( '''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' ) # first, apply the image processor _A = self.image_processor(images=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _A = [text] # add batch dimension (as the image processor always adds a batch dimension) _A = features['''words'''] _A = self.tokenizer( text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , 
return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , ) # add pixel values _A = features.pop('''pixel_values''' ) if return_overflowing_tokens is True: _A = self.get_overflowing_images(__lowerCAmelCase , encoded_inputs['''overflow_to_sample_mapping'''] ) _A = images return encoded_inputs def snake_case_ ( self : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] ) -> int: # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image _A = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(__lowerCAmelCase ) != len(__lowerCAmelCase ): raise ValueError( '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got''' f''' {len(__lowerCAmelCase )} and {len(__lowerCAmelCase )}''' ) return images_with_overflow def snake_case_ ( self : List[Any] , *__lowerCAmelCase : str , **__lowerCAmelCase : str ) -> Optional[int]: return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def snake_case_ ( self : Optional[int] , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Optional[Any] ) -> str: return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @property def snake_case_ ( self : List[str] ) -> Any: return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def snake_case_ ( self : Optional[Any] ) -> List[str]: warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCAmelCase , ) return self.image_processor_class @property def snake_case_ ( self : List[Any] ) -> Tuple: warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __lowerCAmelCase , ) return self.image_processor
def nand_gate(input_1: int, input_2: int) -> int:
    """Return the NAND of two binary inputs: 1 unless both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class __A ( unittest.TestCase ): def _snake_case ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _snake_case ( self ): torch.manual_seed(0 ) lowerCamelCase =UNetaDModel( sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , ) return model @property def _snake_case ( self ): torch.manual_seed(0 ) lowerCamelCase =UNetaDConditionModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=10 , ) return model @property def _snake_case ( self ): torch.manual_seed(0 ) lowerCamelCase =AutoencoderKL( sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , ) lowerCamelCase =UNetaDModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , ) return vqvae, unet @slow def _snake_case ( self ): lowerCamelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator lowerCamelCase =Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) lowerCamelCase =DDPMScheduler() lowerCamelCase =AudioDiffusionPipeline(vqvae=UpperCAmelCase_ , unet=self.dummy_unet , mel=UpperCAmelCase_ , scheduler=UpperCAmelCase_ ) lowerCamelCase =pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCamelCase =torch.Generator(device=UpperCAmelCase_ ).manual_seed(42 ) lowerCamelCase =pipe(generator=UpperCAmelCase_ , steps=4 ) lowerCamelCase =output.audios[0] lowerCamelCase =output.images[0] lowerCamelCase =torch.Generator(device=UpperCAmelCase_ ).manual_seed(42 ) lowerCamelCase =pipe(generator=UpperCAmelCase_ , steps=4 , return_dict=UpperCAmelCase_ ) lowerCamelCase =output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) lowerCamelCase =np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10] lowerCamelCase =np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:10] lowerCamelCase =np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 lowerCamelCase =Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) lowerCamelCase =DDIMScheduler() lowerCamelCase =self.dummy_vqvae_and_unet 
lowerCamelCase =AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=UpperCAmelCase_ , scheduler=UpperCAmelCase_ ) lowerCamelCase =pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) np.random.seed(0 ) lowerCamelCase =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) lowerCamelCase =torch.Generator(device=UpperCAmelCase_ ).manual_seed(42 ) lowerCamelCase =pipe(raw_audio=UpperCAmelCase_ , generator=UpperCAmelCase_ , start_step=5 , steps=10 ) lowerCamelCase =output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) lowerCamelCase =np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10] lowerCamelCase =np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 lowerCamelCase =self.dummy_unet_condition lowerCamelCase =AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=UpperCAmelCase_ , mel=UpperCAmelCase_ , scheduler=UpperCAmelCase_ ) lowerCamelCase =pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) np.random.seed(0 ) lowerCamelCase =torch.rand((1, 1, 10) ) lowerCamelCase =pipe(generator=UpperCAmelCase_ , encoding=UpperCAmelCase_ ) lowerCamelCase =output.images[0] lowerCamelCase =np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10] lowerCamelCase =np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class __A ( unittest.TestCase ): def _snake_case ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self ): lowerCamelCase =torch_device lowerCamelCase =DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" ) lowerCamelCase =pipe.to(UpperCAmelCase_ ) pipe.set_progress_bar_config(disable=UpperCAmelCase_ ) lowerCamelCase =torch.Generator(device=UpperCAmelCase_ ).manual_seed(42 ) lowerCamelCase =pipe(generator=UpperCAmelCase_ ) lowerCamelCase =output.audios[0] lowerCamelCase =output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] lowerCamelCase =np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10] lowerCamelCase =np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase__ : str ={'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : List[Any] =['''FNetTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Any =['''FNetTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Dict =[ '''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FNetForMaskedLM''', '''FNetForMultipleChoice''', '''FNetForNextSentencePrediction''', '''FNetForPreTraining''', '''FNetForQuestionAnswering''', '''FNetForSequenceClassification''', '''FNetForTokenClassification''', '''FNetLayer''', '''FNetModel''', '''FNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys UpperCAmelCase__ : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    """Rename the lm_head key in a DialoGPT checkpoint and save it in the Transformers layout."""
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30_522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __lowercase :str = KandinskyImgaImgPipeline __lowercase :Tuple = ["prompt", "image_embeds", "negative_image_embeds", "image"] __lowercase :Optional[Any] = [ "prompt", "negative_prompt", "image_embeds", "negative_image_embeds", "image", ] __lowercase :Any = [ "generator", "height", "width", "strength", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __lowercase :Tuple = False @property def _lowerCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return 32 @property def _lowerCAmelCase ( self ) -> Any: '''simple docstring''' return 32 @property def _lowerCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return self.time_input_dim @property def _lowerCAmelCase ( self ) -> List[str]: '''simple docstring''' return self.time_input_dim * 4 @property def _lowerCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return 100 @property def _lowerCAmelCase ( self ) -> Any: '''simple docstring''' lowerCamelCase_ = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' ) return tokenizer @property def _lowerCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase_ = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , ) lowerCamelCase_ = MultilingualCLIP(UpperCamelCase__ ) lowerCamelCase_ = text_encoder.eval() return text_encoder @property def _lowerCAmelCase ( self ) -> Any: '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase_ = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''text_image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''text_image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } lowerCamelCase_ = UNetaDConditionModel(**UpperCamelCase__ ) return model @property def _lowerCAmelCase ( self ) -> Tuple: '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, 
"norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _lowerCAmelCase ( self ) -> Tuple: '''simple docstring''' torch.manual_seed(0 ) lowerCamelCase_ = VQModel(**self.dummy_movq_kwargs ) return model def _lowerCAmelCase ( self ) -> int: '''simple docstring''' lowerCamelCase_ = self.dummy_text_encoder lowerCamelCase_ = self.dummy_tokenizer lowerCamelCase_ = self.dummy_unet lowerCamelCase_ = self.dummy_movq lowerCamelCase_ = { '''num_train_timesteps''': 1_000, '''beta_schedule''': '''linear''', '''beta_start''': 0.00_085, '''beta_end''': 0.012, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } lowerCamelCase_ = DDIMScheduler(**UpperCamelCase__ ) lowerCamelCase_ = { '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> Any: '''simple docstring''' lowerCamelCase_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) lowerCamelCase_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCamelCase__ ) # create init_image lowerCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert('''RGB''' ).resize((256, 256) ) if str(UpperCamelCase__ ).startswith('''mps''' ): lowerCamelCase_ = torch.manual_seed(UpperCamelCase__ ) else: lowerCamelCase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) lowerCamelCase_ = { '''prompt''': '''horse''', '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def _lowerCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowerCamelCase_ = '''cpu''' lowerCamelCase_ = self.get_dummy_components() lowerCamelCase_ = self.pipeline_class(**UpperCamelCase__ ) lowerCamelCase_ = pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCamelCase_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) ) lowerCamelCase_ = output.images lowerCamelCase_ = pipe( **self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0] lowerCamelCase_ = image[0, -3:, -3:, -1] lowerCamelCase_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCamelCase_ = np.array( [0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' super().tearDown() gc.collect() 
torch.cuda.empty_cache() def _lowerCAmelCase ( self ) -> List[str]: '''simple docstring''' lowerCamelCase_ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/kandinsky_img2img_frog.npy''' ) lowerCamelCase_ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) lowerCamelCase_ = '''A red cartoon frog, 4k''' lowerCamelCase_ = KandinskyPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(UpperCamelCase__ ) lowerCamelCase_ = KandinskyImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa ) lowerCamelCase_ = pipeline.to(UpperCamelCase__ ) pipeline.set_progress_bar_config(disable=UpperCamelCase__ ) lowerCamelCase_ = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCamelCase_ , lowerCamelCase_ = pipe_prior( UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() lowerCamelCase_ = pipeline( UpperCamelCase__ , image=UpperCamelCase__ , image_embeds=UpperCamelCase__ , negative_image_embeds=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , ) lowerCamelCase_ = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class lowerCAmelCase : """simple docstring""" def __init__( self ) -> Dict: '''simple docstring''' lowerCamelCase_ = '''''' lowerCamelCase_ = '''''' lowerCamelCase_ = [] lowerCamelCase_ = 0 lowerCamelCase_ = 256 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 0 def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Any: '''simple docstring''' lowerCamelCase_ = cva.imread(UpperCamelCase__ , 0 ) lowerCamelCase_ = copy.deepcopy(self.img ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = plt.hist(self.img.ravel() , 256 , [0, 256] , label='''x''' ) lowerCamelCase_ = np.sum(UpperCamelCase__ ) for i in range(len(UpperCamelCase__ ) ): lowerCamelCase_ = x[i] / self.k self.sk += prk lowerCamelCase_ = (self.L - 1) * self.sk if self.rem != 0: lowerCamelCase_ = int(last % last ) lowerCamelCase_ = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(UpperCamelCase__ ) lowerCamelCase_ = int(np.ma.count(self.img ) / self.img[1].size ) lowerCamelCase_ = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): lowerCamelCase_ = self.img[j][i] if num != self.last_list[num]: lowerCamelCase_ = self.last_list[num] cva.imwrite('''output_data/output.jpg''' , self.img ) def _lowerCAmelCase ( self ) -> str: '''simple docstring''' plt.hist(self.img.ravel() , 256 , [0, 256] ) def _lowerCAmelCase ( self ) -> int: '''simple docstring''' cva.imshow('''Output-Image''' , self.img ) cva.imshow('''Input-Image''' , self.original_image ) cva.waitKey(5_000 ) cva.destroyAllWindows() if __name__ == "__main__": __lowercase : List[Any] = os.path.join(os.path.basename(__file__), """image_data/input.jpg""") __lowercase : List[str] = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer _lowerCamelCase = ['gpt2'] _lowerCamelCase = 'gpt2' if is_tf_available(): class UpperCamelCase_ ( tf.Module ): def __init__( self :Optional[Any] , __A :Union[str, Any] ) -> List[Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = tokenizer SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__A ) SCREAMING_SNAKE_CASE__ = TFGPTaLMHeadModel.from_config(__A ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text""" ),) ) def _snake_case ( self :Optional[int] , __A :int ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.tokenizer(__A ) SCREAMING_SNAKE_CASE__ = tokenized["""input_ids"""].to_tensor() SCREAMING_SNAKE_CASE__ = tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) SCREAMING_SNAKE_CASE__ = self.model(input_ids=__A , attention_mask=__A )["""logits"""] return outputs @require_tf @require_keras_nlp class UpperCamelCase_ ( unittest.TestCase ): def _snake_case ( self :List[Any] ) -> Dict: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ = [GPTaTokenizer.from_pretrained(__A ) for checkpoint in (TOKENIZER_CHECKPOINTS)] SCREAMING_SNAKE_CASE__ = [TFGPTaTokenizer.from_pretrained(__A ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) SCREAMING_SNAKE_CASE__ = [ """This is a straightforward English test sentence.""", """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""", """Now we're going to add some Chinese: 一 二 三 一二三""", """And some much more rare Chinese: 齉 堃 齉堃""", """Je vais aussi écrire en français pour tester les accents""", """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""", ] SCREAMING_SNAKE_CASE__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def _snake_case ( self :Any ) -> Any: """simple docstring""" for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: SCREAMING_SNAKE_CASE__ = tokenizer([test_inputs] , return_tensors="""tf""" ) SCREAMING_SNAKE_CASE__ = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors SCREAMING_SNAKE_CASE__ = python_outputs[key].numpy() SCREAMING_SNAKE_CASE__ = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(__A , tf.intaa ) == tf_outputs_values ) ) @slow def _snake_case ( self :Any ) -> int: """simple docstring""" for tf_tokenizer in self.tf_tokenizers: SCREAMING_SNAKE_CASE__ = tf.function(__A ) for test_inputs in self.test_sentences: SCREAMING_SNAKE_CASE__ = tf.constant(__A ) SCREAMING_SNAKE_CASE__ = compiled_tokenizer(__A ) SCREAMING_SNAKE_CASE__ = tf_tokenizer(__A ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def _snake_case ( self :List[Any] ) -> Any: """simple docstring""" for tf_tokenizer in self.tf_tokenizers: SCREAMING_SNAKE_CASE__ = ModelToSave(tokenizer=__A ) 
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor([self.test_sentences[0]] ) SCREAMING_SNAKE_CASE__ = model.serving(__A ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: SCREAMING_SNAKE_CASE__ = Path(__A ) / """saved.model""" tf.saved_model.save(__A , __A , signatures={"""serving_default""": model.serving} ) SCREAMING_SNAKE_CASE__ = tf.saved_model.load(__A ) SCREAMING_SNAKE_CASE__ = loaded_model.signatures["""serving_default"""](__A )["""output_0"""] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def _snake_case ( self :Dict ) -> int: """simple docstring""" for tf_tokenizer in self.tf_tokenizers: SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor([self.test_sentences[0]] ) SCREAMING_SNAKE_CASE__ = tf_tokenizer(__A ) # Build model with some sample inputs SCREAMING_SNAKE_CASE__ = tf_tokenizer.get_config() SCREAMING_SNAKE_CASE__ = TFGPTaTokenizer.from_config(__A ) SCREAMING_SNAKE_CASE__ = model_from_config(__A ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def _snake_case ( self :List[Any] ) -> Any: """simple docstring""" for tf_tokenizer in self.tf_tokenizers: # for the test to run SCREAMING_SNAKE_CASE__ = 12_3123 for max_length in [3, 5, 1024]: SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor([self.test_sentences[0]] ) SCREAMING_SNAKE_CASE__ = tf_tokenizer(__A , max_length=__A ) SCREAMING_SNAKE_CASE__ = out["""input_ids"""].numpy().shape[1] assert out_length == max_length
from copy import deepcopy


class FenwickTree:
    """Fenwick tree (binary indexed tree) for prefix sums over a 0-indexed array."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        """Build the tree in O(n) from an existing array."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        """Recover the underlying array from the tree."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add `value` to the element at `index`."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set the element at `index` to `value`."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Sum of the elements with indices in [0, right)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Sum of the elements with indices in the half-open range [left, right)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        """Value of the element at `index`."""
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Largest index i whose prefix sum through i is <= value (-1 if none exists)."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
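A minimal usage sketch for the Fenwick tree above (the class and method names follow the reconstruction; `prefix(right)` sums indices [0, right) and `query(left, right)` covers the half-open range [left, right)):

# Usage sketch -- assumes the FenwickTree class defined above.
fenwick = FenwickTree([1, 2, 3, 4, 5])
print(fenwick.prefix(3))     # 6  -> 1 + 2 + 3
print(fenwick.query(1, 4))   # 9  -> 2 + 3 + 4
fenwick.add(2, 10)           # element at index 2 becomes 13
print(fenwick.query(1, 4))   # 19 -> 2 + 13 + 4
print(fenwick.get_array())   # [1, 2, 13, 4, 5]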
import argparse import glob import logging import os from argparse import Namespace from importlib import import_module import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from utils_ner import TokenClassificationTask __UpperCAmelCase : Union[str, Any] = logging.getLogger(__name__) class __snake_case ( __snake_case ): '''simple docstring''' lowerCAmelCase__ = 'token-classification' def __init__( self : str , A : Tuple ): if type(A_ ) == dict: __snake_case: Any = Namespace(**A_ ) __snake_case: Tuple = import_module("""tasks""" ) try: __snake_case: Dict = getattr(A_ , hparams.task_type ) __snake_case: TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. ''' f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' ) __snake_case: List[Any] = self.token_classification_task.get_labels(hparams.labels ) __snake_case: Any = CrossEntropyLoss().ignore_index super().__init__(A_ , len(self.labels ) , self.mode ) def UpperCAmelCase__ ( self : Union[str, Any] , **A : Dict ): return self.model(**A_ ) def UpperCAmelCase__ ( self : Dict , A : Union[str, Any] , A : List[str] ): __snake_case: Optional[Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type != "distilbert": __snake_case: str = ( batch[2] if self.config.model_type in ["bert", "xlnet"] else None ) # XLM and RoBERTa don"t use token_type_ids __snake_case: List[str] = self(**A_ ) __snake_case: Optional[int] = outputs[0] # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss} def UpperCAmelCase__ ( self : List[Any] ): __snake_case: List[str] = self.hparams for mode in ["train", "dev", "test"]: __snake_case: str = self._feature_file(A_ ) if os.path.exists(A_ ) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" , A_ ) __snake_case: Union[str, Any] = torch.load(A_ ) else: logger.info("""Creating features from dataset file at %s""" , args.data_dir ) __snake_case: int = self.token_classification_task.read_examples_from_file(args.data_dir , A_ ) __snake_case: Optional[Any] = self.token_classification_task.convert_examples_to_features( A_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=A_ , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info("""Saving features into cached file %s""" , A_ ) torch.save(A_ , A_ ) def UpperCAmelCase__ ( self : Tuple , A : Union[str, Any] , A : Union[str, Any] , A : str = False ): __snake_case: Optional[Any] = self._feature_file(A_ ) logger.info("""Loading features from cached file %s""" , A_ ) __snake_case: str = torch.load(A_ ) __snake_case: Optional[Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) __snake_case: List[Any] = torch.tensor([f.attention_mask for f in features] , 
dtype=torch.long ) if features[0].token_type_ids is not None: __snake_case: List[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) else: __snake_case: str = torch.tensor([0 for f in features] , dtype=torch.long ) # HACK(we will not use this anymore soon) __snake_case: Tuple = torch.tensor([f.label_ids for f in features] , dtype=torch.long ) return DataLoader( TensorDataset(A_ , A_ , A_ , A_ ) , batch_size=A_ ) def UpperCAmelCase__ ( self : Tuple , A : Any , A : Tuple ): """Compute validation""" "" __snake_case: int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type != "distilbert": __snake_case: List[Any] = ( batch[2] if self.config.model_type in ["bert", "xlnet"] else None ) # XLM and RoBERTa don"t use token_type_ids __snake_case: Optional[Any] = self(**A_ ) __snake_case: Tuple = outputs[:2] __snake_case: Optional[Any] = logits.detach().cpu().numpy() __snake_case: Optional[Any] = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def UpperCAmelCase__ ( self : str , A : Optional[int] ): __snake_case: Optional[int] = torch.stack([x["""val_loss"""] for x in outputs] ).mean() __snake_case: List[str] = np.concatenate([x["""pred"""] for x in outputs] , axis=0 ) __snake_case: List[str] = np.argmax(A_ , axis=2 ) __snake_case: Union[str, Any] = np.concatenate([x["""target"""] for x in outputs] , axis=0 ) __snake_case: List[str] = dict(enumerate(self.labels ) ) __snake_case: Tuple = [[] for _ in range(out_label_ids.shape[0] )] __snake_case: List[Any] = [[] for _ in range(out_label_ids.shape[0] )] for i in range(out_label_ids.shape[0] ): for j in range(out_label_ids.shape[1] ): if out_label_ids[i, j] != self.pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) __snake_case: Union[str, Any] = { "val_loss": val_loss_mean, "accuracy_score": accuracy_score(A_ , A_ ), "precision": precision_score(A_ , A_ ), "recall": recall_score(A_ , A_ ), "f1": fa_score(A_ , A_ ), } __snake_case: Tuple = dict(results.items() ) __snake_case: int = results return ret, preds_list, out_label_list def UpperCAmelCase__ ( self : Tuple , A : Optional[int] ): __snake_case: str = self._eval_end(A_ ) __snake_case: Dict = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def UpperCAmelCase__ ( self : int , A : Tuple ): __snake_case: List[str] = self._eval_end(A_ ) # Converting to the dict required by pl # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\ # pytorch_lightning/trainer/logging.py#L139 __snake_case: Tuple = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def UpperCAmelCase__ ( A : Any , A : str ): BaseTransformer.add_model_specific_args(A_ , A_ ) parser.add_argument( """--task_type""" , default="""NER""" , type=A_ , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" ) parser.add_argument( """--max_seq_length""" , default=128 , type=A_ , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--labels""" , default="""""" , type=A_ , help="""Path to a file containing all labels. 
If not specified, CoNLL-2003 labels are used.""" , ) parser.add_argument( """--gpus""" , default=0 , type=A_ , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" ) return parser if __name__ == "__main__": __UpperCAmelCase : Tuple = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) __UpperCAmelCase : Union[str, Any] = NERTransformer.add_model_specific_args(parser, os.getcwd()) __UpperCAmelCase : Tuple = parser.parse_args() __UpperCAmelCase : Any = NERTransformer(args) __UpperCAmelCase : Tuple = generic_train(model, args) if args.do_predict: # See https://github.com/huggingface/transformers/issues/3159 # pl use this default format to create a checkpoint: # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\ # /pytorch_lightning/callbacks/model_checkpoint.py#L322 __UpperCAmelCase : List[Any] = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True)) __UpperCAmelCase : Union[str, Any] = model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node


NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Insert a list of words into the trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word into the trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True if the exact word is stored in the trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Remove a word from the trie, pruning nodes that become unused."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
    MaskedBertForMultipleChoice,
    MaskedBertForQuestionAnswering,
    MaskedBertForSequenceClassification,
    MaskedBertForTokenClassification,
    MaskedBertModel,
)
from .modules import *
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Parse the command line options for the launcher."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
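The launcher above imports the training script as a module and hands `mod._mp_fn` to `xmp.spawn`, so the launched script has to expose a module-level `_mp_fn`. A minimal sketch of that hook (illustrative only; `index` is the process ordinal that `xmp.spawn` passes to each spawned TPU process, and `main` stands in for the script's own training entry point):

# Sketch of the entry point a training script needs for this launcher.
def _mp_fn(index):
    # sys.argv has already been patched by the launcher, so a typical
    # script simply forwards to its own main() function here.
    main()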
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True if the two strings are anagrams of each other."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character in the input strings,
    # increment the count for one and decrement it for the other
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING a__ : Union[str, Any] = logging.get_logger(__name__) a__ : List[Any] = { "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json", } class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : str = "instructblip_vision_model" def __init__( self : Tuple , lowerCAmelCase : List[str]=14_08 , lowerCAmelCase : Union[str, Any]=61_44 , lowerCAmelCase : Optional[Any]=39 , lowerCAmelCase : Union[str, Any]=16 , lowerCAmelCase : Union[str, Any]=2_24 , lowerCAmelCase : Optional[int]=14 , lowerCAmelCase : List[Any]="gelu" , lowerCAmelCase : Optional[Any]=1E-6 , lowerCAmelCase : Optional[Any]=0.0 , lowerCAmelCase : List[Any]=1E-1_0 , lowerCAmelCase : Any=True , **lowerCAmelCase : Dict , ) -> Any: """simple docstring""" super().__init__(**lowerCAmelCase) lowercase__ = hidden_size lowercase__ = intermediate_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = patch_size lowercase__ = image_size lowercase__ = initializer_range lowercase__ = attention_dropout lowercase__ = layer_norm_eps lowercase__ = hidden_act lowercase__ = qkv_bias @classmethod def UpperCAmelCase ( cls : Optional[int] , lowerCAmelCase : Union[str, os.PathLike] , **lowerCAmelCase : Optional[int]) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(lowerCAmelCase) lowercase__, lowercase__ = cls.get_config_dict(lowerCAmelCase , **lowerCAmelCase) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('model_type') == "instructblip": lowercase__ = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''') return cls.from_dict(lowerCAmelCase , **lowerCAmelCase) class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : Dict = "instructblip_qformer" def __init__( self : Union[str, Any] , lowerCAmelCase : str=3_05_22 , lowerCAmelCase : Tuple=7_68 , lowerCAmelCase : Optional[int]=12 , lowerCAmelCase : int=12 , lowerCAmelCase : str=30_72 , lowerCAmelCase : Optional[Any]="gelu" , lowerCAmelCase : Any=0.1 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : List[str]=5_12 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Any=1E-1_2 , lowerCAmelCase : Any=0 , lowerCAmelCase : Union[str, Any]="absolute" , lowerCAmelCase : Any=2 , lowerCAmelCase : str=14_08 , **lowerCAmelCase : List[Any] , ) -> str: """simple docstring""" super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = hidden_act lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = position_embedding_type lowercase__ = cross_attention_frequency lowercase__ = encoder_hidden_size @classmethod def UpperCAmelCase ( cls : List[str] , lowerCAmelCase : Union[str, os.PathLike] , **lowerCAmelCase : Any) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(lowerCAmelCase) lowercase__, lowercase__ = cls.get_config_dict(lowerCAmelCase , **lowerCAmelCase) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('model_type') == "instructblip": lowercase__ = config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''') return cls.from_dict(lowerCAmelCase , **lowerCAmelCase) class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : Tuple = "instructblip" A : Optional[int] = True def __init__( self : Optional[int] , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : Union[str, Any]=32 , **lowerCAmelCase : int) -> Optional[Any]: """simple docstring""" super().__init__(**lowerCAmelCase) if vision_config is None: lowercase__ = {} logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.') if qformer_config is None: lowercase__ = {} logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.') if text_config is None: lowercase__ = {} logger.info('text_config is None. 
Initializing the text config with default values (`OPTConfig`).') lowercase__ = InstructBlipVisionConfig(**lowerCAmelCase) lowercase__ = InstructBlipQFormerConfig(**lowerCAmelCase) lowercase__ = text_config['model_type'] if 'model_type' in text_config else 'opt' lowercase__ = CONFIG_MAPPING[text_model_type](**lowerCAmelCase) lowercase__ = self.text_config.tie_word_embeddings lowercase__ = self.text_config.is_encoder_decoder lowercase__ = num_query_tokens lowercase__ = self.vision_config.hidden_size lowercase__ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES lowercase__ = 1.0 lowercase__ = 0.02 @classmethod def UpperCAmelCase ( cls : Dict , lowerCAmelCase : InstructBlipVisionConfig , lowerCAmelCase : InstructBlipQFormerConfig , lowerCAmelCase : PretrainedConfig , **lowerCAmelCase : Tuple , ) -> List[str]: """simple docstring""" return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowerCAmelCase , ) def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]: """simple docstring""" lowercase__ = copy.deepcopy(self.__dict__) lowercase__ = self.vision_config.to_dict() lowercase__ = self.qformer_config.to_dict() lowercase__ = self.text_config.to_dict() lowercase__ = self.__class__.model_type return output
from sklearn.metrics import matthews_corrcoef import datasets a__ : Any = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n" a__ : Any = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n" a__ : str = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase__( datasets.Metric ): '''simple docstring''' def UpperCAmelCase ( self : List[Any]) -> int: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('int32'), 'references': datasets.Value('int32'), }) , reference_urls=[ 'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html' ] , ) def UpperCAmelCase ( self : Dict , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any]=None) -> Any: """simple docstring""" return { "matthews_correlation": float(matthews_corrcoef(lowerCAmelCase , lowerCAmelCase , sample_weight=lowerCAmelCase)), }
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowercase = { '''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig'''] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = ['''ConvNextFeatureExtractor'''] lowercase = ['''ConvNextImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ConvNextForImageClassification''', '''ConvNextModel''', '''ConvNextPreTrainedModel''', '''ConvNextBackbone''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase = [ '''TFConvNextForImageClassification''', '''TFConvNextModel''', '''TFConvNextPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def a_ ( self ): __SCREAMING_SNAKE_CASE : List[str] = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE : str = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] __SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) __SCREAMING_SNAKE_CASE : Tuple = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48145466, 0.4578275, 0.40821073], "image_std": [0.26862954, 0.26130258, 0.27577711], } __SCREAMING_SNAKE_CASE : List[Any] = os.path.join(self.tmpdirname , a__ ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(a__ , a__ ) def a_ ( self , **a__ ): return BertTokenizer.from_pretrained(self.tmpdirname , **a__ ) def a_ ( self , **a__ ): return BertTokenizerFast.from_pretrained(self.tmpdirname , **a__ ) def a_ ( self , **a__ ): return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **a__ ) def a_ ( self ): shutil.rmtree(self.tmpdirname ) def a_ ( self ): __SCREAMING_SNAKE_CASE : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __SCREAMING_SNAKE_CASE : List[Any] = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def a_ ( self ): __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Optional[int] = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor() __SCREAMING_SNAKE_CASE : Optional[int] = AlignProcessor(tokenizer=a__ , image_processor=a__ ) processor_slow.save_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE : List[str] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=a__ ) __SCREAMING_SNAKE_CASE : Optional[int] = AlignProcessor(tokenizer=a__ , image_processor=a__ ) processor_fast.save_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE : List[str] = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , a__ ) self.assertIsInstance(processor_fast.tokenizer , a__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , a__ ) self.assertIsInstance(processor_fast.image_processor , a__ ) def a_ ( self ): __SCREAMING_SNAKE_CASE : Optional[Any] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) 
processor.save_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) __SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor(do_normalize=a__ , padding_value=1.0 ) __SCREAMING_SNAKE_CASE : Any = AlignProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=a__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , a__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , a__ ) def a_ ( self ): __SCREAMING_SNAKE_CASE : Dict = self.get_image_processor() __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Tuple = AlignProcessor(tokenizer=a__ , image_processor=a__ ) __SCREAMING_SNAKE_CASE : List[Any] = self.prepare_image_inputs() __SCREAMING_SNAKE_CASE : List[Any] = image_processor(a__ , return_tensors="np" ) __SCREAMING_SNAKE_CASE : Dict = processor(images=a__ , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a_ ( self ): __SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor() __SCREAMING_SNAKE_CASE : int = self.get_tokenizer() __SCREAMING_SNAKE_CASE : List[Any] = AlignProcessor(tokenizer=a__ , image_processor=a__ ) __SCREAMING_SNAKE_CASE : Any = "lower newer" __SCREAMING_SNAKE_CASE : List[str] = processor(text=a__ ) __SCREAMING_SNAKE_CASE : int = tokenizer(a__ , padding="max_length" , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a_ ( self ): __SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor() __SCREAMING_SNAKE_CASE : str = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = AlignProcessor(tokenizer=a__ , image_processor=a__ ) __SCREAMING_SNAKE_CASE : List[str] = "lower newer" __SCREAMING_SNAKE_CASE : str = self.prepare_image_inputs() __SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=a__ , images=a__ ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(a__ ): processor() def a_ ( self ): __SCREAMING_SNAKE_CASE : str = self.get_image_processor() __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Dict = AlignProcessor(tokenizer=a__ , image_processor=a__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __SCREAMING_SNAKE_CASE : Dict = processor.batch_decode(a__ ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer.batch_decode(a__ ) self.assertListEqual(a__ , a__ ) def a_ ( self ): __SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor() __SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer() __SCREAMING_SNAKE_CASE : str = AlignProcessor(tokenizer=a__ , image_processor=a__ ) __SCREAMING_SNAKE_CASE : Any = "lower newer" __SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_image_inputs() __SCREAMING_SNAKE_CASE : List[str] = processor(text=a__ , images=a__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
from __future__ import annotations from collections.abc import Callable __A : int = list[list[float | int]] def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Matrix: """simple docstring""" _A = len(_SCREAMING_SNAKE_CASE ) _A = [[0 for _ in range(size + 1 )] for _ in range(_SCREAMING_SNAKE_CASE )] _A = 42 _A = 42 _A = 42 _A = 42 _A = 42 _A = 42 for row in range(_SCREAMING_SNAKE_CASE ): for col in range(_SCREAMING_SNAKE_CASE ): _A = matrix[row][col] _A = vector[row][0] _A = 0 _A = 0 while row < size and col < size: # pivoting _A = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: _A, _A = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , _SCREAMING_SNAKE_CASE ): _A = augmented[rowa][col] / augmented[row][col] _A = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , _SCREAMING_SNAKE_CASE ): for row in range(_SCREAMING_SNAKE_CASE ): _A = augmented[row][col] / augmented[col][col] for cola in range(_SCREAMING_SNAKE_CASE , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_SCREAMING_SNAKE_CASE ) ] def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Callable[[int], int]: """simple docstring""" _A = len(_SCREAMING_SNAKE_CASE ) _A = [[0 for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(_SCREAMING_SNAKE_CASE )] _A = [[0] for _ in range(_SCREAMING_SNAKE_CASE )] _A = 42 _A = 42 _A = 42 _A = 42 for x_val, y_val in enumerate(_SCREAMING_SNAKE_CASE ): for col in range(_SCREAMING_SNAKE_CASE ): _A = (x_val + 1) ** (size - col - 1) _A = y_val _A = solve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def interpolated_func(_SCREAMING_SNAKE_CASE ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(_SCREAMING_SNAKE_CASE ) ) return interpolated_func def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def __lowerCAmelCase( _SCREAMING_SNAKE_CASE = question_function , _SCREAMING_SNAKE_CASE = 10 ) -> int: """simple docstring""" _A = [func(_SCREAMING_SNAKE_CASE ) for x_val in range(1 , order + 1 )] _A = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] _A = 0 _A = 42 _A = 42 for poly in polynomials: _A = 1 while func(_SCREAMING_SNAKE_CASE ) == poly(_SCREAMING_SNAKE_CASE ): x_val += 1 ret += poly(_SCREAMING_SNAKE_CASE ) return ret if __name__ == "__main__": print(f"{solution() = }")
from __future__ import annotations from typing import Any class lowerCAmelCase : '''simple docstring''' def __init__( self : str , __snake_case : int = 6 ) -> None: '''simple docstring''' lowerCamelCase = None lowerCamelCase = None self.create_linked_list(__snake_case ) def lowerCamelCase__ ( self : Optional[int] , __snake_case : int ) -> None: '''simple docstring''' lowerCamelCase = Node() lowerCamelCase = current_node lowerCamelCase = current_node lowerCamelCase = current_node for _ in range(1 , __snake_case ): lowerCamelCase = Node() lowerCamelCase = current_node lowerCamelCase = previous_node lowerCamelCase = current_node lowerCamelCase = self.front lowerCamelCase = previous_node def lowerCamelCase__ ( self : List[str] ) -> bool: '''simple docstring''' return ( self.front == self.rear and self.front is not None and self.front.data is None ) def lowerCamelCase__ ( self : Tuple ) -> Any | None: '''simple docstring''' self.check_can_perform_operation() return self.front.data if self.front else None def lowerCamelCase__ ( self : Optional[int] , __snake_case : Any ) -> None: '''simple docstring''' if self.rear is None: return self.check_is_full() if not self.is_empty(): lowerCamelCase = self.rear.next if self.rear: lowerCamelCase = data def lowerCamelCase__ ( self : Optional[Any] ) -> Any: '''simple docstring''' self.check_can_perform_operation() if self.rear is None or self.front is None: return None if self.front == self.rear: lowerCamelCase = self.front.data lowerCamelCase = None return data lowerCamelCase = self.front lowerCamelCase = old_front.next lowerCamelCase = old_front.data lowerCamelCase = None return data def lowerCamelCase__ ( self : Optional[int] ) -> None: '''simple docstring''' if self.is_empty(): raise Exception('Empty Queue' ) def lowerCamelCase__ ( self : int ) -> None: '''simple docstring''' if self.rear and self.rear.next == self.front: raise Exception('Full Queue' ) class lowerCAmelCase : '''simple docstring''' def __init__( self : int ) -> None: '''simple docstring''' lowerCamelCase = None lowerCamelCase = None lowerCamelCase = None if __name__ == "__main__": import doctest doctest.testmod()
'''simple docstring''' import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ): def __init__( self : Dict ): """simple docstring""" __lowerCamelCase : Any = [] def a_ ( self : Optional[Any] , A__ : Dict , A__ : Optional[int] , A__ : Tuple , **A__ : Tuple ): """simple docstring""" self.events.append("""on_init_end""" ) def a_ ( self : str , A__ : List[Any] , A__ : Tuple , A__ : Tuple , **A__ : Optional[Any] ): """simple docstring""" self.events.append("""on_train_begin""" ) def a_ ( self : Tuple , A__ : List[Any] , A__ : List[str] , A__ : List[str] , **A__ : Tuple ): """simple docstring""" self.events.append("""on_train_end""" ) def a_ ( self : Union[str, Any] , A__ : str , A__ : str , A__ : Optional[Any] , **A__ : Dict ): """simple docstring""" self.events.append("""on_epoch_begin""" ) def a_ ( self : List[Any] , A__ : List[str] , A__ : List[Any] , A__ : Dict , **A__ : List[str] ): """simple docstring""" self.events.append("""on_epoch_end""" ) def a_ ( self : int , A__ : Optional[Any] , A__ : Any , A__ : Any , **A__ : int ): """simple docstring""" self.events.append("""on_step_begin""" ) def a_ ( self : Any , A__ : int , A__ : Any , A__ : Tuple , **A__ : int ): """simple docstring""" self.events.append("""on_step_end""" ) def a_ ( self : str , A__ : int , A__ : Optional[Any] , A__ : str , **A__ : Optional[Any] ): """simple docstring""" self.events.append("""on_evaluate""" ) def a_ ( self : str , A__ : str , A__ : Union[str, Any] , A__ : Any , **A__ : int ): """simple docstring""" self.events.append("""on_predict""" ) def a_ ( self : str , A__ : str , A__ : int , A__ : List[Any] , **A__ : Dict ): """simple docstring""" self.events.append("""on_save""" ) def a_ ( self : List[str] , A__ : str , A__ : List[str] , A__ : Any , **A__ : Dict ): """simple docstring""" self.events.append("""on_log""" ) def a_ ( self : int , A__ : List[Any] , A__ : List[str] , A__ : Any , **A__ : Union[str, Any] ): """simple docstring""" self.events.append("""on_prediction_step""" ) @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): def a_ ( self : Optional[int] ): """simple docstring""" __lowerCamelCase : List[Any] = tempfile.mkdtemp() def a_ ( self : Union[str, Any] ): """simple docstring""" shutil.rmtree(self.output_dir ) def a_ ( self : Optional[Any] , A__ : Any=0 , A__ : Any=0 , A__ : Dict=64 , A__ : Dict=64 , A__ : List[str]=None , A__ : Union[str, Any]=False , **A__ : Tuple ): """simple docstring""" __lowerCamelCase : Optional[int] = RegressionDataset(length=A__ ) __lowerCamelCase : Optional[int] = RegressionDataset(length=A__ ) __lowerCamelCase : Any = RegressionModelConfig(a=A__ , b=A__ ) __lowerCamelCase : Optional[Any] = RegressionPreTrainedModel(A__ ) __lowerCamelCase : List[str] = TrainingArguments(self.output_dir , disable_tqdm=A__ , report_to=[] , **A__ ) return Trainer( A__ , A__ , train_dataset=A__ , eval_dataset=A__ , callbacks=A__ , ) def a_ ( self : List[str] , A__ : Optional[int] , A__ : Dict ): """simple docstring""" self.assertEqual(len(A__ ) , len(A__ ) ) # Order doesn't matter __lowerCamelCase : List[Any] = sorted(A__ , 
key=lambda A__ : cb.__name__ if isinstance(A__ , A__ ) else cb.__class__.__name__ ) __lowerCamelCase : int = sorted(A__ , key=lambda A__ : cb.__name__ if isinstance(A__ , A__ ) else cb.__class__.__name__ ) for cba, cba in zip(A__ , A__ ): if isinstance(A__ , A__ ) and isinstance(A__ , A__ ): self.assertEqual(A__ , A__ ) elif isinstance(A__ , A__ ) and not isinstance(A__ , A__ ): self.assertEqual(A__ , cba.__class__ ) elif not isinstance(A__ , A__ ) and isinstance(A__ , A__ ): self.assertEqual(cba.__class__ , A__ ) else: self.assertEqual(A__ , A__ ) def a_ ( self : Optional[int] , A__ : List[Any] ): """simple docstring""" __lowerCamelCase : Tuple = ["""on_init_end""", """on_train_begin"""] __lowerCamelCase : str = 0 __lowerCamelCase : List[Any] = len(trainer.get_eval_dataloader() ) __lowerCamelCase : List[Any] = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""] for _ in range(trainer.state.num_train_epochs ): expected_events.append("""on_epoch_begin""" ) for _ in range(A__ ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("""on_log""" ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("""on_save""" ) expected_events.append("""on_epoch_end""" ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def a_ ( self : Union[str, Any] ): """simple docstring""" __lowerCamelCase : int = self.get_trainer() __lowerCamelCase : List[str] = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , A__ ) # Callbacks passed at init are added to the default callbacks __lowerCamelCase : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(A__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A__ ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback __lowerCamelCase : List[Any] = self.get_trainer(disable_tqdm=A__ ) __lowerCamelCase : int = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , A__ ) def a_ ( self : List[str] ): """simple docstring""" __lowerCamelCase : List[Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback] __lowerCamelCase : Optional[int] = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(A__ ) expected_callbacks.remove(A__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A__ ) __lowerCamelCase : List[Any] = self.get_trainer() __lowerCamelCase : Any = trainer.pop_callback(A__ ) self.assertEqual(cb.__class__ , A__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A__ ) trainer.add_callback(A__ ) expected_callbacks.insert(0 , A__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A__ ) # We can also add, pop, or remove by instance __lowerCamelCase : Optional[int] = self.get_trainer() __lowerCamelCase : Union[str, Any] = trainer.callback_handler.callbacks[0] trainer.remove_callback(A__ ) expected_callbacks.remove(A__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A__ ) __lowerCamelCase : int = self.get_trainer() __lowerCamelCase : Dict = trainer.callback_handler.callbacks[0] 
__lowerCamelCase : Dict = trainer.pop_callback(A__ ) self.assertEqual(A__ , A__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A__ ) trainer.add_callback(A__ ) expected_callbacks.insert(0 , A__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , A__ ) def a_ ( self : str ): """simple docstring""" import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="""ignore""" , category=A__ ) __lowerCamelCase : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() __lowerCamelCase : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(A__ , self.get_expected_events(A__ ) ) # Independent log/save/eval __lowerCamelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() __lowerCamelCase : Dict = trainer.callback_handler.callbacks[-2].events self.assertEqual(A__ , self.get_expected_events(A__ ) ) __lowerCamelCase : str = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() __lowerCamelCase : Any = trainer.callback_handler.callbacks[-2].events self.assertEqual(A__ , self.get_expected_events(A__ ) ) __lowerCamelCase : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" ) trainer.train() __lowerCamelCase : List[Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(A__ , self.get_expected_events(A__ ) ) __lowerCamelCase : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" ) trainer.train() __lowerCamelCase : Any = trainer.callback_handler.callbacks[-2].events self.assertEqual(A__ , self.get_expected_events(A__ ) ) # A bit of everything __lowerCamelCase : Any = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , ) trainer.train() __lowerCamelCase : Tuple = trainer.callback_handler.callbacks[-2].events self.assertEqual(A__ , self.get_expected_events(A__ ) ) # warning should be emitted for duplicated callbacks with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock: __lowerCamelCase : Union[str, Any] = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(A__ ) in warn_mock.call_args[0][0]
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging

logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
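# Illustrative direct call of the converter (bypassing the CLI); the three
# paths below are placeholders and must point at a real TF checkpoint, its
# RemBERT config JSON, and the desired output file:
#
#     convert_rembert_tf_checkpoint_to_pytorch(
#         "rembert/model.ckpt",
#         "rembert/config.json",
#         "rembert/pytorch_model.bin",
#     )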
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential - Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)

    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
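# Note: Model.fit_generator is deprecated in TensorFlow 2.x; the equivalent
# modern call (a sketch, assuming the same `classifier`, `training_set` and
# `test_set` objects built above) would be:
#
#     classifier.fit(training_set, steps_per_epoch=5, epochs=30, validation_data=test_set)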
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class lowercase : '''simple docstring''' __SCREAMING_SNAKE_CASE = BlenderbotSmallConfig __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = """gelu""" def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=False , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a=0.1 , __a=0.1 , __a=20 , __a=2 , __a=1 , __a=0 , ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = parent UpperCAmelCase__ = batch_size UpperCAmelCase__ = seq_length UpperCAmelCase__ = is_training UpperCAmelCase__ = use_labels UpperCAmelCase__ = vocab_size UpperCAmelCase__ = hidden_size UpperCAmelCase__ = num_hidden_layers UpperCAmelCase__ = num_attention_heads UpperCAmelCase__ = intermediate_size UpperCAmelCase__ = hidden_dropout_prob UpperCAmelCase__ = attention_probs_dropout_prob UpperCAmelCase__ = max_position_embeddings UpperCAmelCase__ = eos_token_id UpperCAmelCase__ = pad_token_id UpperCAmelCase__ = bos_token_id def UpperCamelCase__ (self ) -> Any: """simple docstring""" UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) UpperCAmelCase__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) UpperCAmelCase__ = tf.concat([input_ids, eos_tensor] , axis=1 ) UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) UpperCAmelCase__ = prepare_blenderbot_small_inputs_dict(__a , __a , __a ) return config, inputs_dict def UpperCamelCase__ (self , __a , __a ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = TFBlenderbotSmallModel(config=__a ).get_decoder() UpperCAmelCase__ = inputs_dict['input_ids'] UpperCAmelCase__ = input_ids[:1, :] UpperCAmelCase__ = inputs_dict['attention_mask'][:1, :] UpperCAmelCase__ = inputs_dict['head_mask'] UpperCAmelCase__ = 1 # first forward pass UpperCAmelCase__ = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a ) UpperCAmelCase__ , UpperCAmelCase__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and UpperCAmelCase__ = tf.concat([input_ids, next_tokens] , axis=-1 ) UpperCAmelCase__ = tf.concat([attention_mask, next_attn_mask] , 
axis=-1 ) UpperCAmelCase__ = model(__a , attention_mask=__a )[0] UpperCAmelCase__ = model(__a , attention_mask=__a , past_key_values=__a )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice UpperCAmelCase__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) UpperCAmelCase__ = output_from_no_past[:, -3:, random_slice_idx] UpperCAmelCase__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__a , __a , rtol=1E-3 ) def UpperCamelCase_( snake_case__: Any , snake_case__: List[str] , snake_case__: Dict , snake_case__: Any=None , snake_case__: int=None , snake_case__: int=None , snake_case__: int=None , snake_case__: Optional[int]=None , ) -> int: if attention_mask is None: UpperCAmelCase__ = tf.cast(tf.math.not_equal(snake_case__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: UpperCAmelCase__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: UpperCAmelCase__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: UpperCAmelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: UpperCAmelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) __SCREAMING_SNAKE_CASE = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () __SCREAMING_SNAKE_CASE = ( { """conversational""": TFBlenderbotSmallForConditionalGeneration, """feature-extraction""": TFBlenderbotSmallModel, """summarization""": TFBlenderbotSmallForConditionalGeneration, """text2text-generation""": TFBlenderbotSmallForConditionalGeneration, """translation""": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def UpperCamelCase__ (self ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = TFBlenderbotSmallModelTester(self ) UpperCAmelCase__ = ConfigTester(self , config_class=__a ) def UpperCamelCase__ (self ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ (self ) -> str: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__a ) @require_tokenizers @require_tf class lowercase ( unittest.TestCase ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [ """Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. 
I end up sweating and blushing and feel like """ """ i'm going to throw up.\nand why is that?""" ] __SCREAMING_SNAKE_CASE = """facebook/blenderbot_small-90M""" @cached_property def UpperCamelCase__ (self ) -> Optional[Any]: """simple docstring""" return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) @cached_property def UpperCamelCase__ (self ) -> Any: """simple docstring""" UpperCAmelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase__ (self ) -> List[str]: """simple docstring""" UpperCAmelCase__ = self.tokenizer(self.src_text , return_tensors='tf' ) UpperCAmelCase__ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__a , ) UpperCAmelCase__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__a )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
def nand_gate(input_1: int, input_2: int) -> int:
    """Return the NAND of the two inputs: 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)

pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401

warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
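# A minimal sketch of the replacement the warning points to, assuming a recent
# diffusers install with GPU support; the checkpoint id, file names and prompt
# are placeholders (older diffusers versions name the image argument
# `init_image` rather than `image`).
#
#     import torch
#     from PIL import Image
#     from diffusers import StableDiffusionImg2ImgPipeline
#
#     pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
#     ).to("cuda")
#     init_image = Image.open("sketch.png").convert("RGB").resize((768, 512))
#     result = pipe(prompt="a fantasy landscape", image=init_image, strength=0.75).images[0]
#     result.save("fantasy_landscape.png")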
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 lowerCAmelCase :Optional[int] = get_tests_dir('''fixtures''') lowerCAmelCase :Any = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''') lowerCAmelCase :Tuple = get_tests_dir('''fixtures/dummy-config.json''') class _lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self : Union[str, Any] ) -> int: __magic_name__ : str = 0 def __lowerCAmelCase ( self : int ) -> Tuple: __magic_name__ : List[str] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' ) self.assertIsInstance(_A , _A ) def __lowerCAmelCase ( self : Tuple ) -> List[Any]: __magic_name__ : List[str] = AutoFeatureExtractor.from_pretrained(_A ) self.assertIsInstance(_A , _A ) def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]: with tempfile.TemporaryDirectory() as tmpdirname: __magic_name__ : Union[str, Any] = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally __magic_name__ : Dict = AutoFeatureExtractor.from_pretrained(_A ).to_dict() config_dict.pop('feature_extractor_type' ) __magic_name__ : int = WavaVecaFeatureExtractor(**_A ) # save in new folder model_config.save_pretrained(_A ) config.save_pretrained(_A ) __magic_name__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(_A ) # make sure private variable is not incorrectly saved __magic_name__ : List[str] = json.loads(config.to_json_string() ) self.assertTrue('_processor_class' not in dict_as_saved ) self.assertIsInstance(_A , _A ) def __lowerCAmelCase ( self : int ) -> Union[str, Any]: __magic_name__ : Tuple = AutoFeatureExtractor.from_pretrained(_A ) self.assertIsInstance(_A , _A ) def __lowerCAmelCase ( self : List[str] ) -> Optional[int]: with self.assertRaisesRegex( _A , 'bert-base is not a local folder and is not a valid model identifier' ): __magic_name__ : str = AutoFeatureExtractor.from_pretrained('bert-base' ) def __lowerCAmelCase ( self : Any ) -> Tuple: with self.assertRaisesRegex( _A , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): __magic_name__ : Tuple = AutoFeatureExtractor.from_pretrained(_A , revision='aaaaaa' ) def __lowerCAmelCase ( self : Dict ) -> str: with self.assertRaisesRegex( _A , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ): __magic_name__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' ) def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_A ): __magic_name__ : Dict = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(_A ): __magic_name__ : Optional[Any] = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A ) __magic_name__ : List[Any] = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A ) self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(_A ) __magic_name__ : List[Any] = AutoFeatureExtractor.from_pretrained(_A , trust_remote_code=_A ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) def __lowerCAmelCase ( self : str ) -> Tuple: try: AutoConfig.register('custom' , _A ) AutoFeatureExtractor.register(_A , _A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): AutoFeatureExtractor.register(_A , _A ) # Now that the config is registered, it can be used as any other config with the auto-API __magic_name__ : str = CustomFeatureExtractor.from_pretrained(_A ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(_A ) __magic_name__ : List[Any] = AutoFeatureExtractor.from_pretrained(_A ) self.assertIsInstance(_A , _A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self : Optional[Any] ) -> Dict: class _lowerCamelCase ( lowercase__ ): '''simple docstring''' A_ : Any = True try: AutoConfig.register('custom' , _A ) AutoFeatureExtractor.register(_A , _A ) # If remote code is not set, the default is to use local __magic_name__ : Optional[Any] = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' ) self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. __magic_name__ : str = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A ) self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub __magic_name__ : Tuple = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A ) self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) self.assertTrue(not hasattr(_A , 'is_local' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _UpperCamelCase : """simple docstring""" def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_12 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> Union[str, Any]: '''simple docstring''' __lowercase = parent __lowercase = 13 __lowercase = 7 __lowercase = True __lowercase = True __lowercase = True __lowercase = True __lowercase = 99 __lowercase = 3_84 __lowercase = 2 __lowercase = 4 __lowercase = 37 __lowercase = '''gelu''' __lowercase = 0.1 __lowercase = 0.1 __lowercase = 5_12 __lowercase = 16 __lowercase = 2 __lowercase = 0.02 __lowercase = 3 __lowercase = 4 __lowercase = 1_28 __lowercase = 2 __lowercase = 9 __lowercase = 1 __lowercase = None def _SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None if self.use_token_type_ids: __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase = None __lowercase = None __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase = ids_tensor([self.batch_size] , self.num_choices ) __lowercase = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowerCAmelCase__ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple: '''simple docstring''' __lowercase = TFConvBertModel(config=lowerCAmelCase__ ) __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __lowercase = [input_ids, input_mask] __lowercase = model(lowerCAmelCase__ ) __lowercase = model(lowerCAmelCase__ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]: '''simple docstring''' __lowercase = TFConvBertForMaskedLM(config=lowerCAmelCase__ ) __lowercase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __lowercase = model(lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: '''simple docstring''' __lowercase = self.num_labels __lowercase = TFConvBertForSequenceClassification(config=lowerCAmelCase__ ) __lowercase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __lowercase = model(lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: '''simple docstring''' __lowercase = self.num_choices __lowercase = TFConvBertForMultipleChoice(config=lowerCAmelCase__ ) __lowercase = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) ) __lowercase = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) ) __lowercase = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) ) __lowercase = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } __lowercase = model(lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: '''simple docstring''' __lowercase = self.num_labels __lowercase = TFConvBertForTokenClassification(config=lowerCAmelCase__ ) __lowercase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __lowercase = model(lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: '''simple docstring''' __lowercase = TFConvBertForQuestionAnswering(config=lowerCAmelCase__ ) __lowercase = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } __lowercase = model(lowerCAmelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = config_and_inputs __lowercase = {'''input_ids''': input_ids, 
'''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class _UpperCamelCase ( _UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" __a : Optional[Any] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) __a : Optional[int] = ( { '''feature-extraction''': TFConvBertModel, '''fill-mask''': TFConvBertForMaskedLM, '''question-answering''': TFConvBertForQuestionAnswering, '''text-classification''': TFConvBertForSequenceClassification, '''token-classification''': TFConvBertForTokenClassification, '''zero-shot''': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) __a : Union[str, Any] = False __a : List[str] = False __a : int = False def _SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' __lowercase = TFConvBertModelTester(self ) __lowercase = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ ) @slow def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = True __lowercase = True if hasattr(lowerCAmelCase__ , '''use_cache''' ): __lowercase = True __lowercase = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length ) __lowercase = getattr(self.model_tester , '''key_length''' , lowerCAmelCase__ ) for model_class in self.all_model_classes: __lowercase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) __lowercase = model_class(lowerCAmelCase__ ) __lowercase = len(model(lowerCAmelCase__ ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase__ , saved_model=lowerCAmelCase__ ) __lowercase = os.path.join(lowerCAmelCase__ , '''saved_model''' , '''1''' ) __lowercase = tf.keras.models.load_model(lowerCAmelCase__ ) __lowercase = model(lowerCAmelCase__ ) if self.is_encoder_decoder: __lowercase = outputs['''encoder_hidden_states'''] __lowercase = outputs['''encoder_attentions'''] else: __lowercase = outputs['''hidden_states'''] __lowercase 
= outputs['''attentions'''] self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) __lowercase = getattr( self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def _SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' __lowercase = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' ) self.assertIsNotNone(lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = True __lowercase = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length ) __lowercase = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length ) __lowercase = getattr(self.model_tester , '''key_length''' , lowerCAmelCase__ ) __lowercase = getattr(self.model_tester , '''key_length''' , lowerCAmelCase__ ) def check_decoder_attentions_output(lowerCAmelCase__ ): __lowercase = len(lowerCAmelCase__ ) self.assertEqual(out_len % 2 , 0 ) __lowercase = outputs.decoder_attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(lowerCAmelCase__ ): __lowercase = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __lowercase = True __lowercase = False __lowercase = model_class(lowerCAmelCase__ ) __lowercase = model(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) __lowercase = len(lowerCAmelCase__ ) self.assertEqual(config.output_hidden_states , lowerCAmelCase__ ) check_encoder_attentions_output(lowerCAmelCase__ ) if self.is_encoder_decoder: __lowercase = model_class(lowerCAmelCase__ ) __lowercase = model(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) self.assertEqual(config.output_hidden_states , lowerCAmelCase__ ) check_decoder_attentions_output(lowerCAmelCase__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __lowercase = True __lowercase = model_class(lowerCAmelCase__ ) __lowercase = model(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) self.assertEqual(config.output_hidden_states , lowerCAmelCase__ ) check_encoder_attentions_output(lowerCAmelCase__ ) # Check attention is always last and order is fine __lowercase = True __lowercase = True __lowercase = model_class(lowerCAmelCase__ ) __lowercase = model(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCAmelCase__ ) ) self.assertEqual(model.config.output_hidden_states , lowerCAmelCase__ ) 
check_encoder_attentions_output(lowerCAmelCase__ ) @require_tf class _UpperCamelCase ( unittest.TestCase ): """simple docstring""" @slow def _SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' __lowercase = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' ) __lowercase = tf.constant([[0, 1, 2, 3, 4, 5]] ) __lowercase = model(lowerCAmelCase__ )[0] __lowercase = [1, 6, 7_68] self.assertEqual(output.shape , lowerCAmelCase__ ) __lowercase = tf.constant( [ [ [-0.0347_5493, -0.468_6034, -0.3063_8832], [0.2263_7248, -0.2698_8646, -0.742_3424], [0.1032_4868, -0.4501_3508, -0.5828_0784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , lowerCAmelCase__ , atol=1E-4 )
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class _UpperCamelCase ( unittest.TestCase ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' __lowercase = tempfile.mkdtemp() __lowercase = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __lowercase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073], '''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711], } __lowercase = os.path.join(self.tmpdirname , lowerCAmelCase__ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(lowerCAmelCase__ , lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self , **lowerCAmelCase__ ) -> int: '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self , **lowerCAmelCase__ ) -> List[str]: '''simple docstring''' return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self , **lowerCAmelCase__ ) -> Tuple: '''simple docstring''' return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase = [Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' __lowercase = self.get_tokenizer() __lowercase = self.get_rust_tokenizer() __lowercase = self.get_image_processor() __lowercase = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) processor_slow.save_pretrained(self.tmpdirname ) __lowercase = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase__ ) __lowercase = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) processor_fast.save_pretrained(self.tmpdirname ) __lowercase = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase__ ) self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) 
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase__ ) self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' __lowercase = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowercase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowercase = self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0 ) __lowercase = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' __lowercase = self.get_image_processor() __lowercase = self.get_tokenizer() __lowercase = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) __lowercase = self.prepare_image_inputs() __lowercase = image_processor(lowerCAmelCase__ , return_tensors='''np''' ) __lowercase = processor(images=lowerCAmelCase__ , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' __lowercase = self.get_image_processor() __lowercase = self.get_tokenizer() __lowercase = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) __lowercase = '''lower newer''' __lowercase = processor(text=lowerCAmelCase__ ) __lowercase = tokenizer(lowerCAmelCase__ , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' __lowercase = self.get_image_processor() __lowercase = self.get_tokenizer() __lowercase = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) __lowercase = '''lower newer''' __lowercase = self.prepare_image_inputs() __lowercase = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(lowerCAmelCase__ ): processor() def _SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' __lowercase = self.get_image_processor() __lowercase = self.get_tokenizer() __lowercase = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) __lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowercase = processor.batch_decode(lowerCAmelCase__ ) __lowercase = tokenizer.batch_decode(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' __lowercase = self.get_image_processor() __lowercase = self.get_tokenizer() __lowercase = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ ) __lowercase = '''lower newer''' 
__lowercase = self.prepare_image_inputs() __lowercase = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowerCAmelCase__ = logging.get_logger(__name__) class __snake_case ( _lowercase): snake_case__ : Dict = ["audio_values", "audio_mask"] def __init__( self : List[str] , __lowerCAmelCase : Optional[Any]=2_0_4_8 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Dict=[1_6, 1_6] , __lowerCAmelCase : Optional[Any]=1_2_8 , __lowerCAmelCase : Optional[int]=4_4_1_0_0 , __lowerCAmelCase : Optional[Any]=8_6 , __lowerCAmelCase : Dict=2_0_4_8 , __lowerCAmelCase : Tuple=0.0 , **__lowerCAmelCase : Dict , ): """simple docstring""" super().__init__( feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase , ) _lowerCamelCase : str = spectrogram_length _lowerCamelCase : List[str] = num_channels _lowerCamelCase : List[str] = patch_size _lowerCamelCase : Optional[Any] = feature_size // self.patch_size[1] _lowerCamelCase : Any = n_fft _lowerCamelCase : int = sampling_rate // hop_length_to_sampling_rate _lowerCamelCase : Optional[int] = sampling_rate _lowerCamelCase : Optional[Any] = padding_value _lowerCamelCase : str = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=__lowerCAmelCase , norm='''slaney''' , mel_scale='''slaney''' , ).T def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : np.array ): """simple docstring""" _lowerCamelCase : Tuple = spectrogram( __lowerCAmelCase , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , ) _lowerCamelCase : Union[str, Any] = log_spec[:, :-1] _lowerCamelCase : Optional[Any] = log_spec - 20.0 _lowerCamelCase : List[Any] = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self : Optional[Any] , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , **__lowerCAmelCase : List[str] , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( '''This feature extractor is set to support sampling rate''' f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' f''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) _lowerCamelCase : Union[str, Any] = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) _lowerCamelCase : List[str] = is_batched_numpy or ( isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _lowerCamelCase : str = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ): _lowerCamelCase : Any = np.asarray(__lowerCAmelCase , dtype=np.floataa ) elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _lowerCamelCase : Union[str, Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _lowerCamelCase : Tuple = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis _lowerCamelCase : int = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , __lowerCAmelCase ): _lowerCamelCase : Union[str, Any] = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask _lowerCamelCase : List[Any] = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: _lowerCamelCase : Any = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] _lowerCamelCase : str = np.array(__lowerCAmelCase ).astype(np.floataa ) # convert into correct format for padding _lowerCamelCase : List[str] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch _lowerCamelCase : Optional[int] = np.ones([len(__lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) _lowerCamelCase : int = padded_audio_features * self.padding_value for i in range(len(__lowerCAmelCase ) ): _lowerCamelCase : List[str] = audio_features[i] _lowerCamelCase : Optional[Any] = feature # return as BatchFeature if return_attention_mask: _lowerCamelCase : Union[str, Any] = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask} else: _lowerCamelCase : Any = {'''audio_values''': padded_audio_features} _lowerCamelCase : Optional[int] = BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase ) return encoded_inputs
"""simple docstring""" import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class a : def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ): """simple docstring""" lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_input_mask lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self ): """simple docstring""" return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , use_stable_embedding=_snake_case , ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ): """simple docstring""" lowerCAmelCase = OpenLlamaModel(config=_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case ) lowerCAmelCase = 
model(_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" lowerCAmelCase = True lowerCAmelCase = OpenLlamaModel(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , ) lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , ) lowerCAmelCase = model(_snake_case , attention_mask=_snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ): """simple docstring""" lowerCAmelCase = True lowerCAmelCase = True lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case ) model.to(_snake_case ) model.eval() # first forward pass lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , use_cache=_snake_case , ) lowerCAmelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 ) lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0] lowerCAmelCase = model( _snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , past_key_values=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0] # select random slice lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) ,( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class a ( a__ , a__ , a__ , unittest.TestCase ): snake_case__ = ( (OpenLlamaModel, 
OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) snake_case__ = (OpenLlamaForCausalLM,) if is_torch_available() else () snake_case__ = ( { '''feature-extraction''': OpenLlamaModel, '''text-classification''': OpenLlamaForSequenceClassification, '''text-generation''': OpenLlamaForCausalLM, '''zero-shot''': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) snake_case__ = False snake_case__ = False def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = OpenLlamaModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase = type self.model_tester.create_and_check_model(*_snake_case ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = input_dict['input_ids'] lowerCAmelCase = input_ids.ne(1 ).to(_snake_case ) lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = 'single_label_classification' lowerCAmelCase = input_dict['input_ids'] lowerCAmelCase = input_ids.ne(1 ).to(_snake_case ) lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = 'multi_label_classification' lowerCAmelCase = input_dict['input_ids'] lowerCAmelCase = input_ids.ne(1 ).to(_snake_case ) lowerCAmelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case ) model.to(_snake_case ) model.eval() lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' ) def UpperCamelCase__ ( self ): """simple docstring""" pass @parameterized.expand([('linear',), ('dynamic',)] ) def UpperCamelCase__ ( self , _snake_case ): """simple docstring""" lowerCAmelCase ,lowerCAmelCase = 
self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = ids_tensor([1, 10] , config.vocab_size ) lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase = OpenLlamaModel(_snake_case ) original_model.to(_snake_case ) original_model.eval() lowerCAmelCase = original_model(_snake_case ).last_hidden_state lowerCAmelCase = original_model(_snake_case ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase = {'type': scaling_type, 'factor': 10.0} lowerCAmelCase = OpenLlamaModel(_snake_case ) scaled_model.to(_snake_case ) scaled_model.eval() lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
"""simple docstring""" import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType __A : Union[str, Any] = logging.get_logger(__name__) class lowerCAmelCase__ ( UpperCAmelCase__ ): """simple docstring""" __UpperCAmelCase : str = 'vision-encoder-decoder' __UpperCAmelCase : Union[str, Any] = True def __init__( self : Optional[Any] , **lowercase__ : int ): super().__init__(**lowercase__ ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f'A configuraton of type {self.model_type} cannot be instantiated because ' f'not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}' ) __lowercase : int = kwargs.pop("encoder" ) __lowercase : Optional[int] = encoder_config.pop("model_type" ) __lowercase : Union[str, Any] = kwargs.pop("decoder" ) __lowercase : str = decoder_config.pop("model_type" ) __lowercase : List[str] = AutoConfig.for_model(lowercase__ , **lowercase__ ) __lowercase : Tuple = AutoConfig.for_model(lowercase__ , **lowercase__ ) __lowercase : Optional[Any] = True @classmethod def snake_case ( cls : Tuple , lowercase__ : PretrainedConfig , lowercase__ : PretrainedConfig , **lowercase__ : Any ): logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" ) __lowercase : Any = True __lowercase : Optional[Any] = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowercase__ ) def snake_case ( self : Optional[Any] ): __lowercase : Tuple = copy.deepcopy(self.__dict__ ) __lowercase : List[Any] = self.encoder.to_dict() __lowercase : Tuple = self.decoder.to_dict() __lowercase : Union[str, Any] = self.__class__.model_type return output class lowerCAmelCase__ ( UpperCAmelCase__ ): """simple docstring""" __UpperCAmelCase : Any = version.parse("1.11" ) @property def snake_case ( self : Dict ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def snake_case ( self : int ): return 1e-4 @property def snake_case ( self : List[Any] ): return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}} ) class lowerCAmelCase__ ( UpperCAmelCase__ ): """simple docstring""" @property def snake_case ( self : Union[str, Any] ): __lowercase : Any = OrderedDict() __lowercase : Optional[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""} __lowercase : Dict = {0: """batch""", 1: """past_decoder_sequence + sequence"""} __lowercase : int = {0: """batch""", 1: """encoder_sequence"""} return common_inputs def snake_case ( self : List[Any] , lowercase__ : "PreTrainedTokenizerBase" , lowercase__ : int = -1 , lowercase__ : int = -1 , lowercase__ : bool = False , lowercase__ : Optional["TensorType"] = None , ): import torch __lowercase : Optional[Any] = OrderedDict() __lowercase : Dict = super().generate_dummy_inputs( lowercase__ , batch_size=lowercase__ , seq_length=lowercase__ , is_pair=lowercase__ , framework=lowercase__ ) __lowercase : int = dummy_input["""input_ids"""].shape __lowercase : Any = (batch, encoder_sequence, self._config.encoder_hidden_size) __lowercase : List[str] = dummy_input.pop("input_ids" ) __lowercase : Tuple = dummy_input.pop("attention_mask" ) __lowercase : Tuple = torch.zeros(lowercase__ ) return 
common_inputs class lowerCAmelCase__ ( UpperCAmelCase__ ): """simple docstring""" @property def snake_case ( self : Tuple ): pass def snake_case ( self : str , lowercase__ : PretrainedConfig ): return VisionEncoderDecoderEncoderOnnxConfig(lowercase__ ) def snake_case ( self : Dict , lowercase__ : PretrainedConfig , lowercase__ : PretrainedConfig , lowercase__ : str = "default" ): __lowercase : int = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(lowercase__ , lowercase__ )
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class lowerCAmelCase__ : """simple docstring""" def __init__( self : Tuple , lowercase__ : Tuple , lowercase__ : Tuple=1_3 , lowercase__ : Optional[int]=7 , lowercase__ : List[str]=False , lowercase__ : Tuple=True , lowercase__ : int=False , lowercase__ : List[str]=False , lowercase__ : Optional[Any]=1_9 , lowercase__ : int=3_2 , lowercase__ : List[Any]=5 , lowercase__ : Optional[int]=4 , lowercase__ : Any=3_7 , lowercase__ : Tuple="gelu" , lowercase__ : int=0.1 , lowercase__ : Tuple=0.1 , lowercase__ : List[Any]=5_1_2 , lowercase__ : List[Any]=1_6 , lowercase__ : Union[str, Any]=2 , lowercase__ : List[str]=0.0_2 , lowercase__ : List[Any]=3 , lowercase__ : Any=4 , lowercase__ : Optional[Any]=None , ): __lowercase : int = parent __lowercase : Tuple = batch_size __lowercase : Optional[int] = seq_length __lowercase : str = is_training __lowercase : List[Any] = use_input_mask __lowercase : Any = use_token_type_ids __lowercase : str = use_labels __lowercase : Dict = vocab_size __lowercase : Optional[int] = hidden_size __lowercase : List[Any] = num_hidden_layers __lowercase : int = num_attention_heads __lowercase : Union[str, Any] = intermediate_size __lowercase : Dict = hidden_act __lowercase : Union[str, Any] = hidden_dropout_prob __lowercase : Optional[int] = attention_probs_dropout_prob __lowercase : Optional[Any] = max_position_embeddings __lowercase : List[str] = type_vocab_size __lowercase : str = type_sequence_label_size __lowercase : List[str] = initializer_range __lowercase : Optional[Any] = num_labels __lowercase : Tuple = num_choices __lowercase : Optional[Any] = scope def snake_case ( self : Optional[int] ): __lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase : Union[str, Any] = None if self.use_input_mask: __lowercase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase : Dict = None __lowercase : Any = None __lowercase : Optional[Any] = None if self.use_labels: __lowercase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) __lowercase : Optional[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self : Any ): __lowercase : Dict = EsmConfig( vocab_size=3_3 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=lowercase__ , esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False} , ) return config def snake_case ( self : str , lowercase__ 
: List[Any] , lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : int , lowercase__ : List[Any] , lowercase__ : str ): __lowercase : List[Any] = EsmForProteinFolding(config=lowercase__ ).float() model.to(lowercase__ ) model.eval() __lowercase : List[str] = model(lowercase__ , attention_mask=lowercase__ ) __lowercase : Any = model(lowercase__ ) __lowercase : Any = model(lowercase__ ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 1_4, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def snake_case ( self : Tuple ): __lowercase : List[str] = self.prepare_config_and_inputs() ( ( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) , ) : Any = config_and_inputs __lowercase : int = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : List[Any] = (EsmForProteinFolding,) if is_torch_available() else () __UpperCAmelCase : int = () __UpperCAmelCase : Tuple = {} if is_torch_available() else {} __UpperCAmelCase : Optional[Any] = False def snake_case ( self : Tuple ): __lowercase : Tuple = EsmFoldModelTester(self ) __lowercase : Dict = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7 ) def snake_case ( self : Dict ): self.config_tester.run_common_tests() def snake_case ( self : Optional[int] ): __lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase__ ) @unittest.skip("Does not support attention outputs" ) def snake_case ( self : Union[str, Any] ): pass @unittest.skip def snake_case ( self : Tuple ): pass @unittest.skip("Esm does not support embedding resizing" ) def snake_case ( self : Optional[int] ): pass @unittest.skip("Esm does not support embedding resizing" ) def snake_case ( self : List[str] ): pass @unittest.skip("ESMFold does not support passing input embeds!" ) def snake_case ( self : int ): pass @unittest.skip("ESMFold does not support head pruning." ) def snake_case ( self : List[Any] ): pass @unittest.skip("ESMFold does not support head pruning." ) def snake_case ( self : Any ): pass @unittest.skip("ESMFold does not support head pruning." ) def snake_case ( self : Optional[Any] ): pass @unittest.skip("ESMFold does not support head pruning." ) def snake_case ( self : List[str] ): pass @unittest.skip("ESMFold does not support head pruning." ) def snake_case ( self : List[str] ): pass @unittest.skip("ESMFold does not output hidden states in the normal way." ) def snake_case ( self : Optional[Any] ): pass @unittest.skip("ESMfold does not output hidden states in the normal way." ) def snake_case ( self : Optional[Any] ): pass @unittest.skip("ESMFold only has one output format." ) def snake_case ( self : Tuple ): pass @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality" ) def snake_case ( self : Any ): pass @unittest.skip("ESMFold does not support input chunking." ) def snake_case ( self : str ): pass @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments." ) def snake_case ( self : Any ): pass @unittest.skip("ESMFold doesn't support torchscript compilation." ) def snake_case ( self : Any ): pass @unittest.skip("ESMFold doesn't support torchscript compilation." 
) def snake_case ( self : Tuple ): pass @unittest.skip("ESMFold doesn't support torchscript compilation." ) def snake_case ( self : List[str] ): pass @unittest.skip("ESMFold doesn't support data parallel." ) def snake_case ( self : Optional[Any] ): pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def snake_case ( self : Any ): pass @require_torch class lowerCAmelCase__ ( lowerCAmelCase_ ): """simple docstring""" @slow def snake_case ( self : Union[str, Any] ): __lowercase : Tuple = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1" ).float() model.eval() __lowercase : Optional[int] = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] ) __lowercase : str = model(lowercase__ )["positions"] __lowercase : Union[str, Any] = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , lowercase__ , atol=1e-4 ) )
import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process UpperCamelCase = logging.getLogger(__name__) UpperCamelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCAmelCase_ : """simple docstring""" _snake_case : Optional[str] = field( default=lowercase , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(lowercase )} , ) _snake_case : Optional[str] = field( default=lowercase , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) _snake_case : bool = field( default=lowercase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) _snake_case : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) _snake_case : bool = field( default=lowercase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def __a ( self :Tuple ): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( """--config_overrides can't be used in combination with --config_name or --model_name_or_path""" ) @dataclass class lowerCAmelCase_ : """simple docstring""" _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) _snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """The input training data file (a text file)."""} ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , ) _snake_case : Optional[str] = field( default=lowercase , metadata={"""help""": """An optional input 
validation ref data file for whole word masking in Chinese."""} , ) _snake_case : bool = field( default=lowercase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) _snake_case : Optional[int] = field( default=5 , metadata={ """help""": """The percentage of the train set used as validation set in case there's no validation split""" } , ) _snake_case : Optional[int] = field( default=lowercase , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated. Default to the max input length of the model.""" ) } , ) _snake_case : Optional[int] = field( default=lowercase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) _snake_case : float = field( default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} ) _snake_case : bool = field( default=lowercase , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) def __a ( self :Dict ): if self.train_file is not None: UpperCamelCase__ :Optional[Any] = self.train_file.split(""".""" )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: UpperCamelCase__ :Optional[int] = self.validation_file.split(""".""" )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def A ( lowercase__ : Optional[Any] , lowercase__ : str ) -> List[Any]: with open(lowercase__ , """r""" , encoding="""utf-8""" ) as f: UpperCamelCase__ :Dict = [json.loads(lowercase__ ) for line in f.read().splitlines() if (len(lowercase__ ) > 0 and not line.isspace())] assert len(lowercase__ ) == len(lowercase__ ) UpperCamelCase__ :int = {c: dataset[c] for c in dataset.column_names} UpperCamelCase__ :List[Any] = refs return Dataset.from_dict(lowercase__ ) def A ( ) -> Dict: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCamelCase__ :Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = parser.parse_args_into_dataclasses() # Detecting last checkpoint. UpperCamelCase__ :int = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCamelCase__ :Optional[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , lowercase__ ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. UpperCamelCase__ :List[str] = load_dataset(data_args.dataset_name , data_args.dataset_config_name ) if "validation" not in datasets.keys(): UpperCamelCase__ :Any = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f"""train[:{data_args.validation_split_percentage}%]""" , ) UpperCamelCase__ :Dict = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f"""train[{data_args.validation_split_percentage}%:]""" , ) else: UpperCamelCase__ :Union[str, Any] = {} if data_args.train_file is not None: UpperCamelCase__ :List[Any] = data_args.train_file if data_args.validation_file is not None: UpperCamelCase__ :str = data_args.validation_file UpperCamelCase__ :Tuple = data_args.train_file.split(""".""" )[-1] if extension == "txt": UpperCamelCase__ :List[str] = """text""" UpperCamelCase__ :Optional[int] = load_dataset(lowercase__ , data_files=lowercase__ ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
UpperCamelCase__ :Union[str, Any] = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: UpperCamelCase__ :List[str] = AutoConfig.from_pretrained(model_args.config_name , **lowercase__ ) elif model_args.model_name_or_path: UpperCamelCase__ :Union[str, Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: UpperCamelCase__ :Union[str, Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(f"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(f"""New config: {config}""" ) UpperCamelCase__ :Union[str, Any] = { """cache_dir""": model_args.cache_dir, """use_fast""": model_args.use_fast_tokenizer, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: UpperCamelCase__ :Optional[int] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowercase__ ) elif model_args.model_name_or_path: UpperCamelCase__ :Any = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported by this script.""" """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" ) if model_args.model_name_or_path: UpperCamelCase__ :Tuple = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) UpperCamelCase__ :Optional[Any] = AutoModelForMaskedLM.from_config(lowercase__ ) model.resize_token_embeddings(len(lowercase__ ) ) # Preprocessing the datasets. # First we tokenize all the texts. 
if training_args.do_train: UpperCamelCase__ :Dict = datasets["""train"""].column_names else: UpperCamelCase__ :str = datasets["""validation"""].column_names UpperCamelCase__ :Optional[int] = """text""" if """text""" in column_names else column_names[0] UpperCamelCase__ :str = """max_length""" if data_args.pad_to_max_length else False def tokenize_function(lowercase__ : str ): # Remove empty lines UpperCamelCase__ :List[str] = [line for line in examples["""text"""] if len(lowercase__ ) > 0 and not line.isspace()] return tokenizer(examples["""text"""] , padding=lowercase__ , truncation=lowercase__ , max_length=data_args.max_seq_length ) UpperCamelCase__ :int = datasets.map( lowercase__ , batched=lowercase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , ) # Add the chinese references if provided if data_args.train_ref_file is not None: UpperCamelCase__ :Tuple = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file ) if data_args.validation_ref_file is not None: UpperCamelCase__ :Tuple = add_chinese_references( tokenized_datasets["""validation"""] , data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer UpperCamelCase__ :Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file if has_ref: UpperCamelCase__ :List[str] = False # Data collator # This one will take care of randomly masking the tokens. UpperCamelCase__ :str = DataCollatorForWholeWordMask(tokenizer=lowercase__ , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer UpperCamelCase__ :Union[str, Any] = Trainer( model=lowercase__ , args=lowercase__ , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , ) # Training if training_args.do_train: if last_checkpoint is not None: UpperCamelCase__ :List[Any] = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): UpperCamelCase__ :int = model_args.model_name_or_path else: UpperCamelCase__ :Optional[Any] = None UpperCamelCase__ :List[Any] = trainer.train(resume_from_checkpoint=lowercase__ ) trainer.save_model() # Saves the tokenizer too for easy upload UpperCamelCase__ :int = os.path.join(training_args.output_dir , """train_results.txt""" ) if trainer.is_world_process_zero(): with open(lowercase__ , """w""" ) as writer: logger.info("""***** Train results *****""" ) for key, value in sorted(train_result.metrics.items() ): logger.info(f""" {key} = {value}""" ) writer.write(f"""{key} = {value}\n""" ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # Evaluation UpperCamelCase__ :Optional[Any] = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) UpperCamelCase__ :str = trainer.evaluate() UpperCamelCase__ :Dict = math.exp(eval_output["""eval_loss"""] ) UpperCamelCase__ :int = perplexity UpperCamelCase__ :Union[str, Any] = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" ) if trainer.is_world_process_zero(): with open(lowercase__ , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in sorted(results.items() ): logger.info(f""" {key} = {value}""" ) writer.write(f"""{key} = {value}\n""" ) 
return results def A ( lowercase__ : Tuple ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if digit_amount > 0: return round(number - int(__UpperCamelCase ) , __UpperCamelCase ) return number - int(__UpperCamelCase ) if __name__ == "__main__": print(decimal_isolate(1.53, 0)) print(decimal_isolate(35.345, 1)) print(decimal_isolate(35.345, 2)) print(decimal_isolate(35.345, 3)) print(decimal_isolate(-14.789, 3)) print(decimal_isolate(0, 2)) print(decimal_isolate(-14.123, 1)) print(decimal_isolate(-14.123, 2)) print(decimal_isolate(-14.123, 3))
import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' _snake_case = tmp_path / "cache" _snake_case = {"text": "string"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read() _check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize( "features" , [ None, {"text": "string"}, {"text": "int32"}, {"text": "float32"}, ] , ) def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' _snake_case = tmp_path / "cache" _snake_case = {"text": "string"} _snake_case = features.copy() if features else default_expected_features _snake_case = ( Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case = TextDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' _snake_case = tmp_path / "cache" _snake_case = {"text": "string"} _snake_case = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read() _check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): _snake_case = text_path elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): _snake_case = [text_path] _snake_case = tmp_path / "cache" _snake_case = {"text": "string"} _snake_case = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=("train",) ): '''simple docstring''' assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for split in splits: _snake_case = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def snake_case_ ( SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' _snake_case = tmp_path / "cache" _snake_case = {"text": "string"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _snake_case = TextDatasetReader({"train": text_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read() _check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize( "features" , [ None, {"text": "string"}, {"text": "int32"}, {"text": "float32"}, ] , ) def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' _snake_case = tmp_path / "cache" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" _snake_case = {"text": "string"} _snake_case = features.copy() if features else default_expected_features _snake_case = ( Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None ) _snake_case = TextDatasetReader({"train": text_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' if split: _snake_case = {split: text_path} else: _snake_case = "train" _snake_case = {"train": text_path, "test": text_path} _snake_case = tmp_path / "cache" _snake_case = {"text": "string"} _snake_case = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
"""A minimal two-cluster self-organizing map (Kohonen network)."""
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index of the weight vector closest to the sample (squared Euclidean distance)."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        # the closer weight vector wins
        return 0 if d0 < d1 else 1

    def update(
        self, weights: list[list[float]], sample: list[int], j: int, alpha: float
    ) -> list[list[float]]:
        """Pull the winning vector j towards the sample with learning rate alpha."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization (n, C)
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify a test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
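A short sanity check of the winner rule above, reusing the initial weights from main() (a sketch; the numbers are only illustrative):

# For sample [1, 0, 0, 0] the squared distances to the two initial weight vectors
# are 2.06 and 0.78, so the closer vector, index 1, is the winner.
som = SelfOrganizingMap()
initial_weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
assert som.get_winner(initial_weights, [1, 0, 0, 0]) == 1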
"""simple docstring""" def __UpperCamelCase ( snake_case__ ): if p < 2: raise ValueError("""p should not be less than 2!""" ) elif p == 2: return True A_ : Dict = 4 A_ : int = (1 << p) - 1 for _ in range(p - 2 ): A_ : Union[str, Any] = ((s * s) - 2) % m return s == 0 if __name__ == "__main__": print(lucas_lehmer_test(7)) print(lucas_lehmer_test(11))
"""simple docstring""" import math import os import sys def __UpperCamelCase ( snake_case__ ): A_ : Optional[Any] = """""" try: with open(snake_case__ , """rb""" ) as binary_file: A_ : Union[str, Any] = binary_file.read() for dat in data: A_ : Dict = F"""{dat:08b}""" result += curr_byte return result except OSError: print("""File not accessible""" ) sys.exit() def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): lexicon.pop(snake_case__ ) A_ : List[str] = last_match_id if math.loga(snake_case__ ).is_integer(): for curr_key in lexicon: A_ : Dict = """0""" + lexicon[curr_key] A_ : int = bin(snake_case__ )[2:] def __UpperCamelCase ( snake_case__ ): A_ : Dict = {"""0""": """0""", """1""": """1"""} A_ , A_ : Optional[int] = """""", """""" A_ : Tuple = len(snake_case__ ) for i in range(len(snake_case__ ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue A_ : List[str] = lexicon[curr_string] result += last_match_id add_key_to_lexicon(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) index += 1 A_ : int = """""" while curr_string != "" and curr_string not in lexicon: curr_string += "0" if curr_string != "": A_ : Any = lexicon[curr_string] result += last_match_id return result def __UpperCamelCase ( snake_case__ , snake_case__ ): A_ : Optional[int] = os.path.getsize(snake_case__ ) A_ : Dict = bin(snake_case__ )[2:] A_ : Optional[Any] = len(snake_case__ ) return "0" * (length_length - 1) + file_length_binary + compressed def __UpperCamelCase ( snake_case__ , snake_case__ ): A_ : Tuple = 8 try: with open(snake_case__ , """wb""" ) as opened_file: A_ : Dict = [ to_write[i : i + byte_length] for i in range(0 , len(snake_case__ ) , snake_case__ ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append("""10000000""" ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array: opened_file.write(int(snake_case__ , 2 ).to_bytes(1 , byteorder="""big""" ) ) except OSError: print("""File not accessible""" ) sys.exit() def __UpperCamelCase ( snake_case__ , snake_case__ ): A_ : List[str] = read_file_binary(snake_case__ ) A_ : str = compress_data(snake_case__ ) A_ : int = add_file_length(snake_case__ , snake_case__ ) write_file_binary(snake_case__ , snake_case__ ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
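What the lazy module buys: importing the package is cheap, and the torch-backed classes are only resolved when an attribute is first accessed. A small sketch, assuming a standard transformers install:

# Importing the submodule does not pull in the torch-heavy modeling code by itself.
from transformers.models import chinese_clip

config_cls = chinese_clip.ChineseCLIPConfig  # attribute access triggers the real import
print(config_cls.model_type)  # expected: "chinese_clip"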
import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class __a( unittest.TestCase ): """simple docstring""" def a__ ( self ) -> List[Any]: UpperCAmelCase_ : Dict = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() UpperCAmelCase_ : int = dict(zip(_SCREAMING_SNAKE_CASE ,range(len(_SCREAMING_SNAKE_CASE ) ) ) ) UpperCAmelCase_ : List[Any] = { '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } UpperCAmelCase_ : Dict = { '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 16_000, '''return_attention_mask''': False, '''do_normalize''': True, } UpperCAmelCase_ : str = tempfile.mkdtemp() UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ : List[str] = os.path.join(self.tmpdirname ,_SCREAMING_SNAKE_CASE ) with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp: fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '''\n''' ) with open(self.feature_extraction_file ,'''w''' ,encoding='''utf-8''' ) as fp: fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '''\n''' ) # load decoder from hub UpperCAmelCase_ : str = '''hf-internal-testing/ngram-beam-search-decoder''' def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> Dict: UpperCAmelCase_ : int = self.add_kwargs_tokens_map.copy() kwargs.update(_SCREAMING_SNAKE_CASE ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE ) def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> Any: return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE ) def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> Dict: return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> int: shutil.rmtree(self.tmpdirname ) def a__ ( self ) -> int: UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase_ : List[str] = self.get_feature_extractor() UpperCAmelCase_ : Tuple = self.get_decoder() UpperCAmelCase_ : Tuple = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase_ : List[str] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer ,_SCREAMING_SNAKE_CASE ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() 
,feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor ,_SCREAMING_SNAKE_CASE ) # decoder self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,) self.assertIsInstance(processor.decoder ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Tuple: UpperCAmelCase_ : Tuple = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match UpperCAmelCase_ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha ,5.0 ) self.assertEqual(processor.language_model.beta ,3.0 ) self.assertEqual(processor.language_model.score_boundary ,-7.0 ) self.assertEqual(processor.language_model.unk_score_offset ,3 ) def a__ ( self ) -> Dict: UpperCAmelCase_ : str = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(_SCREAMING_SNAKE_CASE ,'''include''' ): WavaVecaProcessorWithLM( tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() ) def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : Any = self.get_feature_extractor() UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase_ : List[str] = self.get_decoder() UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = floats_list((3, 1_000) ) UpperCAmelCase_ : Any = feature_extractor(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' ) UpperCAmelCase_ : Any = processor(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def a__ ( self ) -> List[str]: UpperCAmelCase_ : Union[str, Any] = self.get_feature_extractor() UpperCAmelCase_ : List[Any] = self.get_tokenizer() UpperCAmelCase_ : Optional[Any] = self.get_decoder() UpperCAmelCase_ : str = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = '''This is a test string''' UpperCAmelCase_ : Optional[int] = processor(text=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = tokenizer(_SCREAMING_SNAKE_CASE ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def a__ ( self ,_SCREAMING_SNAKE_CASE=(2, 10, 16) ,_SCREAMING_SNAKE_CASE=77 ) -> int: np.random.seed(_SCREAMING_SNAKE_CASE ) return np.random.rand(*_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Dict: UpperCAmelCase_ : int = self.get_feature_extractor() UpperCAmelCase_ : Any = self.get_tokenizer() UpperCAmelCase_ : Optional[Any] = self.get_decoder() UpperCAmelCase_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = self._get_dummy_logits(shape=(10, 16) ,seed=13 ) UpperCAmelCase_ : Optional[Any] = processor.decode(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = decoder.decode_beams(_SCREAMING_SNAKE_CASE )[0] 
self.assertEqual(decoded_decoder[0] ,decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' ,decoded_processor.text ) self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]: UpperCAmelCase_ : Dict = self.get_feature_extractor() UpperCAmelCase_ : Optional[Any] = self.get_tokenizer() UpperCAmelCase_ : Tuple = self.get_decoder() UpperCAmelCase_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: UpperCAmelCase_ : Dict = processor.batch_decode(_SCREAMING_SNAKE_CASE ) else: with get_context(_SCREAMING_SNAKE_CASE ).Pool() as pool: UpperCAmelCase_ : str = processor.batch_decode(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = list(_SCREAMING_SNAKE_CASE ) with get_context('''fork''' ).Pool() as p: UpperCAmelCase_ : List[str] = decoder.decode_beams_batch(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : str = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(_SCREAMING_SNAKE_CASE ,decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] ,decoded_processor.text ) self.assertListEqual(_SCREAMING_SNAKE_CASE ,decoded_processor.logit_score ) self.assertListEqual(_SCREAMING_SNAKE_CASE ,decoded_processor.lm_score ) def a__ ( self ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = self.get_feature_extractor() UpperCAmelCase_ : List[Any] = self.get_tokenizer() UpperCAmelCase_ : Tuple = self.get_decoder() UpperCAmelCase_ : Any = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[Any] = self._get_dummy_logits() UpperCAmelCase_ : List[Any] = 15 UpperCAmelCase_ : Optional[Any] = -20.0 UpperCAmelCase_ : Tuple = -4.0 UpperCAmelCase_ : Union[str, Any] = processor.batch_decode( _SCREAMING_SNAKE_CASE ,beam_width=_SCREAMING_SNAKE_CASE ,beam_prune_logp=_SCREAMING_SNAKE_CASE ,token_min_logp=_SCREAMING_SNAKE_CASE ,) UpperCAmelCase_ : List[Any] = decoded_processor_out.text UpperCAmelCase_ : int = list(_SCREAMING_SNAKE_CASE ) with get_context('''fork''' ).Pool() as pool: UpperCAmelCase_ : List[str] = decoder.decode_beams_batch( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,beam_width=_SCREAMING_SNAKE_CASE ,beam_prune_logp=_SCREAMING_SNAKE_CASE ,token_min_logp=_SCREAMING_SNAKE_CASE ,) UpperCAmelCase_ : str = [d[0][0] for d in decoded_decoder_out] UpperCAmelCase_ : Union[str, Any] = [d[0][2] for d in decoded_decoder_out] UpperCAmelCase_ : Dict = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] ,_SCREAMING_SNAKE_CASE ) self.assertTrue(np.array_equal(_SCREAMING_SNAKE_CASE ,decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.0_54, -18.4_47] ,_SCREAMING_SNAKE_CASE 
,atol=1e-3 ) ) self.assertTrue(np.array_equal(_SCREAMING_SNAKE_CASE ,decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.5_54, -13.94_74] ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ) ) def a__ ( self ) -> List[str]: UpperCAmelCase_ : List[Any] = self.get_feature_extractor() UpperCAmelCase_ : List[Any] = self.get_tokenizer() UpperCAmelCase_ : Optional[int] = self.get_decoder() UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[Any] = self._get_dummy_logits() UpperCAmelCase_ : List[Any] = 2.0 UpperCAmelCase_ : Optional[int] = 5.0 UpperCAmelCase_ : List[Any] = -20.0 UpperCAmelCase_ : List[str] = True UpperCAmelCase_ : str = processor.batch_decode( _SCREAMING_SNAKE_CASE ,alpha=_SCREAMING_SNAKE_CASE ,beta=_SCREAMING_SNAKE_CASE ,unk_score_offset=_SCREAMING_SNAKE_CASE ,lm_score_boundary=_SCREAMING_SNAKE_CASE ,) UpperCAmelCase_ : Tuple = decoded_processor_out.text UpperCAmelCase_ : Optional[Any] = list(_SCREAMING_SNAKE_CASE ) decoder.reset_params( alpha=_SCREAMING_SNAKE_CASE ,beta=_SCREAMING_SNAKE_CASE ,unk_score_offset=_SCREAMING_SNAKE_CASE ,lm_score_boundary=_SCREAMING_SNAKE_CASE ,) with get_context('''fork''' ).Pool() as pool: UpperCAmelCase_ : Optional[int] = decoder.decode_beams_batch( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) UpperCAmelCase_ : List[str] = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] ,_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha ,2.0 ) self.assertEqual(lm_model.beta ,5.0 ) self.assertEqual(lm_model.unk_score_offset ,-20.0 ) self.assertEqual(lm_model.score_boundary ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> List[str]: UpperCAmelCase_ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase_ : Tuple = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase_ : Dict = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() UpperCAmelCase_ : Any = os.listdir(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = ['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) 
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> List[Any]: UpperCAmelCase_ : int = snapshot_download('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase_ : List[str] = WavaVecaProcessorWithLM.from_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase_ : List[str] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() UpperCAmelCase_ : List[Any] = os.listdir(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : str = os.listdir(_SCREAMING_SNAKE_CASE ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Optional[Any]: UpperCAmelCase_ : str = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase_ : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase_ : Tuple = floats_list((3, 1_000) ) UpperCAmelCase_ : Optional[Any] = processor_wavaveca(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' ) UpperCAmelCase_ : List[str] = processor_auto(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1e-2 ) UpperCAmelCase_ : Any = self._get_dummy_logits() UpperCAmelCase_ : int = processor_wavaveca.batch_decode(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = processor_auto.batch_decode(_SCREAMING_SNAKE_CASE ) self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text ) def a__ ( self ) -> List[str]: UpperCAmelCase_ : Union[str, Any] = self.get_feature_extractor() UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase_ : Any = self.get_decoder() UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE ) self.assertListEqual( processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,) @staticmethod def a__ ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple: UpperCAmelCase_ : int = [d[key] for d in offsets] return retrieved_list def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : List[str] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase_ : Union[str, Any] = self._get_dummy_logits()[0] UpperCAmelCase_ : Tuple = processor.decode(_SCREAMING_SNAKE_CASE ,output_word_offsets=_SCREAMING_SNAKE_CASE ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) ,4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ) ,outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset''' ) ,[0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset''' ) ,[1, 3, 5] ) def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = 
WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase_ : int = self._get_dummy_logits() UpperCAmelCase_ : List[str] = processor.batch_decode(_SCREAMING_SNAKE_CASE ,output_word_offsets=_SCREAMING_SNAKE_CASE ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) ,4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''word''' ) ) for o in outputs['''word_offsets''']] ,outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset''' ) ,[0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset''' ) ,[1, 3, 5] ) @slow @require_torch @require_torchaudio def a__ ( self ) -> Union[str, Any]: import torch UpperCAmelCase_ : List[str] = load_dataset('''common_voice''' ,'''en''' ,split='''train''' ,streaming=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Dict = ds.cast_column('''audio''' ,datasets.Audio(sampling_rate=16_000 ) ) UpperCAmelCase_ : Tuple = iter(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = next(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) UpperCAmelCase_ : Dict = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train UpperCAmelCase_ : List[str] = processor(sample['''audio''']['''array'''] ,return_tensors='''pt''' ).input_values with torch.no_grad(): UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE ).logits.cpu().numpy() UpperCAmelCase_ : str = processor.decode(logits[0] ,output_word_offsets=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate UpperCAmelCase_ : Any = [ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] UpperCAmelCase_ : Any = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''word''' ) ) ,_SCREAMING_SNAKE_CASE ) self.assertEqual(''' '''.join(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''word''' ) ) ,output.text ) # output times UpperCAmelCase_ : List[Any] = torch.tensor(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''start_time''' ) ) UpperCAmelCase_ : str = torch.tensor(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''end_time''' ) ) # fmt: off UpperCAmelCase_ : str = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] ) UpperCAmelCase_ : Optional[int] = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] ) # fmt: on self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=0.01 ) ) self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=0.01 ) )
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    LineByLineWithSOPTextDataset,
    TextDataset,
    TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
class MaxFenwickTree:
    """Fenwick-style structure supporting point updates and maximum queries over [left, right)."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value, current_left_border, index)
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # because `right` is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
'''simple docstring''' import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness a : Tuple = '''\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } ''' a : Optional[int] = '''\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). ''' a : Tuple = ''' Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric("code_eval") >>> test_cases = ["assert add(2,3)==5"] >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {\'pass@1\': 0.5, \'pass@2\': 1.0} ''' a : Optional[int] = ''' ################################################################################ !!!WARNING!!! ################################################################################ The "code_eval" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). 
Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this with: >>> import os >>> os.environ["HF_ALLOW_CODE_EVAL"] = "1" ################################################################################\ ''' a : Optional[int] = '''The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): def A ( self : Dict ): """simple docstring""" return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" ) ), "references": datasets.Value("string" ), } ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , ) def A ( self : str , a_ : List[Any] , a_ : Optional[int] , a_ : Any=[1, 10, 100] , a_ : Optional[int]=4 , a_ : Optional[int]=3.0 ): """simple docstring""" if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError("This metric is currently not supported on Windows." 
) with ThreadPoolExecutor(max_workers=a_ ) as executor: __snake_case = [] __snake_case = Counter() __snake_case = 0 __snake_case = defaultdict(a_ ) for task_id, (candidates, test_case) in enumerate(zip(a_ , a_ ) ): for candidate in candidates: __snake_case = candidate + "\n" + test_case __snake_case = (test_program, timeout, task_id, completion_id[task_id]) __snake_case = executor.submit(a_ , *a_ ) futures.append(a_ ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(a_ ): __snake_case = future.result() results[result["task_id"]].append((result["completion_id"], result) ) __snake_case , __snake_case = [], [] for result in results.values(): result.sort() __snake_case = [r[1]["passed"] for r in result] total.append(len(a_ ) ) correct.append(sum(a_ ) ) __snake_case = np.array(a_ ) __snake_case = np.array(a_ ) __snake_case = k __snake_case = {f'''pass@{k}''': estimate_pass_at_k(a_ , a_ , a_ ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any ) -> List[Any]: def estimator(_UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __snake_case = itertools.repeat(_UpperCAmelCase , len(_UpperCAmelCase ) ) else: assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ) __snake_case = iter(_UpperCAmelCase ) return np.array([estimator(int(_UpperCAmelCase ) , int(_UpperCAmelCase ) , _UpperCAmelCase ) for n, c in zip(_UpperCAmelCase , _UpperCAmelCase )] )
class Graph:
    def __init__(self):
        self.vertex = {}

    def print_graph(self):
        # print the adjacency list of every vertex
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int):
        # check if vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list):
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    #  0 1 2 3
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is an empirically determined constant in [0.04, 0.06]
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # can change the threshold value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)

        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _lowerCAmelCase : int = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt") def lowerCAmelCase ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float , _lowerCAmelCase : int = 1_6000 ): """simple docstring""" UpperCAmelCase__ = int(round(sample_rate * max_length ) ) if len(_lowerCAmelCase ) <= sample_length: return wav UpperCAmelCase__ = randint(0 , len(_lowerCAmelCase ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class _UpperCamelCase : UpperCAmelCase_ = field(default=lowerCAmelCase , metadata={"""help""": """Name of a dataset from the datasets package"""} ) UpperCAmelCase_ = field( default=lowerCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) UpperCAmelCase_ = field( default=lowerCAmelCase , metadata={"""help""": """A file containing the training audio paths and labels."""} ) UpperCAmelCase_ = field( default=lowerCAmelCase , metadata={"""help""": """A file containing the validation audio paths and labels."""} ) UpperCAmelCase_ = field( default="""train""" , metadata={ """help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'""" } , ) UpperCAmelCase_ = field( default="""validation""" , metadata={ """help""": ( """The name of the training data set split to use (via the datasets library). Defaults to 'validation'""" ) } , ) UpperCAmelCase_ = field( default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , ) UpperCAmelCase_ = field( default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. 
Defaults to 'label'"""} ) UpperCAmelCase_ = field( default=lowerCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) UpperCAmelCase_ = field( default=lowerCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) UpperCAmelCase_ = field( default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , ) @dataclass class _UpperCamelCase : UpperCAmelCase_ = field( default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , ) UpperCAmelCase_ = field( default=lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) UpperCAmelCase_ = field( default=lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""} ) UpperCAmelCase_ = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) UpperCAmelCase_ = field( default=lowerCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""} ) UpperCAmelCase_ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""} ) UpperCAmelCase_ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""} ) UpperCAmelCase_ = field( default=lowerCAmelCase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) UpperCAmelCase_ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} ) UpperCAmelCase_ = field( default=lowerCAmelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def UpperCAmelCase_ ( self :str ) -> List[Any]: if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( "The argument `--freeze_feature_extractor` is deprecated and " "will be removed in a future version. Use `--freeze_feature_encoder`" "instead. Setting `freeze_feature_encoder==True`." , lowerCamelCase , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( "The argument `--freeze_feature_extractor` is deprecated and " "should not be used in combination with `--freeze_feature_encoder`." "Only make use of `--freeze_feature_encoder`." ) def lowerCAmelCase ( ): """simple docstring""" UpperCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry("run_audio_classification" , _lowerCAmelCase , _lowerCAmelCase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCAmelCase__ = training_args.get_process_log_level() logger.setLevel(_lowerCAmelCase ) transformers.utils.logging.set_verbosity(_lowerCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} ''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. UpperCAmelCase__ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCAmelCase__ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to train from scratch." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset and prepare it for the audio classification task. UpperCAmelCase__ = DatasetDict() UpperCAmelCase__ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) UpperCAmelCase__ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( F'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. ''' "Make sure to set `--audio_column_name` to the correct audio column - one of " F'''{", ".join(raw_datasets["train"].column_names )}.''' ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( F'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. 
''' "Make sure to set `--label_column_name` to the correct text column - one of " F'''{", ".join(raw_datasets["train"].column_names )}.''' ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. UpperCAmelCase__ = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) UpperCAmelCase__ = feature_extractor.model_input_names[0] def train_transforms(_lowerCAmelCase : Tuple ): UpperCAmelCase__ = [] for audio in batch[data_args.audio_column_name]: UpperCAmelCase__ = random_subsample( audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(_lowerCAmelCase ) UpperCAmelCase__ = feature_extractor(_lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate ) UpperCAmelCase__ = {model_input_name: inputs.get(_lowerCAmelCase )} UpperCAmelCase__ = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(_lowerCAmelCase : Union[str, Any] ): UpperCAmelCase__ = [audio["array"] for audio in batch[data_args.audio_column_name]] UpperCAmelCase__ = feature_extractor(_lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate ) UpperCAmelCase__ = {model_input_name: inputs.get(_lowerCAmelCase )} UpperCAmelCase__ = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. UpperCAmelCase__ = raw_datasets["train"].features[data_args.label_column_name].names UpperCAmelCase__ , UpperCAmelCase__ = {}, {} for i, label in enumerate(_lowerCAmelCase ): UpperCAmelCase__ = str(_lowerCAmelCase ) UpperCAmelCase__ = label # Load the accuracy metric from the datasets package UpperCAmelCase__ = evaluate.load("accuracy" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. 
def compute_metrics(_lowerCAmelCase : Optional[Any] ): UpperCAmelCase__ = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=_lowerCAmelCase , references=eval_pred.label_ids ) UpperCAmelCase__ = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_lowerCAmelCase ) , labelaid=_lowerCAmelCase , idalabel=_lowerCAmelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) UpperCAmelCase__ = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: UpperCAmelCase__ = ( raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(_lowerCAmelCase , output_all_columns=_lowerCAmelCase ) if training_args.do_eval: if data_args.max_eval_samples is not None: UpperCAmelCase__ = ( raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(_lowerCAmelCase , output_all_columns=_lowerCAmelCase ) # Initialize our trainer UpperCAmelCase__ = Trainer( model=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , ) # Training if training_args.do_train: UpperCAmelCase__ = None if training_args.resume_from_checkpoint is not None: UpperCAmelCase__ = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCAmelCase__ = last_checkpoint UpperCAmelCase__ = trainer.train(resume_from_checkpoint=_lowerCAmelCase ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: UpperCAmelCase__ = trainer.evaluate() trainer.log_metrics("eval" , _lowerCAmelCase ) trainer.save_metrics("eval" , _lowerCAmelCase ) # Write model card and (optionally) push to hub UpperCAmelCase__ = { "finetuned_from": model_args.model_name_or_path, "tasks": "audio-classification", "dataset": data_args.dataset_name, "tags": ["audio-classification"], } if training_args.push_to_hub: trainer.push_to_hub(**_lowerCAmelCase ) else: trainer.create_model_card(**_lowerCAmelCase ) if __name__ == "__main__": main()
from collections.abc import Callable class lowerCAmelCase__ : def __init__( self , a = None ) -> None: '''simple docstring''' _UpperCamelCase = [] # Stores indexes of each item for supporting updates and deletion. _UpperCamelCase = {} # Stores current size of heap. _UpperCamelCase = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. _UpperCamelCase = key or (lambda a : x) def A_ ( self , a ) -> int | None: '''simple docstring''' return int((i - 1) / 2 ) if i > 0 else None def A_ ( self , a ) -> int | None: '''simple docstring''' _UpperCamelCase = int(2 * i + 1 ) return left if 0 < left < self.size else None def A_ ( self , a ) -> int | None: '''simple docstring''' _UpperCamelCase = int(2 * i + 2 ) return right if 0 < right < self.size else None def A_ ( self , a , a ) -> None: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. _UpperCamelCase , _UpperCamelCase = self.arr[j], self.arr[i] def A_ ( self , a , a ) -> bool: '''simple docstring''' return self.arr[i][1] < self.arr[j][1] def A_ ( self , a ) -> int: '''simple docstring''' _UpperCamelCase = self._left(a ) _UpperCamelCase = self._right(a ) _UpperCamelCase = i if left is not None and not self._cmp(a , a ): _UpperCamelCase = left if right is not None and not self._cmp(a , a ): _UpperCamelCase = right return valid_parent def A_ ( self , a ) -> None: '''simple docstring''' _UpperCamelCase = self._parent(a ) while parent is not None and not self._cmp(a , a ): self._swap(a , a ) _UpperCamelCase , _UpperCamelCase = parent, self._parent(a ) def A_ ( self , a ) -> None: '''simple docstring''' _UpperCamelCase = self._get_valid_parent(a ) while valid_parent != index: self._swap(a , a ) _UpperCamelCase , _UpperCamelCase = valid_parent, self._get_valid_parent(a ) def A_ ( self , a , a ) -> None: '''simple docstring''' if item not in self.pos_map: return _UpperCamelCase = self.pos_map[item] _UpperCamelCase = [item, self.key(a )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(a ) self._heapify_down(a ) def A_ ( self , a ) -> None: '''simple docstring''' if item not in self.pos_map: return _UpperCamelCase = self.pos_map[item] del self.pos_map[item] _UpperCamelCase = self.arr[self.size - 1] _UpperCamelCase = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. if self.size > index: self._heapify_up(a ) self._heapify_down(a ) def A_ ( self , a , a ) -> None: '''simple docstring''' _UpperCamelCase = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(a )] ) else: _UpperCamelCase = [item, self.key(a )] _UpperCamelCase = self.size self.size += 1 self._heapify_up(self.size - 1 ) def A_ ( self ) -> tuple | None: '''simple docstring''' return self.arr[0] if self.size else None def A_ ( self ) -> tuple | None: '''simple docstring''' _UpperCamelCase = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def __A() -> None: """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar lowerCamelCase__ = TypeVar("T") lowerCamelCase__ = TypeVar("U") class lowerCAmelCase__ ( Generic[T, U] ): def __init__( self , a , a ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = key _UpperCamelCase = val _UpperCamelCase = None _UpperCamelCase = None def __repr__( self ) -> str: '''simple docstring''' return ( F'Node: key: {self.key}, val: {self.val}, ' F'has next: {bool(self.next )}, has prev: {bool(self.prev )}' ) class lowerCAmelCase__ ( Generic[T, U] ): def __init__( self ) -> None: '''simple docstring''' _UpperCamelCase = DoubleLinkedListNode(a , a ) _UpperCamelCase = DoubleLinkedListNode(a , a ) _UpperCamelCase , _UpperCamelCase = self.rear, self.head def __repr__( self ) -> str: '''simple docstring''' _UpperCamelCase = ["""DoubleLinkedList"""] _UpperCamelCase = self.head while node.next is not None: rep.append(str(a ) ) _UpperCamelCase = node.next rep.append(str(self.rear ) ) return ",\n ".join(a ) def A_ ( self , a ) -> None: '''simple docstring''' _UpperCamelCase = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None _UpperCamelCase = node _UpperCamelCase = previous _UpperCamelCase = node _UpperCamelCase = self.rear def A_ ( self , a ) -> DoubleLinkedListNode[T, U] | None: '''simple docstring''' if node.prev is None or node.next is None: return None _UpperCamelCase = node.next _UpperCamelCase = node.prev _UpperCamelCase = None _UpperCamelCase = None return node class lowerCAmelCase__ ( Generic[T, U] ): UpperCamelCase_ : dict[Callable[[T], U], LRUCache[T, U]] = {} def __init__( self , a ) -> int: '''simple docstring''' _UpperCamelCase = DoubleLinkedList() _UpperCamelCase = capacity _UpperCamelCase = 0 _UpperCamelCase = 0 _UpperCamelCase = 0 _UpperCamelCase = {} def __repr__( self ) -> str: '''simple docstring''' return ( F'CacheInfo(hits={self.hits}, misses={self.miss}, ' F'capacity={self.capacity}, current size={self.num_keys})' ) def __contains__( self , a ) -> bool: '''simple docstring''' return key in self.cache def A_ ( self , a ) -> U | None: '''simple docstring''' if key in self.cache: self.hits += 1 _UpperCamelCase = self.cache[key] _UpperCamelCase = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(a ) return node.val self.miss += 1 return None def A_ ( self , a , a ) -> None: '''simple docstring''' if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity _UpperCamelCase = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(a ) is not None ) # node guaranteed to be in list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 _UpperCamelCase = DoubleLinkedListNode(a , a ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value _UpperCamelCase = self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list _UpperCamelCase = value self.list.add(a ) @classmethod def A_ ( cls , a = 1_28 ) -> Callable[[Callable[[T], U]], Callable[..., U]]: '''simple docstring''' def cache_decorator_inner(a ) -> Callable[..., U]: def cache_decorator_wrapper(*a ) -> U: if func not in 
cls.decorator_function_to_instance_map: _UpperCamelCase = LRUCache(a ) _UpperCamelCase = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: _UpperCamelCase = func(*a ) cls.decorator_function_to_instance_map[func].put(args[0] , a ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(a , """cache_info""" , a ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class __lowercase ( _UpperCAmelCase ): UpperCamelCase = (DDPMScheduler,) def _lowercase ( self : List[str] , **__lowerCamelCase : List[Any] ) -> str: """simple docstring""" UpperCAmelCase = { """num_train_timesteps""": 1_0_0_0, """beta_start""": 0.0_001, """beta_end""": 0.02, """beta_schedule""": """linear""", """variance_type""": """fixed_small""", """clip_sample""": True, } config.update(**__UpperCamelCase ) return config def _lowercase ( self : Any ) -> Union[str, Any]: """simple docstring""" for timesteps in [1, 5, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__UpperCamelCase ) def _lowercase ( self : Optional[int] ) -> List[Any]: """simple docstring""" for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=__UpperCamelCase , beta_end=__UpperCamelCase ) def _lowercase ( self : int ) -> Tuple: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__UpperCamelCase ) def _lowercase ( self : Tuple ) -> Optional[Any]: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=__UpperCamelCase ) def _lowercase ( self : int ) -> Union[str, Any]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=__UpperCamelCase ) def _lowercase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" self.check_over_configs(thresholding=__UpperCamelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , ) def _lowercase ( self : int ) -> List[Any]: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCamelCase ) def _lowercase ( self : int ) -> int: """simple docstring""" for t in [0, 5_0_0, 9_9_9]: self.check_over_forward(time_step=__UpperCamelCase ) def _lowercase ( self : List[str] ) -> int: """simple docstring""" UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config() UpperCAmelCase = scheduler_class(**__UpperCamelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5 def _lowercase ( self : List[Any] ) -> Any: """simple docstring""" UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config() UpperCAmelCase = scheduler_class(**__UpperCamelCase ) UpperCAmelCase = len(__UpperCamelCase ) UpperCAmelCase = self.dummy_model() UpperCAmelCase = self.dummy_sample_deter UpperCAmelCase = torch.manual_seed(0 ) for t in reversed(range(__UpperCamelCase ) ): # 1. predict noise residual UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) # 2. 
predict previous mean of sample x_t-1 UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance UpperCAmelCase = pred_prev_sample UpperCAmelCase = torch.sum(torch.abs(__UpperCamelCase ) ) UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2 assert abs(result_mean.item() - 0.3_372 ) < 1e-3 def _lowercase ( self : Optional[int] ) -> Dict: """simple docstring""" UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" ) UpperCAmelCase = scheduler_class(**__UpperCamelCase ) UpperCAmelCase = len(__UpperCamelCase ) UpperCAmelCase = self.dummy_model() UpperCAmelCase = self.dummy_sample_deter UpperCAmelCase = torch.manual_seed(0 ) for t in reversed(range(__UpperCamelCase ) ): # 1. predict noise residual UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) # 2. predict previous mean of sample x_t-1 UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance UpperCAmelCase = pred_prev_sample UpperCAmelCase = torch.sum(torch.abs(__UpperCamelCase ) ) UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2 assert abs(result_mean.item() - 0.2_631 ) < 1e-3 def _lowercase ( self : List[Any] ) -> Optional[int]: """simple docstring""" UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config() UpperCAmelCase = scheduler_class(**__UpperCamelCase ) UpperCAmelCase = [1_0_0, 8_7, 5_0, 1, 0] scheduler.set_timesteps(timesteps=__UpperCamelCase ) UpperCAmelCase = scheduler.timesteps for i, timestep in enumerate(__UpperCamelCase ): if i == len(__UpperCamelCase ) - 1: UpperCAmelCase = -1 else: UpperCAmelCase = timesteps[i + 1] UpperCAmelCase = scheduler.previous_timestep(__UpperCamelCase ) UpperCAmelCase = prev_t.item() self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def _lowercase ( self : Tuple ) -> Tuple: """simple docstring""" UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config() UpperCAmelCase = scheduler_class(**__UpperCamelCase ) UpperCAmelCase = [1_0_0, 8_7, 5_0, 5_1, 0] with self.assertRaises(__UpperCamelCase , msg="""`custom_timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=__UpperCamelCase ) def _lowercase ( self : Dict ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config() UpperCAmelCase = scheduler_class(**__UpperCamelCase ) UpperCAmelCase = [1_0_0, 8_7, 5_0, 1, 0] UpperCAmelCase = len(__UpperCamelCase ) with self.assertRaises(__UpperCamelCase , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase ) def _lowercase ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" UpperCAmelCase = self.scheduler_classes[0] UpperCAmelCase = self.get_scheduler_config() UpperCAmelCase = scheduler_class(**__UpperCamelCase ) UpperCAmelCase = [scheduler.config.num_train_timesteps] with self.assertRaises( 
__UpperCamelCase , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ): scheduler.set_timesteps(timesteps=__UpperCamelCase )
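# A minimal, hedged usage sketch of the DDPMScheduler API the test above exercises:
# configure the scheduler, choose inference timesteps, then repeatedly call `step`
# with a model prediction. The random tensor is a stand-in for a real UNet output,
# and the (1, 3, 32, 32) shape is an illustrative assumption.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear", clip_sample=True)
scheduler.set_timesteps(50)

sample = torch.randn(1, 3, 32, 32)
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    noise_pred = torch.randn_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
print(sample.abs().mean().item())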
from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { """facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""", } class __lowercase ( __snake_case ): UpperCamelCase = '''nllb-moe''' UpperCamelCase = ['''past_key_values'''] UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : Optional[int] , __lowerCamelCase : Optional[Any]=1_2_8_1_1_2 , __lowerCamelCase : Dict=1_0_2_4 , __lowerCamelCase : Optional[int]=1_2 , __lowerCamelCase : Union[str, Any]=4_0_9_6 , __lowerCamelCase : List[str]=1_6 , __lowerCamelCase : List[str]=1_2 , __lowerCamelCase : int=4_0_9_6 , __lowerCamelCase : Tuple=1_6 , __lowerCamelCase : str=0.05 , __lowerCamelCase : List[str]=0.05 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : str="relu" , __lowerCamelCase : Dict=1_0_2_4 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Any=False , __lowerCamelCase : Tuple="float32" , __lowerCamelCase : Any=False , __lowerCamelCase : Optional[int]=1_2_8 , __lowerCamelCase : List[str]=6_4 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : str=0.001 , __lowerCamelCase : Optional[int]=0.001 , __lowerCamelCase : Tuple="all" , __lowerCamelCase : Any=False , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : List[str]=1.0 , __lowerCamelCase : Dict=0.2 , __lowerCamelCase : Union[str, Any]=1 , __lowerCamelCase : int=0 , __lowerCamelCase : Dict=2 , __lowerCamelCase : int=False , **__lowerCamelCase : str , ) -> int: """simple docstring""" UpperCAmelCase = vocab_size UpperCAmelCase = max_position_embeddings UpperCAmelCase = d_model UpperCAmelCase = encoder_ffn_dim UpperCAmelCase = encoder_layers UpperCAmelCase = encoder_attention_heads UpperCAmelCase = decoder_ffn_dim UpperCAmelCase = decoder_layers UpperCAmelCase = decoder_attention_heads UpperCAmelCase = dropout UpperCAmelCase = attention_dropout UpperCAmelCase = activation_dropout UpperCAmelCase = activation_function UpperCAmelCase = init_std UpperCAmelCase = encoder_layerdrop UpperCAmelCase = decoder_layerdrop UpperCAmelCase = use_cache UpperCAmelCase = encoder_layers UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True UpperCAmelCase = router_z_loss_coef UpperCAmelCase = router_aux_loss_coef UpperCAmelCase = decoder_sparse_step UpperCAmelCase = encoder_sparse_step UpperCAmelCase = num_experts UpperCAmelCase = expert_capacity UpperCAmelCase = router_bias if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" ) UpperCAmelCase = router_dtype UpperCAmelCase = router_ignore_padding_tokens UpperCAmelCase = batch_prioritized_routing UpperCAmelCase = second_expert_policy UpperCAmelCase = normalize_router_prob_before_dropping UpperCAmelCase = moe_eval_capacity_token_fraction UpperCAmelCase = moe_token_dropout UpperCAmelCase = output_router_logits super().__init__( pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , **__lowerCamelCase , )
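# A hedged sketch of building the configuration implemented above, assuming the
# standard transformers export name NllbMoeConfig. The reduced sizes are arbitrary
# illustration values, not real NLLB-MoE hyper-parameters; only keyword names that
# appear in the __init__ signature above are used.
from transformers import NllbMoeConfig

config = NllbMoeConfig(
    d_model=256,
    encoder_layers=2,
    decoder_layers=2,
    encoder_attention_heads=4,
    decoder_attention_heads=4,
    encoder_ffn_dim=512,
    decoder_ffn_dim=512,
    num_experts=4,
    expert_capacity=16,
    router_dtype="float32",
)
print(config.d_model, config.num_experts)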
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __A = 16 __A = 32 def __A ( _lowercase , _lowercase = 16 , _lowercase = "bert-base-cased" ): '''simple docstring''' _A = AutoTokenizer.from_pretrained(_lowercase ) _A = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(_lowercase ): # max_length=None => use the model max length (it's actually the default) _A = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_lowercase , max_length=_lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset _A = datasets.map( _lowercase , batched=_lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=_lowercase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _A = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(_lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_lowercase , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' ) return tokenizer.pad(_lowercase , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. _A = DataLoader( tokenized_datasets['''train'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase ) _A = DataLoader( tokenized_datasets['''validation'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase ) return train_dataloader, eval_dataloader def __A ( _lowercase , _lowercase ): '''simple docstring''' _A = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _A = config['''lr'''] _A = int(config['''num_epochs'''] ) _A = int(config['''seed'''] ) _A = int(config['''batch_size'''] ) _A = args.model_name_or_path set_seed(_lowercase ) _A ,_A = get_dataloaders(_lowercase , _lowercase , _lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _A = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase ) # Instantiate optimizer _A = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) _A = optimizer_cls(params=model.parameters() , lr=_lowercase ) if accelerator.state.deepspeed_plugin is not None: _A = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: _A = 1 _A = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): _A = get_linear_schedule_with_warmup( optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , ) else: _A = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
_A ,_A ,_A ,_A ,_A = accelerator.prepare( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) # We need to keep track of how many total steps we have iterated over _A = 0 # We also need to keep track of the stating epoch so files are named properly _A = 0 # Now we train the model _A = evaluate.load('''glue''' , '''mrpc''' ) _A = 0 _A = {} for epoch in range(_lowercase , _lowercase ): model.train() for step, batch in enumerate(_lowercase ): _A = model(**_lowercase ) _A = outputs.loss _A = loss / gradient_accumulation_steps accelerator.backward(_lowercase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() _A = 0 for step, batch in enumerate(_lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _A = model(**_lowercase ) _A = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times _A ,_A = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(_lowercase ) - 1: _A = predictions[: len(eval_dataloader.dataset ) - samples_seen] _A = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=_lowercase , references=_lowercase , ) _A = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , _lowercase ) _A = eval_metric['''accuracy'''] if best_performance < eval_metric["accuracy"]: _A = eval_metric['''accuracy'''] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}""" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f: json.dump(_lowercase , _lowercase ) def __A ( ): '''simple docstring''' _A = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=_lowercase , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=_lowercase , ) parser.add_argument( '''--output_dir''' , type=_lowercase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , ) parser.add_argument( '''--performance_lower_bound''' , type=_lowercase , default=_lowercase , help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , ) parser.add_argument( '''--num_epochs''' , type=_lowercase , default=3 , help='''Number of train epochs.''' , ) _A = parser.parse_args() _A = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(_lowercase , _lowercase ) if __name__ == "__main__": main()
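# A hedged, self-contained sketch of the core Accelerate training pattern the script
# above follows: wrap model, optimizer and dataloader with accelerator.prepare and
# use accelerator.backward inside the loop. The tiny linear model and random data
# are placeholders, not the script's BERT/MRPC + DeepSpeed setup.
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
dataset = TensorDataset(torch.randn(64, 4), torch.randint(0, 2, (64,)))
loader = DataLoader(dataset, batch_size=8, shuffle=True)

model, optimizer, loader = accelerator.prepare(model, optimizer, loader)
model.train()
for inputs, labels in loader:
    loss = torch.nn.functional.cross_entropy(model(inputs), labels)
    accelerator.backward(loss)
    optimizer.step()
    optimizer.zero_grad()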
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __init__( self: List[str] , __A: List[str] , __A: List[str]=7 , __A: Tuple=3 , __A: Optional[int]=30 , __A: Optional[Any]=4_00 , __A: int=True , __A: str=None , __A: int=True , __A: Any=[0.5, 0.5, 0.5] , __A: Dict=[0.5, 0.5, 0.5] , __A: Dict=True , __A: str=1 / 2_55 , __A: Dict=True , ) -> Tuple: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p _A = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33} _A = parent _A = batch_size _A = num_channels _A = min_resolution _A = max_resolution _A = do_resize _A = size _A = do_normalize _A = image_mean _A = image_std _A = do_rescale _A = rescale_factor _A = do_pad def __A ( self: Optional[Any] ) -> Union[str, Any]: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __A ( self: Any , __A: Optional[Any] , __A: int=False ) -> List[str]: if not batched: _A = image_inputs[0] if isinstance(__A , Image.Image ): _A ,_A = image.size else: _A ,_A = image.shape[1], image.shape[2] if w < h: _A = int(self.size['''shortest_edge'''] * h / w ) _A = self.size['''shortest_edge'''] elif w > h: _A = self.size['''shortest_edge'''] _A = int(self.size['''shortest_edge'''] * w / h ) else: _A = self.size['''shortest_edge'''] _A = self.size['''shortest_edge'''] else: _A = [] for image in image_inputs: _A ,_A = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _A = max(__A , key=lambda __A : item[0] )[0] _A = max(__A , key=lambda __A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ): """simple docstring""" A_ = DeformableDetrImageProcessor if is_vision_available() else None def __A ( self: List[str] ) -> List[str]: _A = DeformableDetrImageProcessingTester(self ) @property def __A ( self: Tuple ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def __A ( self: Any ) -> List[str]: _A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , '''image_mean''' ) ) self.assertTrue(hasattr(__A , '''image_std''' ) ) self.assertTrue(hasattr(__A , '''do_normalize''' ) ) self.assertTrue(hasattr(__A , '''do_resize''' ) ) self.assertTrue(hasattr(__A , '''do_rescale''' ) ) self.assertTrue(hasattr(__A , '''do_pad''' ) ) self.assertTrue(hasattr(__A , '''size''' ) ) def __A ( self: Tuple ) -> Optional[Any]: _A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} ) self.assertEqual(image_processor.do_pad , __A ) _A = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__A ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, 
'''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , __A ) def __A ( self: Dict ) -> Any: pass def __A ( self: str ) -> List[str]: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A ) for image in image_inputs: self.assertIsInstance(__A , Image.Image ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A ,_A = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A ,_A = self.image_processor_tester.get_expected_values(__A , batched=__A ) _A = image_processing(__A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __A ( self: str ) -> Tuple: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A ) for image in image_inputs: self.assertIsInstance(__A , np.ndarray ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A ,_A = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__A , return_tensors='''pt''' ).pixel_values _A ,_A = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __A ( self: int ) -> Any: # Initialize image_processing _A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A ) for image in image_inputs: self.assertIsInstance(__A , torch.Tensor ) # Test not batched input _A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _A ,_A = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _A = image_processing(__A , return_tensors='''pt''' ).pixel_values _A ,_A = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __A ( self: Optional[Any] ) -> Tuple: # prepare image and target _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''image_id''': 3_97_69, '''annotations''': target} # encode them _A = DeformableDetrImageProcessor() _A = image_processing(images=__A , annotations=__A , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __A ) _A = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) 
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __A , atol=1e-4 ) ) # verify area _A = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __A ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __A ) _A = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __A , atol=1e-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __A ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __A ) ) # verify class_labels _A = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __A ) ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __A ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __A ) ) @slow def __A ( self: Dict ) -> Optional[int]: # prepare image, target and masks_path _A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: _A = json.loads(f.read() ) _A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target} _A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them _A = DeformableDetrImageProcessor(format='''coco_panoptic''' ) _A = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors='''pt''' ) # verify pixel values _A = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['''pixel_values'''].shape , __A ) _A = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __A , atol=1e-4 ) ) # verify area _A = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __A ) ) # verify boxes _A = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __A ) _A = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __A , atol=1e-3 ) ) # verify image_id _A = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __A ) ) # verify is_crowd _A = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __A ) ) # verify class_labels _A = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __A ) ) # verify masks _A = 82_28_73 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __A ) # verify orig_size _A = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __A ) ) # verify size _A = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __A ) )
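# A hedged sketch of the preprocessing API the tests above cover. The random uint8
# array stands in for a real image, and the size dict mirrors the tester defaults;
# running it requires torch for return_tensors="pt".
import numpy as np
from transformers import DeformableDetrImageProcessor

processor = DeformableDetrImageProcessor(size={"shortest_edge": 800, "longest_edge": 1333})
image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
encoding = processor(images=image, return_tensors="pt")
print(encoding["pixel_values"].shape)  # roughly (1, 3, 800, 1066) for a 480x640 input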
import argparse import shutil from pathlib import Path from tqdm import tqdm from transformers import AutoTokenizer def _UpperCAmelCase ( UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Any=1_024 ): """simple docstring""" __lowerCamelCase , __lowerCamelCase : str = [], [] __lowerCamelCase : Any = list(zip(UpperCAmelCase , UpperCAmelCase ) ) __lowerCamelCase , __lowerCamelCase : List[str] = sorted_examples[0] def is_too_big(UpperCAmelCase : Optional[Any] ): return tok(UpperCAmelCase , return_tensors="""pt""" ).input_ids.shape[1] > max_tokens for src, tgt in tqdm(sorted_examples[1:] ): __lowerCamelCase : Union[str, Any] = new_src + """ """ + src __lowerCamelCase : str = new_tgt + """ """ + tgt if is_too_big(UpperCAmelCase ) or is_too_big(UpperCAmelCase ): # cant fit, finalize example finished_src.append(UpperCAmelCase ) finished_tgt.append(UpperCAmelCase ) __lowerCamelCase , __lowerCamelCase : str = src, tgt else: # can fit, keep adding __lowerCamelCase , __lowerCamelCase : int = cand_src, cand_tgt # cleanup if new_src: assert new_tgt finished_src.append(UpperCAmelCase ) finished_tgt.append(UpperCAmelCase ) return finished_src, finished_tgt def _UpperCAmelCase ( UpperCAmelCase : Tuple , UpperCAmelCase : Path , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] ): """simple docstring""" __lowerCamelCase : List[Any] = Path(UpperCAmelCase ) save_path.mkdir(exist_ok=UpperCAmelCase ) for split in ["train"]: __lowerCamelCase , __lowerCamelCase : List[Any] = data_dir / f"""{split}.source""", data_dir / f"""{split}.target""" __lowerCamelCase : Tuple = [x.rstrip() for x in Path(UpperCAmelCase ).open().readlines()] __lowerCamelCase : Tuple = [x.rstrip() for x in Path(UpperCAmelCase ).open().readlines()] __lowerCamelCase , __lowerCamelCase : int = pack_examples(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) print(f"""packed {split} split from {len(UpperCAmelCase )} examples -> {len(UpperCAmelCase )}.""" ) Path(save_path / f"""{split}.source""" ).open("""w""" ).write("""\n""".join(UpperCAmelCase ) ) Path(save_path / f"""{split}.target""" ).open("""w""" ).write("""\n""".join(UpperCAmelCase ) ) for split in ["val", "test"]: __lowerCamelCase , __lowerCamelCase : Optional[Any] = data_dir / f"""{split}.source""", data_dir / f"""{split}.target""" shutil.copyfile(UpperCAmelCase , save_path / f"""{split}.source""" ) shutil.copyfile(UpperCAmelCase , save_path / f"""{split}.target""" ) def _UpperCAmelCase ( ): """simple docstring""" __lowerCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("""--tok_name""" , type=UpperCAmelCase , help="""like facebook/bart-large-cnn,t5-base, etc.""" ) parser.add_argument("""--max_seq_len""" , type=UpperCAmelCase , default=128 ) parser.add_argument("""--data_dir""" , type=UpperCAmelCase ) parser.add_argument("""--save_path""" , type=UpperCAmelCase ) __lowerCamelCase : Union[str, Any] = parser.parse_args() __lowerCamelCase : Tuple = AutoTokenizer.from_pretrained(args.tok_name ) return pack_data_dir(UpperCAmelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path ) if __name__ == "__main__": packer_cli()
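# A hedged, self-contained sketch of the greedy packing idea the script above
# implements: keep concatenating consecutive source/target pairs until the
# tokenized length would exceed max_tokens, then start a new packed example.
# The checkpoint name comes from the script's own --tok_name help text; the toy
# sentences and the max_tokens value are made up for illustration.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("facebook/bart-large-cnn")
max_tokens = 16
src = ["first source sentence", "second source sentence", "third source sentence"]
tgt = ["first target", "second target", "third target"]

def is_too_big(text):
    return tok(text, return_tensors="pt").input_ids.shape[1] > max_tokens

finished_src, finished_tgt = [], []
new_src, new_tgt = src[0], tgt[0]
for s, t in zip(src[1:], tgt[1:]):
    cand_src, cand_tgt = new_src + " " + s, new_tgt + " " + t
    if is_too_big(cand_src) or is_too_big(cand_tgt):
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
        new_src, new_tgt = s, t
    else:
        new_src, new_tgt = cand_src, cand_tgt
finished_src.append(new_src)
finished_tgt.append(new_tgt)
print(len(finished_src), "packed examples")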
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase : Tuple = {"""vocab_file""": """vocab.txt"""} UpperCamelCase : Union[str, Any] = { """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } UpperCamelCase : int = { """openbmb/cpm-ant-10b""": 1024, } def UpperCamelCase_ ( __a ) -> Tuple: a__ : List[str] = collections.OrderedDict() with open(__a , "r" , encoding="utf-8" ) as reader: a__ : Optional[int] = reader.readlines() for index, token in enumerate(__a ): a__ : List[str] = token.rstrip("\n" ) a__ : Tuple = index return vocab class A__ ( A__ ): """simple docstring""" def __init__( self : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Union[str, Any]="<unk>" , lowerCamelCase__ : Any=200 ): a__ : Tuple = vocab a__ : Tuple = unk_token a__ : List[Any] = max_input_chars_per_word def _UpperCamelCase( self : int , lowerCamelCase__ : List[str] ): a__ : List[str] = list(lowerCamelCase__ ) if len(lowerCamelCase__ ) > self.max_input_chars_per_word: return [self.unk_token] a__ : Optional[int] = 0 a__ : Tuple = [] while start < len(lowerCamelCase__ ): a__ : Union[str, Any] = len(lowerCamelCase__ ) a__ : List[str] = None while start < end: a__ : int = "".join(chars[start:end] ) if substr in self.vocab: a__ : Optional[Any] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(lowerCamelCase__ ) a__ : List[Any] = end return sub_tokens class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = ['input_ids', 'attention_mask'] _lowercase = False def __init__( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int]="<d>" , lowerCamelCase__ : List[Any]="</d>" , lowerCamelCase__ : Union[str, Any]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : List[Any]="<pad>" , lowerCamelCase__ : str="<unk>" , lowerCamelCase__ : int="</n>" , lowerCamelCase__ : Optional[Any]="</_>" , lowerCamelCase__ : Any="left" , **lowerCamelCase__ : Optional[Any] , ): requires_backends(self , ["jieba"] ) super().__init__( bod_token=lowerCamelCase__ , eod_token=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , line_token=lowerCamelCase__ , space_token=lowerCamelCase__ , padding_side=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : Union[str, Any] = bod_token a__ : Optional[Any] = eod_token a__ : Any = load_vocab(lowerCamelCase__ ) a__ : Dict = self.encoder[space_token] a__ : List[str] = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] a__ : Optional[Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase__ : x[1] ) ) a__ : Union[str, Any] = {v: k for k, v in self.encoder.items()} a__ : Optional[int] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def _UpperCamelCase( self : List[str] ): return self.encoder[self.bod_token] @property def _UpperCamelCase( self : Union[str, Any] ): return self.encoder[self.eod_token] @property def _UpperCamelCase( self : Tuple ): return 
self.encoder["\n"] @property def _UpperCamelCase( self : Optional[int] ): return len(self.encoder ) def _UpperCamelCase( self : Dict ): return dict(self.encoder , **self.added_tokens_encoder ) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Optional[Any] ): a__ : Optional[Any] = [] for x in jieba.cut(lowerCamelCase__ , cut_all=lowerCamelCase__ ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase__ ) ) return output_tokens def _UpperCamelCase( self : str , lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Tuple ): a__ : Tuple = [i for i in token_ids if i >= 0] a__ : List[str] = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Optional[int] ): return token in self.encoder def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : List[str] ): return "".join(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : str ): return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Tuple ): return self.decoder.get(lowerCamelCase__ , self.unk_token ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): if os.path.isdir(lowerCamelCase__ ): a__ : Optional[int] = os.path.join( lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: a__ : Optional[int] = (filename_prefix + "-" if filename_prefix else "") + save_directory a__ : Dict = 0 if " " in self.encoder: a__ : int = self.encoder[" "] del self.encoder[" "] if "\n" in self.encoder: a__ : int = self.encoder["\n"] del self.encoder["\n"] a__ : Optional[int] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase__ : x[1] ) ) with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' " Please check that the vocabulary is not corrupted!" ) a__ : List[Any] = token_index writer.write(token + "\n" ) index += 1 return (vocab_file,) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : List[int] = None ): if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None , lowerCamelCase__ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ ) if token_ids_a is not None: return [1] + ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) return [1] + ([0] * len(lowerCamelCase__ ))
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __A : List[str] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class lowerCAmelCase__ : """simple docstring""" __UpperCAmelCase : Dict = PegasusConfig __UpperCAmelCase : int = {} __UpperCAmelCase : Tuple = "gelu" def __init__( self : List[str] , lowercase__ : int , lowercase__ : Union[str, Any]=1_3 , lowercase__ : Dict=7 , lowercase__ : Optional[Any]=True , lowercase__ : str=False , lowercase__ : Optional[int]=9_9 , lowercase__ : Tuple=3_2 , lowercase__ : Any=5 , lowercase__ : Any=4 , lowercase__ : Any=3_7 , lowercase__ : Any=0.1 , lowercase__ : List[str]=0.1 , lowercase__ : Tuple=2_0 , lowercase__ : str=2 , lowercase__ : int=1 , lowercase__ : Dict=0 , ): __lowercase : int = parent __lowercase : str = batch_size __lowercase : Tuple = seq_length __lowercase : Tuple = is_training __lowercase : Dict = use_labels __lowercase : List[str] = vocab_size __lowercase : int = hidden_size __lowercase : Tuple = num_hidden_layers __lowercase : List[Any] = num_attention_heads __lowercase : int = intermediate_size __lowercase : Any = hidden_dropout_prob __lowercase : Tuple = attention_probs_dropout_prob __lowercase : List[Any] = max_position_embeddings __lowercase : int = eos_token_id __lowercase : Union[str, Any] = pad_token_id __lowercase : Union[str, Any] = bos_token_id def snake_case ( self : int ): __lowercase : Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __lowercase : Union[str, Any] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __lowercase : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) __lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase : Optional[int] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __lowercase : Optional[Any] = prepare_pegasus_inputs_dict(lowercase__ , lowercase__ , lowercase__ ) return config, inputs_dict def snake_case ( self : str , lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : Union[str, Any] ): __lowercase : Union[str, Any] = 2_0 __lowercase : List[Any] = model_class_name(lowercase__ ) __lowercase : Tuple = model.encode(inputs_dict["input_ids"] ) __lowercase ,__lowercase : Optional[Any] = ( 
inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) __lowercase : Any = model.init_cache(decoder_input_ids.shape[0] , lowercase__ , lowercase__ ) __lowercase : List[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" ) __lowercase : Union[str, Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __lowercase : Optional[int] = model.decode( decoder_input_ids[:, :-1] , lowercase__ , decoder_attention_mask=lowercase__ , past_key_values=lowercase__ , decoder_position_ids=lowercase__ , ) __lowercase : List[str] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) __lowercase : Union[str, Any] = model.decode( decoder_input_ids[:, -1:] , lowercase__ , decoder_attention_mask=lowercase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase__ , ) __lowercase : List[Any] = model.decode(lowercase__ , lowercase__ ) __lowercase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' ) def snake_case ( self : Union[str, Any] , lowercase__ : Union[str, Any] , lowercase__ : List[str] , lowercase__ : Optional[Any] ): __lowercase : Any = 2_0 __lowercase : Any = model_class_name(lowercase__ ) __lowercase : List[Any] = model.encode(inputs_dict["input_ids"] ) __lowercase ,__lowercase : Optional[Any] = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) __lowercase : Union[str, Any] = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __lowercase : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , lowercase__ , lowercase__ ) __lowercase : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __lowercase : str = model.decode( decoder_input_ids[:, :-1] , lowercase__ , decoder_attention_mask=lowercase__ , past_key_values=lowercase__ , decoder_position_ids=lowercase__ , ) __lowercase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) __lowercase : Dict = model.decode( decoder_input_ids[:, -1:] , lowercase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase__ , decoder_position_ids=lowercase__ , ) __lowercase : Union[str, Any] = model.decode(lowercase__ , lowercase__ , decoder_attention_mask=lowercase__ ) __lowercase : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' ) def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase=None, _lowerCamelCase=None, ) ->int: """simple docstring""" if attention_mask is None: __lowercase : List[str] = np.not_equal(_lowerCamelCase, config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __lowercase : Optional[int] = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape, dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ).astype(np.inta ), ], axis=-1, ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class lowerCAmelCase__ ( lowerCAmelCase_ , 
unittest.TestCase ): """simple docstring""" __UpperCAmelCase : int = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __UpperCAmelCase : Optional[int] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __UpperCAmelCase : Dict = True __UpperCAmelCase : int = False __UpperCAmelCase : Optional[int] = False __UpperCAmelCase : Optional[int] = False def snake_case ( self : List[Any] ): __lowercase : Optional[Any] = FlaxPegasusModelTester(self ) __lowercase : Optional[Any] = ConfigTester(self , config_class=lowercase__ ) def snake_case ( self : List[Any] ): self.config_tester.run_common_tests() def snake_case ( self : Optional[Any] ): __lowercase ,__lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(lowercase__ , lowercase__ , lowercase__ ) def snake_case ( self : Optional[int] ): __lowercase ,__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(lowercase__ , lowercase__ , lowercase__ ) def snake_case ( self : Tuple ): __lowercase ,__lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowercase : Union[str, Any] = self._prepare_for_class(lowercase__ , lowercase__ ) __lowercase : List[str] = model_class(lowercase__ ) @jax.jit def encode_jitted(lowercase__ : List[str] , lowercase__ : int=None , **lowercase__ : Tuple ): return model.encode(input_ids=lowercase__ , attention_mask=lowercase__ ) with self.subTest("JIT Enabled" ): __lowercase : List[Any] = encode_jitted(**lowercase__ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __lowercase : Optional[Any] = encode_jitted(**lowercase__ ).to_tuple() self.assertEqual(len(lowercase__ ) , len(lowercase__ ) ) for jitted_output, output in zip(lowercase__ , lowercase__ ): self.assertEqual(jitted_output.shape , output.shape ) def snake_case ( self : Optional[Any] ): __lowercase ,__lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowercase : Union[str, Any] = model_class(lowercase__ ) __lowercase : List[str] = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] ) __lowercase : Optional[int] = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(lowercase__ : Optional[int] , lowercase__ : Optional[int] , lowercase__ : Any ): return model.decode( decoder_input_ids=lowercase__ , decoder_attention_mask=lowercase__ , encoder_outputs=lowercase__ , ) with self.subTest("JIT Enabled" ): __lowercase : Tuple = decode_jitted(**lowercase__ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __lowercase : Any = decode_jitted(**lowercase__ ).to_tuple() self.assertEqual(len(lowercase__ ) , len(lowercase__ ) ) for jitted_output, output in zip(lowercase__ , lowercase__ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def snake_case ( self : Any ): for model_class_name in self.all_model_classes: __lowercase : int = model_class_name.from_pretrained("google/pegasus-large" , from_pt=lowercase__ ) __lowercase : Any = np.ones((1, 1) ) 
__lowercase : Tuple = model(lowercase__ ) self.assertIsNotNone(lowercase__ ) @slow def snake_case ( self : Optional[int] ): __lowercase : str = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" ) __lowercase : Optional[Any] = PegasusTokenizer.from_pretrained("google/pegasus-xsum" ) __lowercase : Any = [ " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ", ] __lowercase : Union[str, Any] = [ "California's largest electricity provider has turned off power to hundreds of thousands of customers.", "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.", ] __lowercase : Tuple = tokenizer(lowercase__ , return_tensors="np" , truncation=lowercase__ , max_length=5_1_2 , padding=lowercase__ ) __lowercase : Tuple = model.generate(**lowercase__ , num_beams=2 ).sequences __lowercase : str = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__ ) assert tgt_text == decoded
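# A hedged sketch of the Flax Pegasus summarization flow the slow test above checks,
# using the same "google/pegasus-xsum" checkpoint; requires jax and flax.
from transformers import FlaxPegasusForConditionalGeneration, PegasusTokenizer

model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
text = "PG&E stated it scheduled the blackouts in response to forecasts for high winds."
inputs = tokenizer([text], return_tensors="np", truncation=True, max_length=512, padding=True)
summary_ids = model.generate(**inputs, num_beams=2).sequences
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))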
"""simple docstring""" import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Any: """simple docstring""" if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ): _UpperCamelCase : Tuple = np.full((len(lowerCAmelCase__ ), sequence_length, 2) ,lowerCAmelCase__ ) else: _UpperCamelCase : str = np.full((len(lowerCAmelCase__ ), sequence_length) ,lowerCAmelCase__ ) for i, tensor in enumerate(lowerCAmelCase__ ): if padding_side == "right": if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ): _UpperCamelCase : Optional[int] = tensor[:sequence_length] else: _UpperCamelCase : Union[str, Any] = tensor[:sequence_length] else: if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ): _UpperCamelCase : List[Any] = tensor[:sequence_length] else: _UpperCamelCase : Union[str, Any] = tensor[:sequence_length] return out_tensor.tolist() def lowercase__ ( lowercase_ ) -> str: """simple docstring""" _UpperCamelCase : Dict = ord(lowerCAmelCase__ ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True _UpperCamelCase : Optional[Any] = unicodedata.category(lowerCAmelCase__ ) if cat.startswith("P" ): return True return False @dataclass class __SCREAMING_SNAKE_CASE ( __a ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :PreTrainedTokenizerBase SCREAMING_SNAKE_CASE__ :Union[bool, str, PaddingStrategy] = True SCREAMING_SNAKE_CASE__ :Optional[int] = None SCREAMING_SNAKE_CASE__ :Optional[int] = None SCREAMING_SNAKE_CASE__ :int = -100 SCREAMING_SNAKE_CASE__ :str = "pt" def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : List[str] ) -> List[str]: import torch _UpperCamelCase : Tuple = "label" if "label" in features[0].keys() else "labels" _UpperCamelCase : Optional[Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None _UpperCamelCase : Dict = self.tokenizer.pad( snake_case__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , ) if labels is None: return batch _UpperCamelCase : Any = torch.tensor(batch["entity_ids"] ).shape[1] _UpperCamelCase : Union[str, Any] = self.tokenizer.padding_side if padding_side == "right": _UpperCamelCase : Dict = [ list(snake_case__ ) + [self.label_pad_token_id] * (sequence_length - len(snake_case__ )) for label in labels ] else: _UpperCamelCase : int = [ [self.label_pad_token_id] * (sequence_length - len(snake_case__ )) + list(snake_case__ ) for label in labels ] _UpperCamelCase : Any = [feature["ner_tags"] for feature in features] _UpperCamelCase : List[Any] = padding_tensor(snake_case__ , -1 , snake_case__ , snake_case__ ) _UpperCamelCase : str = [feature["original_entity_spans"] for feature in features] _UpperCamelCase : List[str] = padding_tensor(snake_case__ , (-1, -1) , snake_case__ , snake_case__ ) _UpperCamelCase : Optional[int] = {k: torch.tensor(snake_case__ , dtype=torch.intaa ) for k, v in batch.items()} return batch
"""simple docstring""" from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class __SCREAMING_SNAKE_CASE : '''simple docstring''' SCREAMING_SNAKE_CASE__ :Any = LEDConfig SCREAMING_SNAKE_CASE__ :str = {} SCREAMING_SNAKE_CASE__ :List[str] = "gelu" def __init__( self : List[Any] , __a : Union[str, Any] , __a : List[Any]=13 , __a : int=7 , __a : str=True , __a : Any=False , __a : str=99 , __a : str=32 , __a : Union[str, Any]=2 , __a : Optional[Any]=4 , __a : List[Any]=37 , __a : List[Any]=0.1 , __a : Tuple=0.1 , __a : Dict=20 , __a : str=2 , __a : Dict=1 , __a : Any=0 , __a : List[Any]=4 , ) -> List[Any]: _UpperCamelCase : Optional[Any] = parent _UpperCamelCase : List[str] = batch_size _UpperCamelCase : str = seq_length _UpperCamelCase : str = is_training _UpperCamelCase : Any = use_labels _UpperCamelCase : Any = vocab_size _UpperCamelCase : List[str] = hidden_size _UpperCamelCase : Optional[Any] = num_hidden_layers _UpperCamelCase : Dict = num_attention_heads _UpperCamelCase : Optional[Any] = intermediate_size _UpperCamelCase : int = hidden_dropout_prob _UpperCamelCase : Dict = attention_probs_dropout_prob _UpperCamelCase : str = max_position_embeddings _UpperCamelCase : int = eos_token_id _UpperCamelCase : Dict = pad_token_id _UpperCamelCase : Optional[Any] = bos_token_id _UpperCamelCase : str = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after _UpperCamelCase : List[str] = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests _UpperCamelCase : int = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def __SCREAMING_SNAKE_CASE ( self : int ) -> str: _UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _UpperCamelCase : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _UpperCamelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 ) _UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCamelCase : List[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) _UpperCamelCase : Dict = prepare_led_inputs_dict(__a , __a , __a ) _UpperCamelCase : Union[str, Any] = tf.concat( [tf.zeros_like(__a )[:, :-1], tf.ones_like(__a )[:, -1:]] , axis=-1 , ) _UpperCamelCase : Union[str, Any] = global_attention_mask return config, inputs_dict def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : int ) -> Tuple: _UpperCamelCase : Tuple = TFLEDModel(config=__a ).get_decoder() _UpperCamelCase : Tuple = inputs_dict["input_ids"] _UpperCamelCase : int = input_ids[:1, :] _UpperCamelCase : List[str] = inputs_dict["attention_mask"][:1, :] _UpperCamelCase : List[Any] = 1 # first forward pass _UpperCamelCase : Any = model(__a , attention_mask=__a , use_cache=__a ) _UpperCamelCase, _UpperCamelCase : Union[str, Any] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _UpperCamelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) _UpperCamelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _UpperCamelCase : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 ) _UpperCamelCase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _UpperCamelCase : Tuple = model(__a , attention_mask=__a )[0] _UpperCamelCase : int = model(__a , attention_mask=__a , past_key_values=__a )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _UpperCamelCase : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _UpperCamelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx] _UpperCamelCase : Optional[int] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__a , __a , rtol=1e-3 ) def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=None ,lowercase_=None ,) -> Dict: """simple docstring""" if attention_mask is None: _UpperCamelCase : str = tf.cast(tf.math.not_equal(lowercase_ ,config.pad_token_id ) ,tf.inta ) if decoder_attention_mask is None: _UpperCamelCase : str = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ), 
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ), ] ,axis=-1 ,) if head_mask is None: _UpperCamelCase : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _UpperCamelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ :Any = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () SCREAMING_SNAKE_CASE__ :List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else () SCREAMING_SNAKE_CASE__ :List[str] = ( { "conversational": TFLEDForConditionalGeneration, "feature-extraction": TFLEDModel, "summarization": TFLEDForConditionalGeneration, "text2text-generation": TFLEDForConditionalGeneration, "translation": TFLEDForConditionalGeneration, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ :Tuple = True SCREAMING_SNAKE_CASE__ :str = False SCREAMING_SNAKE_CASE__ :Optional[Any] = False SCREAMING_SNAKE_CASE__ :int = False def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: _UpperCamelCase : int = TFLEDModelTester(self ) _UpperCamelCase : Any = ConfigTester(self , config_class=__a ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: self.config_tester.run_common_tests() def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: _UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__a ) def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: _UpperCamelCase, _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase : Optional[int] = tf.zeros_like(inputs_dict["attention_mask"] ) _UpperCamelCase : Union[str, Any] = 2 _UpperCamelCase : str = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , ) _UpperCamelCase : Dict = True _UpperCamelCase : str = self.model_tester.seq_length _UpperCamelCase : Union[str, Any] = self.model_tester.encoder_seq_length def check_decoder_attentions_output(__a : Optional[int] ): _UpperCamelCase : Optional[int] = outputs.decoder_attentions self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(__a : Optional[Any] ): _UpperCamelCase : Union[str, Any] = [t.numpy() for t in outputs.encoder_attentions] _UpperCamelCase : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: _UpperCamelCase : Dict = True _UpperCamelCase : Optional[Any] = False _UpperCamelCase : int = False _UpperCamelCase : 
Optional[int] = model_class(__a ) _UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) ) _UpperCamelCase : Any = len(__a ) self.assertEqual(config.output_hidden_states , __a ) check_encoder_attentions_output(__a ) if self.is_encoder_decoder: _UpperCamelCase : Optional[Any] = model_class(__a ) _UpperCamelCase : List[Any] = model(self._prepare_for_class(__a , __a ) ) self.assertEqual(config.output_hidden_states , __a ) check_decoder_attentions_output(__a ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] _UpperCamelCase : int = True _UpperCamelCase : Tuple = model_class(__a ) _UpperCamelCase : str = model(self._prepare_for_class(__a , __a ) ) self.assertEqual(config.output_hidden_states , __a ) check_encoder_attentions_output(__a ) # Check attention is always last and order is fine _UpperCamelCase : Any = True _UpperCamelCase : List[str] = True _UpperCamelCase : Tuple = model_class(__a ) _UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) ) self.assertEqual(model.config.output_hidden_states , __a ) check_encoder_attentions_output(__a ) @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." ) def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict: pass def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: # TODO: Head-masking not yet implement pass def lowercase__ ( lowercase_ ) -> Union[str, Any]: """simple docstring""" return tf.constant(lowercase_ ,dtype=tf.intaa ) lowerCamelCase__ = 1E-4 @slow @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: _UpperCamelCase : Any = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led # change to intended input here _UpperCamelCase : int = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] ) _UpperCamelCase : Tuple = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] ) _UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a ) _UpperCamelCase : Optional[int] = model(**__a )[0] _UpperCamelCase : Optional[int] = (1, 1024, 768) self.assertEqual(output.shape , __a ) # change to expected output here _UpperCamelCase : Tuple = tf.convert_to_tensor( [[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , ) tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 ) def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str: _UpperCamelCase : Optional[int] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ) # change to intended input here _UpperCamelCase : Optional[int] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] ) _UpperCamelCase : List[str] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] ) _UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a ) _UpperCamelCase : Union[str, Any] = model(**__a )[0] _UpperCamelCase : int = (1, 1024, model.config.vocab_size) self.assertEqual(output.shape , __a ) # change to expected output here _UpperCamelCase : Optional[int] = tf.convert_to_tensor( [[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , ) tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 , rtol=1e-3 )
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _lowercase ( unittest.TestCase ): def __init__( self , A__ , A__=7 , A__=3 , A__=18 , A__=30 , A__=4_00 , A__=True , A__=None , A__=True , ) -> Tuple: snake_case = size if size is not None else {'''height''': 18, '''width''': 18} snake_case = parent snake_case = batch_size snake_case = num_channels snake_case = image_size snake_case = min_resolution snake_case = max_resolution snake_case = do_resize snake_case = size snake_case = apply_ocr def UpperCamelCase ( self ) -> int: return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class _lowercase ( snake_case_ , unittest.TestCase ): _UpperCAmelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None def UpperCamelCase ( self ) -> str: snake_case = LayoutLMvaImageProcessingTester(self ) @property def UpperCamelCase ( self ) -> int: return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase ( self ) -> List[Any]: snake_case = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A__ , '''do_resize''' ) ) self.assertTrue(hasattr(A__ , '''size''' ) ) self.assertTrue(hasattr(A__ , '''apply_ocr''' ) ) def UpperCamelCase ( self ) -> str: snake_case = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def UpperCamelCase ( self ) -> Any: pass def UpperCamelCase ( self ) -> Optional[int]: # Initialize image_processing snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ ) for image in image_inputs: self.assertIsInstance(A__ , Image.Image ) # Test not batched input snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) self.assertIsInstance(encoding.words , A__ ) self.assertIsInstance(encoding.boxes , A__ ) # Test batched snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def UpperCamelCase ( self ) -> Union[str, Any]: # Initialize image_processing snake_case = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ ) for image in image_inputs: self.assertIsInstance(A__ , np.ndarray ) # Test not batched input snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values 
self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def UpperCamelCase ( self ) -> Union[str, Any]: # Initialize image_processing snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ ) for image in image_inputs: self.assertIsInstance(A__ , torch.Tensor ) # Test not batched input snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def UpperCamelCase ( self ) -> Optional[Any]: # with apply_OCR = True snake_case = LayoutLMvaImageProcessor() from datasets import load_dataset snake_case = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' ) snake_case = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) snake_case = image_processing(A__ , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 snake_case = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', 
'''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 snake_case = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], 
[3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , A__ ) self.assertListEqual(encoding.boxes , A__ ) # with apply_OCR = False snake_case = LayoutLMvaImageProcessor(apply_ocr=A__ ) snake_case = image_processing(A__ , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
# Lookup table: sum of squared digits for every possible 5-digit chunk.
DIGITS_SQUARED = [sum(int(c) ** 2 for c in str(i)) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``.

    Speed is improved slightly by consuming five digits at a time through the
    precomputed lookup table.
    """
    sum_of_digits_squared = 0
    while number:
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are two chains: one ends with 89 (declaring chain member 58 first keeps
# the number of iterations low for all members to be checked), the other ends
# with 1 and contains only the element 1. So 58 and 1 are seeded into the cache
# at the start. A flat array is used instead of a dictionary to speed up lookups.
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True    # the chain starting at 1 ends in 1
CHAINS[57] = False  # the chain starting at 58 ends in 89


def chain(number: int) -> bool:
    """Return True if the chain starting at ``number`` ends in 1, False if it ends in 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore[return-value]

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    # Appending zeros does not change the digit-square sum, so those numbers
    # share the same chain ending and can be cached in the same pass.
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count how many starting values below ``number`` produce a chain ending in 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
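# Quick sanity checks (my own assertions, using the example chains quoted in the
# Project Euler problem statement: 44 -> 32 -> 13 -> 10 -> 1 and 85 -> 89 -> ...).
assert next_number(44) == 32  # 4^2 + 4^2
assert next_number(32) == 13  # 3^2 + 2^2
assert next_number(85) == 89  # 8^2 + 5^2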
def longest_distance(graph: dict[int, list[int]]) -> None:
    """Print the number of vertices on the longest path in a directed acyclic graph.

    Uses Kahn's topological ordering: vertices are dequeued once their in-degree
    drops to zero, and distances are relaxed along each outgoing edge.
    """
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)  # every vertex is a path of one vertex by itself

    # Compute in-degrees.
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # Start from every vertex that has no incoming edges.
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of the sample graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
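# For the sample graph above this prints 5: the longest chain of vertices is
# 0 -> 2 -> 5 -> 6 -> 7 (a hand-traced check, not output captured elsewhere).
# A smaller illustrative call: a chain 0 -> 1 -> 2 plus a shortcut edge 0 -> 2
# still has a longest path of three vertices, so this prints 3.
longest_distance({0: [1, 2], 1: [2], 2: []})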
def solution(length: int = 50) -> int:
    """Count the ways a row of ``length`` units can be filled with grey squares and
    red blocks of length at least three, any two red blocks being separated by at
    least one grey square (the all-grey row is included in the count)."""
    ways_number = [1] * (length + 1)  # the all-grey arrangement

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            # Place the leftmost red block at every start position that still
            # leaves room for a separator square and an arbitrary suffix.
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # Plus the single arrangement where this block sits flush against
            # the right end of the row, preceded only by grey squares.
            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
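# Quick sanity checks (my own assertions; the value for a seven-unit row is the
# one quoted in the Project Euler problem statement):
assert solution(3) == 2   # all grey, or a single red block of length 3
assert solution(4) == 4   # all grey, a block of 3 in two positions, or a block of 4
assert solution(7) == 17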
'''simple docstring''' import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) SCREAMING_SNAKE_CASE__ = pytest.mark.integration @pytest.mark.parametrize("path" ,["paws", "csv"] ) def lowerCamelCase ( _snake_case : Union[str, Any] ,_snake_case : str ): '''simple docstring''' inspect_dataset(_snake_case ,_snake_case ) lowercase__ = path + ".py" assert script_name in os.listdir(_snake_case ) assert "__pycache__" not in os.listdir(_snake_case ) @pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" ) @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" ) @pytest.mark.parametrize("path" ,["accuracy"] ) def lowerCamelCase ( _snake_case : List[Any] ,_snake_case : Dict ): '''simple docstring''' inspect_metric(_snake_case ,_snake_case ) lowercase__ = path + ".py" assert script_name in os.listdir(_snake_case ) assert "__pycache__" not in os.listdir(_snake_case ) @pytest.mark.parametrize( "path, config_name, expected_splits" ,[ ("squad", "plain_text", ["train", "validation"]), ("dalle-mini/wit", "dalle-mini--wit", ["train"]), ("paws", "labeled_final", ["train", "test", "validation"]), ] ,) def lowerCamelCase ( _snake_case : List[str] ,_snake_case : Dict ,_snake_case : Union[str, Any] ): '''simple docstring''' lowercase__ = get_dataset_config_info(_snake_case ,config_name=_snake_case ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( "path, config_name, expected_exception" ,[ ("paws", None, ValueError), ] ,) def lowerCamelCase ( _snake_case : Any ,_snake_case : List[str] ,_snake_case : Any ): '''simple docstring''' with pytest.raises(_snake_case ): get_dataset_config_info(_snake_case ,config_name=_snake_case ) @pytest.mark.parametrize( "path, expected" ,[ ("squad", "plain_text"), ("acronym_identification", "default"), ("lhoestq/squad", "plain_text"), ("lhoestq/test", "default"), ("lhoestq/demo1", "lhoestq--demo1"), ("dalle-mini/wit", "dalle-mini--wit"), ] ,) def lowerCamelCase ( _snake_case : int ,_snake_case : List[str] ): '''simple docstring''' lowercase__ = get_dataset_config_names(_snake_case ) assert expected in config_names @pytest.mark.parametrize( "path, expected_configs, expected_splits_in_first_config" ,[ ("squad", ["plain_text"], ["train", "validation"]), ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]), ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]), ] ,) def lowerCamelCase ( _snake_case : Union[str, Any] ,_snake_case : Any ,_snake_case : int ): '''simple docstring''' lowercase__ = get_dataset_infos(_snake_case ) assert list(infos.keys() ) == expected_configs lowercase__ = expected_configs[0] assert expected_config in infos lowercase__ = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( "path, expected_config, expected_splits" ,[ ("squad", "plain_text", ["train", "validation"]), ("dalle-mini/wit", "dalle-mini--wit", ["train"]), ("paws", "labeled_final", ["train", "test", "validation"]), ] ,) def lowerCamelCase ( _snake_case : Optional[int] ,_snake_case : Any ,_snake_case : List[Any] ): '''simple docstring''' lowercase__ = get_dataset_infos(_snake_case ) assert expected_config in infos lowercase__ = infos[expected_config] assert info.config_name == expected_config assert 
list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( "path, config_name, expected_exception" ,[ ("paws", None, ValueError), ] ,) def lowerCamelCase ( _snake_case : str ,_snake_case : Tuple ,_snake_case : Optional[Any] ): '''simple docstring''' with pytest.raises(_snake_case ): get_dataset_split_names(_snake_case ,config_name=_snake_case )
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


# NOTE: the class and method names below are descriptive reconstructions; the
# original identifiers were not preserved in this copy of the file.
class EmbeddingNormalizer(ModelMixin, ConfigMixin):
    """Holds a learnable per-dimension mean and standard deviation and maps
    embedding vectors into and out of the normalized space."""

    @register_to_config
    def __init__(self, embedding_dim: int = 768) -> None:
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ) -> "EmbeddingNormalizer":
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds: torch.Tensor) -> torch.Tensor:
        """Normalize ``embeds`` using the stored mean and standard deviation."""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds: torch.Tensor) -> torch.Tensor:
        """Invert :meth:`scale`."""
        embeds = (embeds * self.std) + self.mean
        return embeds
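# A minimal usage sketch (my own example, using the reconstructed names above;
# requires torch). With the default zero mean and unit std, scale/unscale
# round-trip to the identity, and they stay inverses once the parameters are set.
if __name__ == "__main__":
    normalizer = EmbeddingNormalizer(embedding_dim=4)
    x = torch.tensor([[1.0, -2.0, 0.5, 3.0]])
    assert torch.allclose(normalizer.unscale(normalizer.scale(x)), x)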
"""simple docstring""" import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() __lowercase = logging.get_logger('''transformers.models.encodec''') __lowercase = { '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''', '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''', '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''', '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''', } __lowercase = { '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''', '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''', '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''', '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''', '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''', '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''', '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''', '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''', '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''', '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''', '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''', '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''', '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''', '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''', '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''', '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''', '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''', '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''', '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''', } __lowercase = { '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''', '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''', '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''', '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''', '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''', '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''', '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''', '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''', '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''', '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''', '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''', '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''', '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''', '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''', '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''', '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''', '''encoder.model.12.conv.norm''': 
'''encoder.layers.12.norm''', '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''', } __lowercase = { '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''', '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''', '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''', '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''', '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''', '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''', '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''', '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''', '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''', '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''', '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''', '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''', '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''', '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''', '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''', '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''', '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''', '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''', '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''', } __lowercase = { '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''', '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''', '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''', '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''', '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''', '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''', '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''', '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''', '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''', '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''', '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''', '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''', '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''', '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''', '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''', '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''', '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''', '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''', } __lowercase = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } __lowercase = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } __lowercase = [] __lowercase = [] def lowerCAmelCase (__UpperCamelCase : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : int ): """simple docstring""" for attribute in key.split('''.''' ): __UpperCamelCase =getattr(__UpperCamelCase , __UpperCamelCase ) if weight_type is not None: __UpperCamelCase =getattr(__UpperCamelCase , __UpperCamelCase ).shape else: __UpperCamelCase =hf_pointer.shape if hf_shape != 
value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __UpperCamelCase =value elif weight_type == "weight_g": __UpperCamelCase =value elif weight_type == "weight_v": __UpperCamelCase =value elif weight_type == "bias": __UpperCamelCase =value elif weight_type == "running_mean": __UpperCamelCase =value elif weight_type == "running_var": __UpperCamelCase =value elif weight_type == "num_batches_tracked": __UpperCamelCase =value elif weight_type == "weight_ih_l0": __UpperCamelCase =value elif weight_type == "weight_hh_l0": __UpperCamelCase =value elif weight_type == "bias_ih_l0": __UpperCamelCase =value elif weight_type == "bias_hh_l0": __UpperCamelCase =value elif weight_type == "weight_ih_l1": __UpperCamelCase =value elif weight_type == "weight_hh_l1": __UpperCamelCase =value elif weight_type == "bias_ih_l1": __UpperCamelCase =value elif weight_type == "bias_hh_l1": __UpperCamelCase =value else: __UpperCamelCase =value logger.info(F"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" ) def lowerCAmelCase (__UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] ): """simple docstring""" for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: __UpperCamelCase , __UpperCamelCase =key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def lowerCAmelCase (__UpperCamelCase : List[Any] , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] ): """simple docstring""" __UpperCamelCase =[] if model_name == "encodec_24khz" or "encodec_32khz": __UpperCamelCase =MAPPING_24K elif model_name == "encodec_48khz": __UpperCamelCase =MAPPING_48K else: raise ValueError(F"""Unsupported model: {model_name}""" ) for name, value in orig_dict.items(): if should_ignore(__UpperCamelCase , __UpperCamelCase ): logger.info(F"""{name} was ignored""" ) continue __UpperCamelCase =False for key, mapped_key in MAPPING.items(): if "*" in key: __UpperCamelCase , __UpperCamelCase =key.split('''.*.''' ) if prefix in name and suffix in name: __UpperCamelCase =suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ): continue __UpperCamelCase =True if "*" in mapped_key: __UpperCamelCase =name.split(__UpperCamelCase )[0].split('''.''' )[-2] __UpperCamelCase =mapped_key.replace('''*''' , __UpperCamelCase ) if "weight_g" in name: __UpperCamelCase ='''weight_g''' elif "weight_v" in name: __UpperCamelCase ='''weight_v''' elif "weight_ih_l0" in name: __UpperCamelCase ='''weight_ih_l0''' elif "weight_hh_l0" in name: __UpperCamelCase ='''weight_hh_l0''' elif "bias_ih_l0" in name: __UpperCamelCase ='''bias_ih_l0''' elif "bias_hh_l0" in name: __UpperCamelCase ='''bias_hh_l0''' elif "weight_ih_l1" in name: __UpperCamelCase ='''weight_ih_l1''' elif "weight_hh_l1" in name: __UpperCamelCase ='''weight_hh_l1''' elif "bias_ih_l1" in name: __UpperCamelCase ='''bias_ih_l1''' elif "bias_hh_l1" in name: __UpperCamelCase ='''bias_hh_l1''' elif "bias" in name: __UpperCamelCase ='''bias''' elif "weight" in name: __UpperCamelCase ='''weight''' elif "running_mean" in name: __UpperCamelCase ='''running_mean''' elif "running_var" in name: __UpperCamelCase ='''running_var''' elif "num_batches_tracked" in name: __UpperCamelCase 
='''num_batches_tracked''' else: __UpperCamelCase =None set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(F"""Unused weights: {unused_weights}""" ) @torch.no_grad() def lowerCAmelCase (__UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : List[str]=None , __UpperCamelCase : Dict=None , ): """simple docstring""" if config_path is not None: __UpperCamelCase =EncodecConfig.from_pretrained(__UpperCamelCase ) else: __UpperCamelCase =EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": __UpperCamelCase =[8, 5, 4, 4] __UpperCamelCase =[2.2] __UpperCamelCase =6_4 __UpperCamelCase =3_2_0_0_0 __UpperCamelCase =2_0_4_8 __UpperCamelCase =False __UpperCamelCase =False __UpperCamelCase =False elif model_name == "encodec_48khz": __UpperCamelCase =[8, 5, 4, 2] __UpperCamelCase =[3.0, 6.0, 1_2.0, 2_4.0] __UpperCamelCase =4_8_0_0_0 __UpperCamelCase =2 __UpperCamelCase =False __UpperCamelCase ='''time_group_norm''' __UpperCamelCase =True __UpperCamelCase =1.0 __UpperCamelCase =0.0_1 else: raise ValueError(F"""Unknown model name: {model_name}""" ) __UpperCamelCase =EncodecModel(__UpperCamelCase ) __UpperCamelCase =EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(__UpperCamelCase ) __UpperCamelCase =torch.load(__UpperCamelCase ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights __UpperCamelCase =original_checkpoint['''best_state'''] recursively_load_weights(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) if repo_id: print('''Pushing to the hub...''' ) feature_extractor.push_to_hub(__UpperCamelCase ) model.push_to_hub(__UpperCamelCase ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() parser.add_argument( '''--model''', default='''encodec_24khz''', type=str, help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) __lowercase = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
"""simple docstring""" import heapq as hq import math from collections.abc import Iterator class _lowercase : """simple docstring""" def __init__( self : Tuple , UpperCamelCase__ : int ) -> List[str]: '''simple docstring''' __UpperCamelCase =str(id_ ) __UpperCamelCase =None __UpperCamelCase =None __UpperCamelCase =[] __UpperCamelCase ={} # {vertex:distance} def __lt__( self : Dict , UpperCamelCase__ : Optional[Any] ) -> Tuple: '''simple docstring''' return self.key < other.key def __repr__( self : int ) -> Tuple: '''simple docstring''' return self.id def UpperCAmelCase_ ( self : List[Any] , UpperCamelCase__ : int ) -> Optional[Any]: '''simple docstring''' self.neighbors.append(UpperCamelCase__ ) def UpperCAmelCase_ ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] ) -> Tuple: '''simple docstring''' __UpperCamelCase =weight def lowerCAmelCase (__UpperCamelCase : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : List[str] , __UpperCamelCase : int ): """simple docstring""" graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] , __UpperCamelCase ) graph[b - 1].add_edge(graph[a - 1] , __UpperCamelCase ) def lowerCAmelCase (__UpperCamelCase : list , __UpperCamelCase : Vertex ): """simple docstring""" __UpperCamelCase =[] for u in graph: __UpperCamelCase =math.inf __UpperCamelCase =None __UpperCamelCase =0 __UpperCamelCase =graph[:] while q: __UpperCamelCase =min(__UpperCamelCase ) q.remove(__UpperCamelCase ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): __UpperCamelCase =u __UpperCamelCase =u.edges[v.id] for i in range(1 , len(__UpperCamelCase ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def lowerCAmelCase (__UpperCamelCase : list , __UpperCamelCase : Vertex ): """simple docstring""" for u in graph: __UpperCamelCase =math.inf __UpperCamelCase =None __UpperCamelCase =0 __UpperCamelCase =list(__UpperCamelCase ) hq.heapify(__UpperCamelCase ) while h: __UpperCamelCase =hq.heappop(__UpperCamelCase ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): __UpperCamelCase =u __UpperCamelCase =u.edges[v.id] hq.heapify(__UpperCamelCase ) for i in range(1 , len(__UpperCamelCase ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def lowerCAmelCase (): """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class snake_case ( __snake_case ): """simple docstring""" __lowerCAmelCase = ["""image_processor""", """tokenizer"""] __lowerCAmelCase = """OwlViTImageProcessor""" __lowerCAmelCase = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ): __lowercase = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , lowerCAmelCase_ , ) __lowercase = kwargs.pop("feature_extractor" ) __lowercase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(lowerCAmelCase_ , lowerCAmelCase_ ) def __call__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_="max_length" , lowerCAmelCase_="np" , **lowerCAmelCase_ ): if text is None and query_images is None and images is None: raise ValueError( "You have to specify at least one text or query image or image. All three cannot be none." ) if text is not None: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or (isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and not isinstance(text[0] , lowerCAmelCase_ )): __lowercase = [self.tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )] elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and isinstance(text[0] , lowerCAmelCase_ ): __lowercase = [] # Maximum number of queries across batch __lowercase = max([len(lowerCAmelCase_ ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(lowerCAmelCase_ ) != max_num_queries: __lowercase = t + [" "] * (max_num_queries - len(lowerCAmelCase_ )) __lowercase = self.tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ) encodings.append(lowerCAmelCase_ ) else: raise TypeError("Input text should be a string, a list of strings or a nested list of strings" ) if return_tensors == "np": __lowercase = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) __lowercase = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp __lowercase = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 ) __lowercase = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch __lowercase = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 ) __lowercase = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf __lowercase = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 ) __lowercase = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 ) else: raise ValueError("Target return tensor type could not be returned" ) __lowercase = BatchEncoding() __lowercase = input_ids __lowercase = attention_mask if query_images is not None: 
__lowercase = BatchEncoding() __lowercase = self.image_processor( lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ).pixel_values __lowercase = query_pixel_values if images is not None: __lowercase = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ) if text is not None and images is not None: __lowercase = image_features.pixel_values return encoding elif query_images is not None and images is not None: __lowercase = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**lowerCAmelCase_ ) , tensor_type=lowerCAmelCase_ ) def snake_case__ ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ): return self.image_processor.post_process(*lowerCAmelCase_ , **lowerCAmelCase_ ) def snake_case__ ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ): return self.image_processor.post_process_object_detection(*lowerCAmelCase_ , **lowerCAmelCase_ ) def snake_case__ ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ): return self.image_processor.post_process_image_guided_detection(*lowerCAmelCase_ , **lowerCAmelCase_ ) def snake_case__ ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ): return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ ) def snake_case__ ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ): return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ ) @property def snake_case__ ( self ): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowerCAmelCase_ , ) return self.image_processor_class @property def snake_case__ ( self ): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowerCAmelCase_ , ) return self.image_processor
import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class snake_case ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=2 , lowerCAmelCase_=56 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=2 , lowerCAmelCase_=2 , lowerCAmelCase_=7 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=512 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=4 , lowerCAmelCase_="block_sparse" , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=2 , lowerCAmelCase_=3 , ): __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_attention_mask __lowercase = use_token_type_ids __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = type_vocab_size __lowercase = type_sequence_label_size __lowercase = initializer_range __lowercase = num_choices __lowercase = rescale_embeddings __lowercase = attention_type __lowercase = use_bias __lowercase = block_size __lowercase = num_random_blocks def snake_case__ ( self ): __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase = None if self.use_attention_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None if self.use_token_type_ids: __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def snake_case__ ( self ): __lowercase = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs __lowercase = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_flax class snake_case ( __snake_case ,unittest.TestCase ): """simple docstring""" __lowerCAmelCase = ( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, 
FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) __lowerCAmelCase = False __lowerCAmelCase = False def snake_case__ ( self ): __lowercase = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def snake_case__ ( self ): super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def snake_case__ ( self ): super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def snake_case__ ( self ): super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def snake_case__ ( self ): super().test_hidden_states_output() @slow def snake_case__ ( self ): for model_class_name in self.all_model_classes: __lowercase = model_class_name.from_pretrained("google/bigbird-roberta-base" ) self.assertIsNotNone(lowerCAmelCase_ ) def snake_case__ ( self ): if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def snake_case__ ( self ): __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowercase = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) __lowercase = model_class(lowerCAmelCase_ ) @jax.jit def model_jitted(lowerCAmelCase_ , lowerCAmelCase_=None , **lowerCAmelCase_ ): return model(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ ) with self.subTest("JIT Enabled" ): __lowercase = model_jitted(**lowerCAmelCase_ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): __lowercase = model_jitted(**lowerCAmelCase_ ).to_tuple() self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) ) for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1E-5 , lowerCAmelCase_="outputs" , lowerCAmelCase_=None ): # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version, # an effort was done to return `attention_probs` (yet to be verified). if name.startswith("outputs.attentions" ): return else: super().check_pt_flax_outputs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
321
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


# NOTE: class and method names are descriptive reconstructions; the ``test_``
# prefix is required for unittest discovery.
@require_cpu
class CPULauncherTester(unittest.TestCase):
    """Smoke tests that launch Accelerate's bundled test scripts on CPU."""

    def test_launch_test_script(self):
        debug_launcher(test_script.main)

    def test_launch_test_ops(self):
        debug_launcher(test_ops.main)
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self ): '''simple docstring''' a_ : int = """ylacombe/bark-small""" a_ : Dict = tempfile.mkdtemp() a_ : Union[str, Any] = """en_speaker_1""" a_ : Dict = """This is a test string""" a_ : Optional[int] = """speaker_embeddings_path.json""" a_ : int = """speaker_embeddings""" def _lowerCAmelCase ( self , **lowerCAmelCase_ ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCAmelCase_ ) def _lowerCAmelCase ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self ): '''simple docstring''' a_ : Union[str, Any] = self.get_tokenizer() a_ : Optional[Any] = BarkProcessor(tokenizer=lowerCAmelCase_ ) processor.save_pretrained(self.tmpdirname ) a_ : List[str] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def _lowerCAmelCase ( self ): '''simple docstring''' a_ : Any = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) a_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) a_ : Any = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def _lowerCAmelCase ( self ): '''simple docstring''' a_ : List[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) a_ : Dict = 35 a_ : List[Any] = 2 a_ : Optional[int] = 8 a_ : int = { """semantic_prompt""": np.ones(lowerCAmelCase_ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset a_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowerCAmelCase_ ) a_ : Tuple = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from npz file a_ : Any = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(lowerCAmelCase_ , **lowerCAmelCase_ ) a_ : Any = processor(text=self.input_string , voice_preset=lowerCAmelCase_ ) a_ : List[Any] = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase_ , np.array([] ) ).tolist() ) # test loading voice preset from the hub a_ : Any = processor(text=self.input_string , voice_preset=self.voice_preset ) def _lowerCAmelCase ( self ): '''simple docstring''' a_ : Tuple = self.get_tokenizer() a_ : Union[str, Any] = BarkProcessor(tokenizer=lowerCAmelCase_ ) a_ : Optional[int] = processor(text=self.input_string ) a_ : Optional[int] = tokenizer( self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCAmelCase_ , 
return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
"""simple docstring""" UpperCamelCase = """Tobias Carryer""" from time import time class UpperCamelCase__ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=int(time() ) ) -> Any: # noqa: B008 A__ = multiplier A__ = increment A__ = modulo A__ = seed def snake_case__ ( self ) -> Any: A__ = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. UpperCamelCase = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31) while True: print(lcg.next_number())
import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def __magic_name__ ( __lowerCAmelCase : Any ) -> Optional[Any]: __lowerCamelCase = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def __magic_name__ ( __lowerCAmelCase : List[str] ) -> List[str]: __lowerCamelCase , __lowerCamelCase = emb.weight.shape __lowerCamelCase = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) __lowerCamelCase = emb.weight.data return lin_layer def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : int=None ) -> Optional[int]: __lowerCamelCase = {} for old_key in state_dict.keys(): __lowerCamelCase = old_key if "moe_layer.experts." in key: if expert_idx is not None: __lowerCamelCase = key.replace('''moe_layer.experts.0''' , f'''ffn.experts.expert_{expert_idx}''' ) else: __lowerCamelCase = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' ) if "gate" in key: __lowerCamelCase = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' ) if "fc2" and "experts" not in key: __lowerCamelCase = key.replace('''.fc2.''' , '''.ffn.fc2.''' ) if "fc1" and "experts" not in key: __lowerCamelCase = key.replace('''.fc1.''' , '''.ffn.fc1.''' ) if ".encoder_attn." in key: __lowerCamelCase = key.replace('''.encoder_attn.''' , '''.cross_attention.''' ) if "encoder_attn_layer_norm" in key: __lowerCamelCase = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' ) if "final_layer_norm" in key: __lowerCamelCase = key.replace('''final_layer_norm''' , '''ff_layer_norm''' ) __lowerCamelCase = state_dict[old_key] return new_dict def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str = WEIGHTS_NAME ) -> Dict: __lowerCamelCase = [] __lowerCamelCase = 0 os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) for expert in range(__lowerCAmelCase ): __lowerCamelCase = switch_checkpoint_path + f'''-rank-{expert}.pt''' if os.path.isfile(__lowerCAmelCase ): __lowerCamelCase = torch.load(__lowerCAmelCase )['''model'''] remove_ignore_keys_(__lowerCAmelCase ) __lowerCamelCase = rename_fairseq_keys(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = os.path.join( __lowerCAmelCase , weights_name.replace('''.bin''' , f'''-{len(__lowerCAmelCase )+1:05d}-of-???.bin''' ) ) torch.save(__lowerCAmelCase , __lowerCAmelCase ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(__lowerCAmelCase )[0]].dtype ) # Add the last block __lowerCamelCase = os.path.join(__lowerCAmelCase , weights_name.replace('''.bin''' , f'''-{len(__lowerCAmelCase )+1:05d}-of-???.bin''' ) ) __lowerCamelCase = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model'''] remove_ignore_keys_(__lowerCAmelCase ) __lowerCamelCase = rename_fairseq_keys(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = shared_weights['''decoder.embed_tokens.weight'''] 
sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(__lowerCAmelCase ) == 1: __lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) torch.save(__lowerCAmelCase , __lowerCAmelCase ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(__lowerCAmelCase , __lowerCAmelCase ) # Otherwise, let's build the index __lowerCamelCase = {} for idx, shard in enumerate(__lowerCAmelCase ): __lowerCamelCase = weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-{len(__lowerCAmelCase ):05d}.bin''' ) __lowerCamelCase = os.path.join(__lowerCAmelCase , weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) for key in shard: __lowerCamelCase = shard_file # Add the metadata __lowerCamelCase = {'''total_size''': total_size} __lowerCamelCase = {'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , '''w''' , encoding='''utf-8''' ) as f: __lowerCamelCase = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + '''\n''' f.write(__lowerCAmelCase ) return metadata, index if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--nllb_moe_checkpoint_path", default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000", type=str, required=False, help="Path to a directory containing a folder per layer. Follows the original Google format.", ) parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model") parser.add_argument( "--pytorch_dump_folder_path", default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b", type=str, required=False, help="Path to the output pytorch model.", ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) SCREAMING_SNAKE_CASE__ : str = NllbMoeConfig.from_pretrained( "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) SCREAMING_SNAKE_CASE__ : Dict = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("Done") model.save_pretrained(args.pytorch_dump_folder_path)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]


if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __snake_case : '''simple docstring''' @staticmethod def _a ( *a_ , **a_ ): pass @is_pipeline_test @require_vision @require_timm @require_torch class __snake_case ( unittest.TestCase): '''simple docstring''' UpperCamelCase__ : Dict = MODEL_FOR_OBJECT_DETECTION_MAPPING def _a ( self , a_ , a_ , a_ ): a__ = ObjectDetectionPipeline(model=a_ , image_processor=a_ ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def _a ( self , a_ , a_ ): a__ = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 ) self.assertGreater(len(a_ ) , 0 ) for detected_object in outputs: self.assertEqual( a_ , { """score""": ANY(a_ ), """label""": ANY(a_ ), """box""": {"""xmin""": ANY(a_ ), """ymin""": ANY(a_ ), """xmax""": ANY(a_ ), """ymax""": ANY(a_ )}, } , ) import datasets a__ = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" ) a__ = [ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # L dataset[2]["""file"""], ] a__ = object_detector(a_ , threshold=0.0 ) self.assertEqual(len(a_ ) , len(a_ ) ) for outputs in batch_outputs: self.assertGreater(len(a_ ) , 0 ) for detected_object in outputs: self.assertEqual( a_ , { """score""": ANY(a_ ), """label""": ANY(a_ ), """box""": {"""xmin""": ANY(a_ ), """ymin""": ANY(a_ ), """xmax""": ANY(a_ ), """ymax""": ANY(a_ )}, } , ) @require_tf @unittest.skip("""Object detection not implemented in TF""" ) def _a ( self ): pass @require_torch def _a ( self ): a__ = """hf-internal-testing/tiny-detr-mobilenetsv3""" a__ = AutoModelForObjectDetection.from_pretrained(a_ ) a__ = AutoFeatureExtractor.from_pretrained(a_ ) a__ = ObjectDetectionPipeline(model=a_ , feature_extractor=a_ ) a__ = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ] , ) a__ = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ [ {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ], [ {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ], ] , ) @require_torch @slow 
def _a ( self ): a__ = """facebook/detr-resnet-50""" a__ = AutoModelForObjectDetection.from_pretrained(a_ ) a__ = AutoFeatureExtractor.from_pretrained(a_ ) a__ = ObjectDetectionPipeline(model=a_ , feature_extractor=a_ ) a__ = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) a__ = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], ] , ) @require_torch @slow def _a ( self ): a__ = """facebook/detr-resnet-50""" a__ = pipeline("""object-detection""" , model=a_ ) a__ = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) a__ = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", 
"""http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], ] , ) @require_torch @slow def _a ( self ): a__ = 0.9_985 a__ = """facebook/detr-resnet-50""" a__ = pipeline("""object-detection""" , model=a_ ) a__ = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=a_ ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) @require_torch @require_pytesseract @slow def _a ( self ): a__ = """Narsil/layoutlmv3-finetuned-funsd""" a__ = 0.9_993 a__ = pipeline("""object-detection""" , model=a_ , threshold=a_ ) a__ = object_detector( """https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}}, {"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}}, ] , )
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _lowercase: List[Any] = { '''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''], '''tokenization_tapas''': ['''TapasTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase: str = [ '''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TapasForMaskedLM''', '''TapasForQuestionAnswering''', '''TapasForSequenceClassification''', '''TapasModel''', '''TapasPreTrainedModel''', '''load_tf_weights_in_tapas''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase: Dict = [ '''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFTapasForMaskedLM''', '''TFTapasForQuestionAnswering''', '''TFTapasForSequenceClassification''', '''TFTapasModel''', '''TFTapasPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys _lowercase: Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowercase: Dict = logging.get_logger(__name__) def _lowerCamelCase ( snake_case ): _lowerCAmelCase = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: _lowerCAmelCase = [144, 192, 240] _lowerCAmelCase = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: _lowerCAmelCase = [96, 120, 144] _lowerCAmelCase = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: _lowerCAmelCase = [64, 80, 96] _lowerCAmelCase = [16, 16, 24, 48, 64, 80, 320] _lowerCAmelCase = 0.05 _lowerCAmelCase = 2.0 if mobilevit_name.startswith('deeplabv3_' ): _lowerCAmelCase = 512 _lowerCAmelCase = 16 _lowerCAmelCase = 21 _lowerCAmelCase = 'pascal-voc-id2label.json' else: _lowerCAmelCase = 1_000 _lowerCAmelCase = 'imagenet-1k-id2label.json' _lowerCAmelCase = 'huggingface/label-files' _lowerCAmelCase = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) ) _lowerCAmelCase = {int(snake_case ): v for k, v in idalabel.items()} _lowerCAmelCase = idalabel _lowerCAmelCase = {v: k for k, v in idalabel.items()} return config def _lowerCamelCase ( snake_case , snake_case=False ): for i in range(1 , 6 ): if F'layer_{i}.' in name: _lowerCAmelCase = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.' ) if "conv_1." in name: _lowerCAmelCase = name.replace('conv_1.' , 'conv_stem.' ) if ".block." in name: _lowerCAmelCase = name.replace('.block.' , '.' ) if "exp_1x1" in name: _lowerCAmelCase = name.replace('exp_1x1' , 'expand_1x1' ) if "red_1x1" in name: _lowerCAmelCase = name.replace('red_1x1' , 'reduce_1x1' ) if ".local_rep.conv_3x3." in name: _lowerCAmelCase = name.replace('.local_rep.conv_3x3.' , '.conv_kxk.' ) if ".local_rep.conv_1x1." in name: _lowerCAmelCase = name.replace('.local_rep.conv_1x1.' , '.conv_1x1.' ) if ".norm." in name: _lowerCAmelCase = name.replace('.norm.' , '.normalization.' ) if ".conv." in name: _lowerCAmelCase = name.replace('.conv.' , '.convolution.' ) if ".conv_proj." in name: _lowerCAmelCase = name.replace('.conv_proj.' , '.conv_projection.' ) for i in range(0 , 2 ): for j in range(0 , 4 ): if F'.{i}.{j}.' in name: _lowerCAmelCase = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.' ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F'.{i}.{j}.' in name: _lowerCAmelCase = name.replace(F'.{i}.{j}.' , F'.{i}.' ) if "expand_1x1" in name: _lowerCAmelCase = name.replace('expand_1x1' , 'downsampling_layer.expand_1x1' ) if "conv_3x3" in name: _lowerCAmelCase = name.replace('conv_3x3' , 'downsampling_layer.conv_3x3' ) if "reduce_1x1" in name: _lowerCAmelCase = name.replace('reduce_1x1' , 'downsampling_layer.reduce_1x1' ) for i in range(2 , 5 ): if F'.global_rep.{i}.weight' in name: _lowerCAmelCase = name.replace(F'.global_rep.{i}.weight' , '.layernorm.weight' ) if F'.global_rep.{i}.bias' in name: _lowerCAmelCase = name.replace(F'.global_rep.{i}.bias' , '.layernorm.bias' ) if ".global_rep." in name: _lowerCAmelCase = name.replace('.global_rep.' , '.transformer.' ) if ".pre_norm_mha.0." in name: _lowerCAmelCase = name.replace('.pre_norm_mha.0.' , '.layernorm_before.' ) if ".pre_norm_mha.1.out_proj." in name: _lowerCAmelCase = name.replace('.pre_norm_mha.1.out_proj.' 
, '.attention.output.dense.' ) if ".pre_norm_ffn.0." in name: _lowerCAmelCase = name.replace('.pre_norm_ffn.0.' , '.layernorm_after.' ) if ".pre_norm_ffn.1." in name: _lowerCAmelCase = name.replace('.pre_norm_ffn.1.' , '.intermediate.dense.' ) if ".pre_norm_ffn.4." in name: _lowerCAmelCase = name.replace('.pre_norm_ffn.4.' , '.output.dense.' ) if ".transformer." in name: _lowerCAmelCase = name.replace('.transformer.' , '.transformer.layer.' ) if ".aspp_layer." in name: _lowerCAmelCase = name.replace('.aspp_layer.' , '.' ) if ".aspp_pool." in name: _lowerCAmelCase = name.replace('.aspp_pool.' , '.' ) if "seg_head." in name: _lowerCAmelCase = name.replace('seg_head.' , 'segmentation_head.' ) if "segmentation_head.classifier.classifier." in name: _lowerCAmelCase = name.replace('segmentation_head.classifier.classifier.' , 'segmentation_head.classifier.' ) if "classifier.fc." in name: _lowerCAmelCase = name.replace('classifier.fc.' , 'classifier.' ) elif (not base_model) and ("segmentation_head." not in name): _lowerCAmelCase = 'mobilevit.' + name return name def _lowerCamelCase ( snake_case , snake_case , snake_case=False ): if base_model: _lowerCAmelCase = '' else: _lowerCAmelCase = 'mobilevit.' for key in orig_state_dict.copy().keys(): _lowerCAmelCase = orig_state_dict.pop(snake_case ) if key[:8] == "encoder.": _lowerCAmelCase = key[8:] if "qkv" in key: _lowerCAmelCase = key.split('.' ) _lowerCAmelCase = int(key_split[0][6:] ) - 1 _lowerCAmelCase = int(key_split[3] ) _lowerCAmelCase = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}' ) _lowerCAmelCase = layer.transformer.layer[transformer_num].attention.attention.all_head_size _lowerCAmelCase = ( F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.' 
) if "weight" in key: _lowerCAmelCase = val[:dim, :] _lowerCAmelCase = val[dim : dim * 2, :] _lowerCAmelCase = val[-dim:, :] else: _lowerCAmelCase = val[:dim] _lowerCAmelCase = val[dim : dim * 2] _lowerCAmelCase = val[-dim:] else: _lowerCAmelCase = val return orig_state_dict def _lowerCamelCase ( ): _lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' _lowerCAmelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw ) return im @torch.no_grad() def _lowerCamelCase ( snake_case , snake_case , snake_case , snake_case=False ): _lowerCAmelCase = get_mobilevit_config(snake_case ) # load original state_dict _lowerCAmelCase = torch.load(snake_case , map_location='cpu' ) # load 🤗 model if mobilevit_name.startswith('deeplabv3_' ): _lowerCAmelCase = MobileViTForSemanticSegmentation(snake_case ).eval() else: _lowerCAmelCase = MobileViTForImageClassification(snake_case ).eval() _lowerCAmelCase = convert_state_dict(snake_case , snake_case ) model.load_state_dict(snake_case ) # Check outputs on an image, prepared by MobileViTImageProcessor _lowerCAmelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) _lowerCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' ) _lowerCAmelCase = model(**snake_case ) _lowerCAmelCase = outputs.logits if mobilevit_name.startswith('deeplabv3_' ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": _lowerCAmelCase = torch.tensor( [ [[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]], [[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]], [[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": _lowerCAmelCase = torch.tensor( [ [[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]], [[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]], [[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": _lowerCAmelCase = torch.tensor( [ [[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]], [[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]], [[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]], ] ) else: raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}' ) assert torch.allclose(logits[0, :3, :3, :3] , snake_case , atol=1E-4 ) else: assert logits.shape == (1, 1_000) if mobilevit_name == "mobilevit_s": _lowerCAmelCase = torch.tensor([-0.98_66, 0.23_92, -1.12_41] ) elif mobilevit_name == "mobilevit_xs": _lowerCAmelCase = torch.tensor([-2.47_61, -0.93_99, -1.95_87] ) elif mobilevit_name == "mobilevit_xxs": _lowerCAmelCase = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ) else: raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}' ) assert torch.allclose(logits[0, :3] , snake_case , atol=1E-4 ) Path(snake_case ).mkdir(exist_ok=snake_case ) print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case ) if push_to_hub: _lowerCAmelCase = { 'mobilevit_s': 'mobilevit-small', 'mobilevit_xs': 'mobilevit-x-small', 'mobilevit_xxs': 
'mobilevit-xx-small', 'deeplabv3_mobilevit_s': 'deeplabv3-mobilevit-small', 'deeplabv3_mobilevit_xs': 'deeplabv3-mobilevit-x-small', 'deeplabv3_mobilevit_xxs': 'deeplabv3-mobilevit-xx-small', } print('Pushing to the hub...' ) _lowerCAmelCase = model_mapping[mobilevit_name] image_processor.push_to_hub(snake_case , organization='apple' ) model.push_to_hub(snake_case , organization='apple' ) if __name__ == "__main__": _lowercase: Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--mobilevit_name''', default='''mobilevit_s''', type=str, help=( '''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',''' ''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.''' ), ) parser.add_argument( '''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _lowercase: List[str] = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class __SCREAMING_SNAKE_CASE : def __init__( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Any=13 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : List[str]=24 , __UpperCamelCase : int=16 , __UpperCamelCase : Tuple=True , __UpperCamelCase : int=True , __UpperCamelCase : Optional[int]=32 , __UpperCamelCase : Optional[int]=5 , __UpperCamelCase : Any=4 , __UpperCamelCase : Tuple=37 , __UpperCamelCase : List[Any]="gelu" , __UpperCamelCase : str=0.1 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : Optional[int]=10 , __UpperCamelCase : int=0.02 , __UpperCamelCase : str=None , __UpperCamelCase : int=2 , __UpperCamelCase : int=2 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = patch_size _UpperCAmelCase = max_length _UpperCAmelCase = num_mel_bins _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = scope _UpperCAmelCase = frequency_stride _UpperCAmelCase = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) _UpperCAmelCase = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 _UpperCAmelCase = (self.max_length - self.patch_size) // self.time_stride + 1 _UpperCAmelCase = frequency_out_dimension * time_out_dimension _UpperCAmelCase = num_patches + 2 def UpperCAmelCase__ ( self : Tuple ): _UpperCAmelCase = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = self.get_config() return config, input_values, labels def UpperCAmelCase__ ( self : Union[str, Any] ): return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : Optional[int] , 
__UpperCamelCase : Union[str, Any] , __UpperCamelCase : int ): _UpperCAmelCase = ASTModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {"input_values": input_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , unittest.TestCase): __SCREAMING_SNAKE_CASE : Union[str, Any] = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : Tuple = ( {"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel} if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : Optional[Any] = False __SCREAMING_SNAKE_CASE : Tuple = False __SCREAMING_SNAKE_CASE : Tuple = False __SCREAMING_SNAKE_CASE : int = False def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : int , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] ): if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = ASTModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 ) def UpperCAmelCase__ ( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason="AST does not use inputs_embeds" ) def UpperCAmelCase__ ( self : Union[str, Any] ): pass def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) ) def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(__UpperCamelCase ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ["input_values"] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) @slow def UpperCAmelCase__ ( self : str ): for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = ASTModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def __lowerCamelCase ( ) -> int: _UpperCAmelCase = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" ) _UpperCAmelCase , _UpperCAmelCase = torchaudio.load(_lowerCAmelCase ) return audio, sampling_rate @require_torch @require_torchaudio class __SCREAMING_SNAKE_CASE ( unittest.TestCase): @cached_property def UpperCAmelCase__ ( self : Tuple ): return ( 
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ) if is_torchaudio_available() else None ) @slow def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = self.default_feature_extractor _UpperCAmelCase = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(__UpperCamelCase ) _UpperCAmelCase = self.default_feature_extractor _UpperCAmelCase , _UpperCAmelCase = prepare_audio() _UpperCAmelCase = audio.squeeze().numpy() _UpperCAmelCase = feature_extractor(__UpperCamelCase , sampling_rate=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**__UpperCamelCase ) # verify the logits _UpperCAmelCase = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) _UpperCAmelCase = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
import warnings


warnings.warn(
    "memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
    "`from accelerate import find_executable_batch_size` to avoid this warning.",
    FutureWarning,
)
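For reference, a minimal sketch of the replacement API the warning points at; the starting batch size and the body of the training function are illustrative assumptions.

# Hedged sketch of the non-deprecated import path. find_executable_batch_size
# retries the wrapped function with a halved batch size on CUDA out-of-memory errors.
from accelerate import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    print(f"trying batch_size={batch_size}")  # illustrative body


train()  # called without the batch_size argument; the decorator supplies it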
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _a = logging.get_logger(__name__) _a = { """facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""", # See all DETR models at https://huggingface.co/models?filter=detr } class _UpperCAmelCase( lowerCamelCase ): lowercase__ = 'detr' lowercase__ = ['past_key_values'] lowercase__ = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , __a=True , __a=None , __a=3 , __a=1_00 , __a=6 , __a=20_48 , __a=8 , __a=6 , __a=20_48 , __a=8 , __a=0.0 , __a=0.0 , __a=True , __a="relu" , __a=2_56 , __a=0.1 , __a=0.0 , __a=0.0 , __a=0.02 , __a=1.0 , __a=False , __a="sine" , __a="resnet50" , __a=True , __a=False , __a=1 , __a=5 , __a=2 , __a=1 , __a=1 , __a=5 , __a=2 , __a=0.1 , **__a , ) -> int: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''') if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''') _UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4''']) elif isinstance(__a , __a): _UpperCamelCase = backbone_config.get('''model_type''') _UpperCamelCase = CONFIG_MAPPING[backbone_model_type] _UpperCamelCase = config_class.from_dict(__a) # set timm attributes to None _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None, None, None _UpperCamelCase = use_timm_backbone _UpperCamelCase = backbone_config _UpperCamelCase = num_channels _UpperCamelCase = num_queries _UpperCamelCase = d_model _UpperCamelCase = encoder_ffn_dim _UpperCamelCase = encoder_layers _UpperCamelCase = encoder_attention_heads _UpperCamelCase = decoder_ffn_dim _UpperCamelCase = decoder_layers _UpperCamelCase = decoder_attention_heads _UpperCamelCase = dropout _UpperCamelCase = attention_dropout _UpperCamelCase = activation_dropout _UpperCamelCase = activation_function _UpperCamelCase = init_std _UpperCamelCase = init_xavier_std _UpperCamelCase = encoder_layerdrop _UpperCamelCase = decoder_layerdrop _UpperCamelCase = encoder_layers _UpperCamelCase = auxiliary_loss _UpperCamelCase = position_embedding_type _UpperCamelCase = backbone _UpperCamelCase = use_pretrained_backbone _UpperCamelCase = dilation # Hungarian matcher _UpperCamelCase = class_cost _UpperCamelCase = bbox_cost _UpperCamelCase = giou_cost # Loss coefficients _UpperCamelCase = mask_loss_coefficient _UpperCamelCase = dice_loss_coefficient _UpperCamelCase = bbox_loss_coefficient _UpperCamelCase = giou_loss_coefficient _UpperCamelCase = eos_coefficient super().__init__(is_encoder_decoder=__a , **__a) @property def UpperCAmelCase ( self) -> int: '''simple docstring''' return self.encoder_attention_heads @property def UpperCAmelCase ( self) -> int: '''simple docstring''' return self.d_model @classmethod def UpperCAmelCase ( cls , __a , **__a) -> int: '''simple docstring''' return cls(backbone_config=__a , **__a) def UpperCAmelCase ( self) -> Dict[str, any]: '''simple docstring''' _UpperCamelCase = copy.deepcopy(self.__dict__) if output["backbone_config"] is not None: _UpperCamelCase = self.backbone_config.to_dict() _UpperCamelCase = self.__class__.model_type return output 
class _UpperCAmelCase( lowerCamelCase ): lowercase__ = version.parse('1.11' ) @property def UpperCAmelCase ( self) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ]) @property def UpperCAmelCase ( self) -> float: '''simple docstring''' return 1e-5 @property def UpperCAmelCase ( self) -> int: '''simple docstring''' return 12
def cramers_rule_2x2(equation1: list[int], equation2: list[int]):
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
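A worked check of the solver above, using the illustrative system 2x + y = 5 and x - y = 1: the determinants come out to -3, -6 and -3, so x = 2 and y = 1.

# Each equation is given as [a, b, c] for a*x + b*y = c.
solution = cramers_rule_2x2([2, 1, 5], [1, -1, 1])
print(solution)
assert solution == (2.0, 1.0)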
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=False , _a=True , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ): __a = parent __a = batch_size __a = num_channels __a = image_size __a = min_resolution __a = max_resolution __a = do_resize __a = size if size is not None else {'''height''': 18, '''width''': 20} __a = do_thumbnail __a = do_align_axis __a = do_pad __a = do_normalize __a = image_mean __a = image_std def __UpperCAmelCase ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : str = DonutImageProcessor if is_vision_available() else None def __UpperCAmelCase ( self ): __a = DonutImageProcessingTester(self ) @property def __UpperCAmelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self ): __a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a , '''do_resize''' ) ) self.assertTrue(hasattr(_a , '''size''' ) ) self.assertTrue(hasattr(_a , '''do_thumbnail''' ) ) self.assertTrue(hasattr(_a , '''do_align_long_axis''' ) ) self.assertTrue(hasattr(_a , '''do_pad''' ) ) self.assertTrue(hasattr(_a , '''do_normalize''' ) ) self.assertTrue(hasattr(_a , '''image_mean''' ) ) self.assertTrue(hasattr(_a , '''image_std''' ) ) def __UpperCAmelCase ( self ): __a = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} ) __a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) # Previous config had dimensions in (width, height) order __a = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} ) def __UpperCAmelCase ( self ): pass @is_flaky() def __UpperCAmelCase ( self ): # Initialize image_processing __a = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a , Image.Image ) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched __a = image_processing(_a , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, 
self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) @is_flaky() def __UpperCAmelCase ( self ): # Initialize image_processing __a = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a ) for image in image_inputs: self.assertIsInstance(_a , np.ndarray ) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched __a = image_processing(_a , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) @is_flaky() def __UpperCAmelCase ( self ): # Initialize image_processing __a = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a ) for image in image_inputs: self.assertIsInstance(_a , torch.Tensor ) # Test not batched input __a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched __a = image_processing(_a , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , )
"""simple docstring""" import warnings from ..trainer import Trainer from ..utils import logging lowercase_ = logging.get_logger(__name__) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self , _a=None , **_a ): warnings.warn( '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` ''' '''instead.''' , _a , ) super().__init__(args=_a , **_a )
"""simple docstring""" from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar __SCREAMING_SNAKE_CASE =TypeVar("T") __SCREAMING_SNAKE_CASE =TypeVar("U") class UpperCamelCase ( Generic[T, U] ): def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = key lowercase_ : Optional[int] = val lowercase_ : DoubleLinkedListNode[T, U] | None = None lowercase_ : DoubleLinkedListNode[T, U] | None = None def __repr__( self ) -> str: '''simple docstring''' return ( f'''Node: key: {self.key}, val: {self.val}, ''' f'''has next: {bool(self.next )}, has prev: {bool(self.prev )}''' ) class UpperCamelCase ( Generic[T, U] ): def __init__( self ) -> None: '''simple docstring''' lowercase_ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowerCAmelCase_ ,lowerCAmelCase_ ) lowercase_ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowerCAmelCase_ ,lowerCAmelCase_ ) lowercase_ : Union[str, Any] = self.rear, self.head def __repr__( self ) -> str: '''simple docstring''' lowercase_ : Tuple = ["DoubleLinkedList"] lowercase_ : List[Any] = self.head while node.next is not None: rep.append(str(lowerCAmelCase_ ) ) lowercase_ : Tuple = node.next rep.append(str(self.rear ) ) return ",\n ".join(lowerCAmelCase_ ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : Dict = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None lowercase_ : List[Any] = node lowercase_ : Any = previous lowercase_ : List[Any] = node lowercase_ : Dict = self.rear def _UpperCAmelCase ( self ,__UpperCamelCase ) -> DoubleLinkedListNode[T, U] | None: '''simple docstring''' if node.prev is None or node.next is None: return None lowercase_ : List[Any] = node.next lowercase_ : Optional[Any] = node.prev lowercase_ : List[str] = None lowercase_ : str = None return node class UpperCamelCase ( Generic[T, U] ): lowercase = {} def __init__( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' lowercase_ : DoubleLinkedList[T, U] = DoubleLinkedList() lowercase_ : List[str] = capacity lowercase_ : Tuple = 0 lowercase_ : List[str] = 0 lowercase_ : List[str] = 0 lowercase_ : dict[T, DoubleLinkedListNode[T, U]] = {} def __repr__( self ) -> str: '''simple docstring''' return ( f'''CacheInfo(hits={self.hits}, misses={self.miss}, ''' f'''capacity={self.capacity}, current size={self.num_keys})''' ) def __contains__( self ,__UpperCamelCase ) -> bool: '''simple docstring''' return key in self.cache def _UpperCAmelCase ( self ,__UpperCamelCase ) -> U | None: '''simple docstring''' if key in self.cache: self.hits += 1 lowercase_ : DoubleLinkedListNode[T, U] = self.cache[key] lowercase_ : int = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(lowerCAmelCase_ ) return node.val self.miss += 1 return None def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> None: '''simple docstring''' if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity lowercase_ : str = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(lowerCAmelCase_ ) is not None ) # node guaranteed to be in list assert node.key is not None del 
self.cache[first_node.key] self.num_keys -= 1 lowercase_ : List[Any] = DoubleLinkedListNode(lowerCAmelCase_ ,lowerCAmelCase_ ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value lowercase_ : Optional[Any] = self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list lowercase_ : str = value self.list.add(lowerCAmelCase_ ) @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]: '''simple docstring''' def cache_decorator_inner(__UpperCamelCase ) -> Callable[..., U]: def cache_decorator_wrapper(*__UpperCamelCase ) -> U: if func not in cls.decorator_function_to_instance_map: lowercase_ : Any = LRUCache(lowerCAmelCase_ ) lowercase_ : List[Any] = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: lowercase_ : str = func(*lowerCAmelCase_ ) cls.decorator_function_to_instance_map[func].put(args[0] ,lowerCAmelCase_ ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(lowerCAmelCase_ ,'cache_info' ,lowerCAmelCase_ ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
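A short usage sketch for the cache above; the memoized Fibonacci function and the capacity of 32 are illustrative choices. The `decorator` classmethod caches results keyed on the first argument, and the attached `cache_info()` exposes the hit/miss counters.

# Illustrative use of the LRUCache.decorator classmethod defined above.
@LRUCache.decorator(32)
def fib(num: int) -> int:
    return num if num < 2 else fib(num - 1) + fib(num - 2)


print(fib(20))  # 6765
print(fib.cache_info())  # expected: CacheInfo(hits=18, misses=21, capacity=32, current size=21)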
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = torch.device('''cpu''') def snake_case ( ): UpperCAmelCase_ : str = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ : str = Image.open(requests.get(A__ ,stream=A__ ).raw ) return im def snake_case ( A__ ): if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] ) def snake_case ( A__ ,A__ ,A__ ): UpperCAmelCase_ : Tuple = dct.pop(A__ ) UpperCAmelCase_ : Optional[Any] = val def snake_case ( A__ ): UpperCAmelCase_ : List[str] = [] for k in state_dict.keys(): UpperCAmelCase_ : Union[str, Any] = k if ".pwconv" in k: UpperCAmelCase_ : Dict = k_new.replace(".pwconv" ,".point_wise_conv" ) if ".dwconv" in k: UpperCAmelCase_ : Any = k_new.replace(".dwconv" ,".depth_wise_conv" ) if ".Proj." in k: UpperCAmelCase_ : Dict = k_new.replace(".Proj." ,".proj." ) if "patch_embed" in k_new: UpperCAmelCase_ : Tuple = k_new.replace("patch_embed" ,"swiftformer.patch_embed.patch_embedding" ) if "network" in k_new: UpperCAmelCase_ : List[Any] = k_new.split("." ) if ls[2].isdigit(): UpperCAmelCase_ : Tuple = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." 
+ ".".join(ls[3:] ) else: UpperCAmelCase_ : Optional[Any] = k_new.replace("network" ,"swiftformer.encoder.network" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def snake_case ( A__ ,A__ ,A__ ): UpperCAmelCase_ : Optional[int] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size UpperCAmelCase_ : Optional[Any] = 10_00 UpperCAmelCase_ : str = "huggingface/label-files" UpperCAmelCase_ : str = "imagenet-1k-id2label.json" UpperCAmelCase_ : List[str] = json.load(open(hf_hub_download(A__ ,A__ ,repo_type="dataset" ) ,"r" ) ) UpperCAmelCase_ : Tuple = {int(A__ ): v for k, v in idalabel.items()} UpperCAmelCase_ : List[Any] = idalabel UpperCAmelCase_ : Optional[Any] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": UpperCAmelCase_ : Tuple = [3, 3, 6, 4] UpperCAmelCase_ : str = [48, 56, 1_12, 2_20] elif swiftformer_name == "swiftformer_s": UpperCAmelCase_ : Optional[Any] = [3, 3, 9, 6] UpperCAmelCase_ : Optional[Any] = [48, 64, 1_68, 2_24] elif swiftformer_name == "swiftformer_l1": UpperCAmelCase_ : int = [4, 3, 10, 5] UpperCAmelCase_ : Union[str, Any] = [48, 96, 1_92, 3_84] elif swiftformer_name == "swiftformer_l3": UpperCAmelCase_ : Dict = [4, 4, 12, 6] UpperCAmelCase_ : Optional[int] = [64, 1_28, 3_20, 5_12] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("https" ): UpperCAmelCase_ : List[Any] = torch.hub.load_state_dict_from_url(A__ ,map_location="cpu" ,check_hash=A__ ) else: UpperCAmelCase_ : Any = torch.load(A__ ,map_location="cpu" ) UpperCAmelCase_ : List[str] = checkpoint UpperCAmelCase_ : Dict = create_rename_keys(A__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(A__ ,A__ ,A__ ) # load HuggingFace model UpperCAmelCase_ : Optional[int] = SwiftFormerForImageClassification(A__ ).eval() hf_model.load_state_dict(A__ ) # prepare test inputs UpperCAmelCase_ : Tuple = prepare_img() UpperCAmelCase_ : int = ViTImageProcessor.from_pretrained("preprocessor_config" ) UpperCAmelCase_ : int = processor(images=A__ ,return_tensors="pt" ) # compare outputs from both models UpperCAmelCase_ : List[Any] = get_expected_output(A__ ) UpperCAmelCase_ : int = hf_model(inputs["pixel_values"] ).logits assert hf_logits.shape == torch.Size([1, 10_00] ) assert torch.allclose(hf_logits[0, 0:5] ,A__ ,atol=1e-3 ) Path(A__ ).mkdir(exist_ok=A__ ) print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(A__ ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swiftformer_name''', default='''swiftformer_xs''', choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''], type=str, help='''Name of the SwiftFormer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''./converted_outputs/''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''') lowerCamelCase_ = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __A : Any = { "configuration_groupvit": [ "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GroupViTConfig", "GroupViTOnnxConfig", "GroupViTTextConfig", "GroupViTVisionConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = [ "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "GroupViTModel", "GroupViTPreTrainedModel", "GroupViTTextModel", "GroupViTVisionModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = [ "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFGroupViTModel", "TFGroupViTPreTrainedModel", "TFGroupViTTextModel", "TFGroupViTVisionModel", ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys __A : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" from __future__ import annotations __A : Union[str, Any] = [] def lowercase ( UpperCamelCase : list[list[int]] , UpperCamelCase : int , UpperCamelCase : int ): """simple docstring""" for i in range(len(UpperCamelCase ) ): if board[row][i] == 1: return False for i in range(len(UpperCamelCase ) ): if board[i][column] == 1: return False for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , len(UpperCamelCase ) ) ): if board[i][j] == 1: return False return True def lowercase ( UpperCamelCase : list[list[int]] , UpperCamelCase : int ): """simple docstring""" if row >= len(UpperCamelCase ): solution.append(UpperCamelCase ) printboard(UpperCamelCase ) print() return True for i in range(len(UpperCamelCase ) ): if is_safe(UpperCamelCase , UpperCamelCase , UpperCamelCase ): A__ : Optional[Any] =1 solve(UpperCamelCase , row + 1 ) A__ : Union[str, Any] =0 return False def lowercase ( UpperCamelCase : list[list[int]] ): """simple docstring""" for i in range(len(UpperCamelCase ) ): for j in range(len(UpperCamelCase ) ): if board[i][j] == 1: print("Q" , end=" " ) else: print("." , end=" " ) print() # n=int(input("The no. of queens")) __A : List[Any] = 8 __A : Dict = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print("The total no. of solutions are :", len(solution))
"""simple docstring""" import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) snake_case = logging.getLogger(__name__) def snake_case ( lowerCAmelCase_ ) -> List[str]: _snake_case = git.Repo(search_parent_directories=lowerCAmelCase_ ) _snake_case = { '''repo_id''': str(lowerCAmelCase_ ), '''repo_sha''': str(repo.head.object.hexsha ), '''repo_branch''': str(repo.active_branch ), } with open(os.path.join(lowerCAmelCase_ , '''git_log.json''' ) , '''w''' ) as f: json.dump(lowerCAmelCase_ , lowerCAmelCase_ , indent=4 ) def snake_case ( lowerCAmelCase_ ) -> int: if params.n_gpu <= 0: _snake_case = 0 _snake_case = -1 _snake_case = True _snake_case = False return assert torch.cuda.is_available() logger.info('''Initializing GPUs''' ) if params.n_gpu > 1: assert params.local_rank != -1 _snake_case = int(os.environ['''WORLD_SIZE'''] ) _snake_case = int(os.environ['''N_GPU_NODE'''] ) _snake_case = int(os.environ['''RANK'''] ) # number of nodes / node ID _snake_case = params.world_size // params.n_gpu_per_node _snake_case = params.global_rank // params.n_gpu_per_node _snake_case = True assert params.n_nodes == int(os.environ['''N_NODES'''] ) assert params.node_id == int(os.environ['''NODE_RANK'''] ) # local job (single GPU) else: assert params.local_rank == -1 _snake_case = 1 _snake_case = 0 _snake_case = 0 _snake_case = 0 _snake_case = 1 _snake_case = 1 _snake_case = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode _snake_case = params.node_id == 0 and params.local_rank == 0 _snake_case = params.n_nodes > 1 # summary _snake_case = f"""--- Global rank: {params.global_rank} - """ logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes ) logger.info(PREFIX + '''Node ID : %i''' % params.node_id ) logger.info(PREFIX + '''Local rank : %i''' % params.local_rank ) logger.info(PREFIX + '''World size : %i''' % params.world_size ) logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node ) logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) ) logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) ) logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) ) logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info('''Initializing PyTorch distributed''' ) torch.distributed.init_process_group( init_method='''env://''' , backend='''nccl''' , ) def snake_case ( lowerCAmelCase_ ) -> Dict: np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def __lowerCamelCase ( UpperCAmelCase_ : int = 8 ): """simple docstring""" a :Optional[int] = ascii_letters + digits + punctuation return "".join(secrets.choice(UpperCAmelCase_ ) for _ in range(UpperCAmelCase_ ) ) def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int ): """simple docstring""" i -= len(UpperCAmelCase_ ) a :Tuple = i // 3 a :int = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) a :Union[str, Any] = ( chars_incl + random(UpperCAmelCase_ , quotient + remainder ) + random(UpperCAmelCase_ , UpperCAmelCase_ ) + random(UpperCAmelCase_ , UpperCAmelCase_ ) ) a :Dict = list(UpperCAmelCase_ ) shuffle(UpperCAmelCase_ ) return "".join(UpperCAmelCase_ ) # random is a generalised function for letters, characters and numbers def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int ): """simple docstring""" return "".join(secrets.choice(UpperCAmelCase_ ) for _ in range(UpperCAmelCase_ ) ) def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple ): """simple docstring""" pass # Put your code here... def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str ): """simple docstring""" pass # Put your code here... def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ): """simple docstring""" pass # Put your code here... def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int = 8 ): """simple docstring""" if len(UpperCAmelCase_ ) < min_length: # Your Password must be at least 8 characters long return False a :Dict = any(char in ascii_uppercase for char in password ) a :Optional[int] = any(char in ascii_lowercase for char in password ) a :Tuple = any(char in digits for char in password ) a :Any = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowerase # numbers, and special characters def __lowerCamelCase ( ): """simple docstring""" a :int = int(input('''Please indicate the max length of your password: ''' ).strip() ) a :Union[str, Any] = input( '''Please indicate the characters that must be in your password: ''' ).strip() print('''Password generated:''' , password_generator(UpperCAmelCase_ ) ) print( '''Alternative Password generated:''' , alternative_password_generator(UpperCAmelCase_ , UpperCAmelCase_ ) , ) print('''[If you are thinking of using this passsword, You better save it.]''' ) if __name__ == "__main__": main()
def compute_ap(l):  # noqa: E741
    """Find the articulation points (cut vertices) of an undirected graph given as an adjacency list."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
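# For the sample graph above this should print the articulation points 2, 3 and 5:
# removing 2 separates {0, 1} from the rest of the graph, removing 3 isolates 4, and
# removing 5 disconnects the cycle {6, 7, 8}.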
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __UpperCAmelCase :Optional[Any] = { "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"], "processing_layoutlmv2": ["LayoutLMv2Processor"], "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase :Any = ["LayoutLMv2TokenizerFast"] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase :int = ["LayoutLMv2FeatureExtractor"] __UpperCAmelCase :Optional[int] = ["LayoutLMv2ImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase :List[Any] = [ "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST", "LayoutLMv2ForQuestionAnswering", "LayoutLMv2ForSequenceClassification", "LayoutLMv2ForTokenClassification", "LayoutLMv2Layer", "LayoutLMv2Model", "LayoutLMv2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys __UpperCAmelCase :Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : Tuple = logging.get_logger(__name__) UpperCAmelCase : int = { 'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json', } class lowerCamelCase__ ( A ): """simple docstring""" __a = """roc_bert""" def __init__( self : Optional[Any] , UpperCamelCase : List[Any]=30_522 , UpperCamelCase : Dict=768 , UpperCamelCase : Tuple=12 , UpperCamelCase : Dict=12 , UpperCamelCase : List[str]=3_072 , UpperCamelCase : Optional[int]="gelu" , UpperCamelCase : List[str]=0.1 , UpperCamelCase : Dict=0.1 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : List[str]=1e-1_2 , UpperCamelCase : Any=True , UpperCamelCase : int=0 , UpperCamelCase : List[str]="absolute" , UpperCamelCase : Optional[int]=None , UpperCamelCase : int=True , UpperCamelCase : Tuple=True , UpperCamelCase : Any=768 , UpperCamelCase : int=910 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Tuple=24_858 , UpperCamelCase : Optional[int]=True , **UpperCamelCase : List[str] , ): '''simple docstring''' __UpperCAmelCase : Any = vocab_size __UpperCAmelCase : int = max_position_embeddings __UpperCAmelCase : List[str] = hidden_size __UpperCAmelCase : Dict = num_hidden_layers __UpperCAmelCase : Optional[int] = num_attention_heads __UpperCAmelCase : Optional[Any] = intermediate_size __UpperCAmelCase : List[Any] = hidden_act __UpperCAmelCase : str = hidden_dropout_prob __UpperCAmelCase : Any = attention_probs_dropout_prob __UpperCAmelCase : Tuple = initializer_range __UpperCAmelCase : int = type_vocab_size __UpperCAmelCase : List[Any] = layer_norm_eps __UpperCAmelCase : Dict = use_cache __UpperCAmelCase : str = enable_pronunciation __UpperCAmelCase : str = enable_shape __UpperCAmelCase : int = pronunciation_embed_dim __UpperCAmelCase : int = pronunciation_vocab_size __UpperCAmelCase : List[Any] = shape_embed_dim __UpperCAmelCase : Union[str, Any] = shape_vocab_size __UpperCAmelCase : int = concat_input __UpperCAmelCase : Optional[int] = position_embedding_type __UpperCAmelCase : Dict = classifier_dropout super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase )
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : int = 1_0_0_0_0_0_0 ) -> int: '''simple docstring''' __UpperCAmelCase : List[str] = 1 __UpperCAmelCase : Union[str, Any] = 1 __UpperCAmelCase : Optional[Any] = {1: 1} for inputa in range(2 , _UpperCamelCase ): __UpperCAmelCase : Optional[int] = 0 __UpperCAmelCase : str = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: __UpperCAmelCase : Tuple = (3 * number) + 1 counter += 1 if inputa not in counters: __UpperCAmelCase : Optional[Any] = counter if counter > pre_counter: __UpperCAmelCase : List[Any] = inputa __UpperCAmelCase : List[Any] = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
import os
from pathlib import Path


def load_cuda_kernels():
    """Compile and load the MultiScaleDeformableAttention CPU/CUDA kernels on first use."""
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
__version__ = "0.21.0"

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)

if is_rich_available():
    from .utils import rich
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCamelCase__ : Dict = logging.get_logger(__name__) def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = b.T SCREAMING_SNAKE_CASE_ = np.sum(np.square(__UpperCAmelCase ) , axis=1 ) SCREAMING_SNAKE_CASE_ = np.sum(np.square(__UpperCAmelCase ) , axis=0 ) SCREAMING_SNAKE_CASE_ = np.matmul(__UpperCAmelCase , __UpperCAmelCase ) SCREAMING_SNAKE_CASE_ = aa[:, None] - 2 * ab + ba[None, :] return d def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] ) -> Optional[int]: SCREAMING_SNAKE_CASE_ = x.reshape(-1 , 3 ) SCREAMING_SNAKE_CASE_ = squared_euclidean_distance(__UpperCAmelCase , __UpperCAmelCase ) return np.argmin(__UpperCAmelCase , axis=1 ) class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = ["pixel_values"] def __init__( self : List[Any] , _lowerCAmelCase : Optional[Union[List[List[int]], np.ndarray]] = None , _lowerCAmelCase : bool = True , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , **_lowerCAmelCase : List[Any] , ): super().__init__(**_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = size if size is not None else {'height': 256, 'width': 256} SCREAMING_SNAKE_CASE_ = get_size_dict(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = np.array(_lowerCAmelCase ) if clusters is not None else None SCREAMING_SNAKE_CASE_ = do_resize SCREAMING_SNAKE_CASE_ = size SCREAMING_SNAKE_CASE_ = resample SCREAMING_SNAKE_CASE_ = do_normalize SCREAMING_SNAKE_CASE_ = do_color_quantize def lowerCAmelCase_ ( self : str , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Dict[str, int] , _lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : Tuple , ): SCREAMING_SNAKE_CASE_ = get_size_dict(_lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F"Size dictionary must contain both height and width keys. 
Got {size.keys()}" ) return resize( _lowerCAmelCase , size=(size['height'], size['width']) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , ): SCREAMING_SNAKE_CASE_ = rescale(image=_lowerCAmelCase , scale=1 / 127.5 , data_format=_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = image - 1 return image def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : ImageInput , _lowerCAmelCase : bool = None , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : PILImageResampling = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[Union[List[List[int]], np.ndarray]] = None , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **_lowerCAmelCase : Optional[Any] , ): SCREAMING_SNAKE_CASE_ = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE_ = size if size is not None else self.size SCREAMING_SNAKE_CASE_ = get_size_dict(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE_ = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE_ = do_color_quantize if do_color_quantize is not None else self.do_color_quantize SCREAMING_SNAKE_CASE_ = clusters if clusters is not None else self.clusters SCREAMING_SNAKE_CASE_ = np.array(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = make_list_of_images(_lowerCAmelCase ) if not valid_images(_lowerCAmelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_color_quantize and clusters is None: raise ValueError('Clusters must be specified if do_color_quantize is True.' ) # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE_ = [to_numpy_array(_lowerCAmelCase ) for image in images] if do_resize: SCREAMING_SNAKE_CASE_ = [self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images] if do_normalize: SCREAMING_SNAKE_CASE_ = [self.normalize(image=_lowerCAmelCase ) for image in images] if do_color_quantize: SCREAMING_SNAKE_CASE_ = [to_channel_dimension_format(_lowerCAmelCase , ChannelDimension.LAST ) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) SCREAMING_SNAKE_CASE_ = np.array(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = color_quantize(_lowerCAmelCase , _lowerCAmelCase ).reshape(images.shape[:-1] ) # flatten to (batch_size, height*width) SCREAMING_SNAKE_CASE_ = images.shape[0] SCREAMING_SNAKE_CASE_ = images.reshape(_lowerCAmelCase , -1 ) # We need to convert back to a list of images to keep consistent behaviour across processors. SCREAMING_SNAKE_CASE_ = list(_lowerCAmelCase ) else: SCREAMING_SNAKE_CASE_ = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images] SCREAMING_SNAKE_CASE_ = {'input_ids': images} return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Return True if some subset of `arr` sums to `required_sum` (bottom-up dynamic programming)."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
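# Two hand-checkable calls for the DP above: 2 + 4 + 8 = 14, so a subset summing to 14
# exists, while no subset of these even numbers can reach 5.
assert is_sum_subset([2, 4, 6, 8], 14) is True
assert is_sum_subset([2, 4, 6, 8], 5) is False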
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class __a : def __init__( self : List[str] ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[int]=2 ,lowerCamelCase : List[str]=True ,lowerCamelCase : List[Any]=False ,lowerCamelCase : List[Any]=10 ,lowerCamelCase : str=3 ,lowerCamelCase : Optional[Any]=32 * 8 ,lowerCamelCase : Optional[int]=32 * 8 ,lowerCamelCase : Any=4 ,lowerCamelCase : Any=64 ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_auxiliary_loss __SCREAMING_SNAKE_CASE = num_queries __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = min_size __SCREAMING_SNAKE_CASE = max_size __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = hidden_dim __SCREAMING_SNAKE_CASE = hidden_dim def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=UpperCamelCase_ ) > 0.5 ).float() __SCREAMING_SNAKE_CASE = (torch.rand((self.batch_size, self.num_labels) ,device=UpperCamelCase_ ) > 0.5).long() __SCREAMING_SNAKE_CASE = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = MaskaFormerConfig( hidden_size=self.hidden_dim ,) __SCREAMING_SNAKE_CASE = self.num_queries __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = [1, 1, 1, 1] __SCREAMING_SNAKE_CASE = self.num_channels __SCREAMING_SNAKE_CASE = 64 __SCREAMING_SNAKE_CASE = 128 __SCREAMING_SNAKE_CASE = self.hidden_dim __SCREAMING_SNAKE_CASE = self.hidden_dim __SCREAMING_SNAKE_CASE = self.hidden_dim return config def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def UpperCAmelCase__ ( self : str ,lowerCamelCase : List[str] ,lowerCamelCase : Any ): '''simple docstring''' __SCREAMING_SNAKE_CASE = output.encoder_hidden_states __SCREAMING_SNAKE_CASE = output.pixel_decoder_hidden_states __SCREAMING_SNAKE_CASE = output.transformer_decoder_hidden_states self.parent.assertTrue(len(UpperCamelCase_ ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(UpperCamelCase_ ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(UpperCamelCase_ ) ,config.decoder_layers ) def UpperCAmelCase__ ( self : int ,lowerCamelCase : Tuple 
,lowerCamelCase : List[Any] ,lowerCamelCase : Dict ,lowerCamelCase : Union[str, Any]=False ): '''simple docstring''' with torch.no_grad(): __SCREAMING_SNAKE_CASE = MaskaFormerModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __SCREAMING_SNAKE_CASE = model(pixel_values=UpperCamelCase_ ,pixel_mask=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = model(UpperCamelCase_ ,output_hidden_states=UpperCamelCase_ ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(UpperCamelCase_ ,UpperCamelCase_ ) def UpperCAmelCase__ ( self : Any ,lowerCamelCase : Dict ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple ,lowerCamelCase : Tuple ,lowerCamelCase : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = MaskaFormerForUniversalSegmentation(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() def comm_check_on_output(lowerCamelCase : List[Any] ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(pixel_values=UpperCamelCase_ ,pixel_mask=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = model(UpperCamelCase_ ) comm_check_on_output(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = model( pixel_values=UpperCamelCase_ ,pixel_mask=UpperCamelCase_ ,mask_labels=UpperCamelCase_ ,class_labels=UpperCamelCase_ ) comm_check_on_output(UpperCamelCase_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) ) @require_torch class __a ( _snake_case, _snake_case, unittest.TestCase ): __UpperCamelCase : List[Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () __UpperCamelCase : Tuple = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {} __UpperCamelCase : List[str] = False __UpperCamelCase : Optional[int] = False __UpperCamelCase : Tuple = False __UpperCamelCase : Optional[Any] = False def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = MaskaFormerModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=UpperCamelCase_ ,has_text_modality=UpperCamelCase_ ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Any ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(UpperCamelCase_ ,**UpperCamelCase_ ,output_hidden_states=UpperCamelCase_ ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*UpperCamelCase_ ) @unittest.skip(reason="""Mask2Former does not use inputs_embeds""" ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' pass @unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" ) def UpperCAmelCase__ ( self : str ): '''simple docstring''' pass @unittest.skip(reason="""Mask2Former is not a generative model""" ) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' pass @unittest.skip(reason="""Mask2Former does not use token embeddings""" ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip( reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' pass def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE = ["pixel_values"] self.assertListEqual(arg_names[:1] ,UpperCamelCase_ ) @slow def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' for model_name in ["facebook/mask2former-swin-small-coco-instance"]: __SCREAMING_SNAKE_CASE = MaskaFormerModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = (self.model_tester.min_size,) * 2 __SCREAMING_SNAKE_CASE = { "pixel_values": torch.randn((2, 3, *size) ,device=UpperCamelCase_ ), "mask_labels": torch.randn((2, 10, *size) ,device=UpperCamelCase_ ), "class_labels": torch.zeros(2 ,10 ,device=UpperCamelCase_ ).long(), } __SCREAMING_SNAKE_CASE = self.model_tester.get_config() __SCREAMING_SNAKE_CASE = MaskaFormerForUniversalSegmentation(UpperCamelCase_ ).to(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = model(**UpperCamelCase_ ) self.assertTrue(outputs.loss is not None ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(UpperCamelCase_ ,**UpperCamelCase_ ,output_hidden_states=UpperCamelCase_ ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(UpperCamelCase_ ).to(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = model(**UpperCamelCase_ ,output_attentions=UpperCamelCase_ ) self.assertTrue(outputs.attentions is not None ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' if not self.model_tester.is_training: return __SCREAMING_SNAKE_CASE = self.all_model_classes[1] __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.train() __SCREAMING_SNAKE_CASE = model(UpperCamelCase_ 
,mask_labels=UpperCamelCase_ ,class_labels=UpperCamelCase_ ).loss loss.backward() def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.all_model_classes[1] __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = model_class(UpperCamelCase_ ).to(UpperCamelCase_ ) model.train() __SCREAMING_SNAKE_CASE = model(UpperCamelCase_ ,mask_labels=UpperCamelCase_ ,class_labels=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() __SCREAMING_SNAKE_CASE = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() __SCREAMING_SNAKE_CASE = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() __SCREAMING_SNAKE_CASE = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=UpperCamelCase_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) a = 1E-4 def __magic_name__ ( ) -> List[str]: '''simple docstring''' __SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class __a ( unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' return "facebook/mask2former-swin-small-coco-instance" @cached_property def UpperCAmelCase__ ( self : Any ): '''simple docstring''' return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = self.default_image_processor __SCREAMING_SNAKE_CASE = prepare_img() __SCREAMING_SNAKE_CASE = image_processor(UpperCamelCase_ ,return_tensors="""pt""" ).to(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(UpperCamelCase_ ,(1, 3, 384, 384) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(**UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = torch.tensor( [[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(UpperCamelCase_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] ,UpperCamelCase_ ,atol=UpperCamelCase_ ) ) __SCREAMING_SNAKE_CASE = torch.tensor( [[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(UpperCamelCase_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,UpperCamelCase_ ,atol=UpperCamelCase_ ) ) __SCREAMING_SNAKE_CASE = torch.tensor( [[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(UpperCamelCase_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,UpperCamelCase_ ,atol=UpperCamelCase_ ) ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(UpperCamelCase_ ).eval() __SCREAMING_SNAKE_CASE = self.default_image_processor __SCREAMING_SNAKE_CASE = prepare_img() 
__SCREAMING_SNAKE_CASE = image_processor(UpperCamelCase_ ,return_tensors="""pt""" ).to(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(UpperCamelCase_ ,(1, 3, 384, 384) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(**UpperCamelCase_ ) # masks_queries_logits __SCREAMING_SNAKE_CASE = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) __SCREAMING_SNAKE_CASE = [ [-8.7_839, -9.0_056, -8.8_121], [-7.4_104, -7.0_313, -6.5_401], [-6.6_105, -6.3_427, -6.4_675], ] __SCREAMING_SNAKE_CASE = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,UpperCamelCase_ ,atol=UpperCamelCase_ ) ) # class_queries_logits __SCREAMING_SNAKE_CASE = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) ) __SCREAMING_SNAKE_CASE = torch.tensor( [ [1.8_324, -8.0_835, -4.1_922], [0.8_450, -9.0_050, -3.6_053], [0.3_045, -7.7_293, -3.0_275], ] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,UpperCamelCase_ ,atol=UpperCamelCase_ ) ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(UpperCamelCase_ ).eval() __SCREAMING_SNAKE_CASE = self.default_image_processor __SCREAMING_SNAKE_CASE = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors="""pt""" ,) __SCREAMING_SNAKE_CASE = inputs["pixel_values"].to(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = [el.to(UpperCamelCase_ ) for el in inputs["mask_labels"]] __SCREAMING_SNAKE_CASE = [el.to(UpperCamelCase_ ) for el in inputs["class_labels"]] with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(**UpperCamelCase_ ) self.assertTrue(outputs.loss is not None )
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed

        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
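# A minimal usage sketch for the helper defined above: version pins live in
# dependency_versions_table.deps, so a call-site check looks like this (the package
# name "tqdm" is chosen purely for illustration).
from transformers.dependency_versions_check import dep_version_check

dep_version_check("tqdm")  # raises if the installed tqdm falls outside the pinned range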
from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''', } class _lowerCAmelCase ( __magic_name__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] ="roc_bert" def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int]=3_05_22 , SCREAMING_SNAKE_CASE__ : List[str]=7_68 , SCREAMING_SNAKE_CASE__ : Optional[Any]=12 , SCREAMING_SNAKE_CASE__ : List[Any]=12 , SCREAMING_SNAKE_CASE__ : Tuple=30_72 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=5_12 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Dict=0 , SCREAMING_SNAKE_CASE__ : List[Any]="absolute" , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : str=7_68 , SCREAMING_SNAKE_CASE__ : List[Any]=9_10 , SCREAMING_SNAKE_CASE__ : Tuple=5_12 , SCREAMING_SNAKE_CASE__ : int=2_48_58 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , **SCREAMING_SNAKE_CASE__ : str , ): """simple docstring""" UpperCamelCase = vocab_size UpperCamelCase = max_position_embeddings UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = initializer_range UpperCamelCase = type_vocab_size UpperCamelCase = layer_norm_eps UpperCamelCase = use_cache UpperCamelCase = enable_pronunciation UpperCamelCase = enable_shape UpperCamelCase = pronunciation_embed_dim UpperCamelCase = pronunciation_vocab_size UpperCamelCase = shape_embed_dim UpperCamelCase = shape_vocab_size UpperCamelCase = concat_input UpperCamelCase = position_embedding_type UpperCamelCase = classifier_dropout super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """Recursive factorial, memoised via functools.lru_cache."""
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
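# lru_cache memoises earlier results, so repeated calls reuse previously computed factorials.
assert factorial(5) == 120
assert factorial(0) == 1  # base case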
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer _lowerCamelCase = logging.getLogger(__name__) def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser( description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" ) parser.add_argument( """--dataset_name""" , type=UpperCamelCase__ , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , ) parser.add_argument( """--dataset_config""" , type=UpperCamelCase__ , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" ) parser.add_argument( """--tokenizer_name_or_path""" , type=UpperCamelCase__ , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , ) parser.add_argument( """--shard_size""" , type=UpperCamelCase__ , default=1_000 , help="""Number of entries to go in a single shard.""" , ) parser.add_argument("""--split""" , type=UpperCamelCase__ , default="""train""" , choices=["""train""", """test""", """validation"""] ) parser.add_argument( """--limit""" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="""Limit the number of shards (used for debugging).""" , ) parser.add_argument( """--max_length""" , type=UpperCamelCase__ , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum""" """ sequence length that is a multiple of 8.""" , ) parser.add_argument( """--output_dir""" , default="""tf-tpu""" , type=UpperCamelCase__ , help="""Output directory where the TFRecord shards will be saved. If the""" """ path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord""" """ shards will be directly saved to a Google Cloud Storage bucket.""" , ) SCREAMING_SNAKE_CASE__ = parser.parse_args() return args def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] ): def fn(UpperCamelCase__: Any ): return tokenizer(examples["""text"""] ) return fn def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any ): SCREAMING_SNAKE_CASE__ = [] for i in range(len(tokenized_data["""input_ids"""] ) ): SCREAMING_SNAKE_CASE__ = { """input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ), """attention_mask""": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ), } SCREAMING_SNAKE_CASE__ = tf.train.Features(feature=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = tf.train.Example(features=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = example.SerializeToString() records.append(UpperCamelCase__ ) return records def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ): SCREAMING_SNAKE_CASE__ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: SCREAMING_SNAKE_CASE__ = min(len(UpperCamelCase__ ) , args.limit ) SCREAMING_SNAKE_CASE__ = dataset.select(range(UpperCamelCase__ ) ) print(f'''Limiting the dataset to {args.limit} entries.''' ) SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. 
if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) SCREAMING_SNAKE_CASE__ = os.path.join(args.output_dir , args.split ) if not os.path.exists(UpperCamelCase__ ): os.makedirs(UpperCamelCase__ ) else: SCREAMING_SNAKE_CASE__ = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. SCREAMING_SNAKE_CASE__ = tokenize_function(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = dataset.map(UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=4 , remove_columns=["""text"""] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(UpperCamelCase__: int ): # Concatenate all texts. SCREAMING_SNAKE_CASE__ = {k: sum(examples[k] , [] ) for k in examples.keys()} SCREAMING_SNAKE_CASE__ = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 SCREAMING_SNAKE_CASE__ = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. SCREAMING_SNAKE_CASE__ = { k: [t[i : i + args.max_length] for i in range(0 , UpperCamelCase__ , args.max_length )] for k, t in concatenated_examples.items() } return result SCREAMING_SNAKE_CASE__ = dataset_tokenized.map(UpperCamelCase__ , batched=UpperCamelCase__ , batch_size=1_000 , num_proc=4 ) SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = 0 for shard in range(0 , len(UpperCamelCase__ ) , args.shard_size ): SCREAMING_SNAKE_CASE__ = grouped_dataset[shard : shard + args.shard_size] SCREAMING_SNAKE_CASE__ = len(dataset_snapshot["""input_ids"""] ) SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase__ , f'''dataset-{shard_count}-{records_containing}.tfrecord''' ) SCREAMING_SNAKE_CASE__ = get_serialized_examples(UpperCamelCase__ ) with tf.io.TFRecordWriter(UpperCamelCase__ ) as out_file: for i in range(len(UpperCamelCase__ ) ): SCREAMING_SNAKE_CASE__ = serialized_examples[i] out_file.write(UpperCamelCase__ ) print("""Wrote file {} containing {} records""".format(UpperCamelCase__ , UpperCamelCase__ ) ) shard_count += 1 total_records += records_containing with open(f'''split-{args.split}-records-count.txt''' , """w""" ) as f: print(f'''Total {args.split} records: {total_records}''' , file=UpperCamelCase__ ) if __name__ == "__main__": _lowerCamelCase = parse_args() main(args)
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) SCREAMING_SNAKE_CASE = logging.getLogger(__name__) @dataclass class UpperCAmelCase_ : """simple docstring""" UpperCamelCase_ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCamelCase_ = field( default=__A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase_ = field( default=__A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase_ = field( default=__A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCamelCase_ = field(default=__A , metadata={'''help''': '''Whether tp freeze the encoder.'''} ) UpperCamelCase_ = field(default=__A , metadata={'''help''': '''Whether to freeze the embeddings.'''} ) @dataclass class UpperCAmelCase_ : """simple docstring""" UpperCamelCase_ = field( metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} ) UpperCamelCase_ = field( default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , ) UpperCamelCase_ = field( default=1024 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase_ = field( default=128 , metadata={ '''help''': ( '''The maximum total sequence length for target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase_ = field( default=142 , metadata={ '''help''': ( '''The maximum total sequence length for validation target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded. ''' '''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used ''' '''during ``evaluate`` and ``predict``.''' ) } , ) UpperCamelCase_ = field( default=142 , metadata={ '''help''': ( '''The maximum total sequence length for test target text after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase_ = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} ) UpperCamelCase_ = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} ) UpperCamelCase_ = field(default=-1 , metadata={'''help''': '''# test examples. 
-1 means use all.'''} ) UpperCamelCase_ = field(default=__A , metadata={'''help''': '''Source language id for translation.'''} ) UpperCamelCase_ = field(default=__A , metadata={'''help''': '''Target language id for translation.'''} ) UpperCamelCase_ = field(default=__A , metadata={'''help''': '''# num_beams to use for evaluation.'''} ) UpperCamelCase_ = field( default=__A , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , ) def lowercase_ ( __A : Dict , __A : Optional[Any] , __A : Union[str, Any] ) -> List[Any]: """simple docstring""" logger.info(F'***** {split} metrics *****' ) for key in sorted(metrics.keys() ): logger.info(F' {key} = {metrics[key]}' ) save_json(__A , os.path.join(__A , F'{split}_results.json' ) ) def lowercase_ ( ) -> int: """simple docstring""" lowercase : Union[str, Any] =HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase , lowercase , lowercase : Optional[Any] =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase , lowercase , lowercase : Optional[int] =parser.parse_args_into_dataclasses() check_output_dir(__A ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , __A ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
lowercase : Union[str, Any] =AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowercase : Dict =('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(__A , __A , __A ): assert hasattr(__A , __A ), F'({config.__class__.__name__}) doesn\'t have a `{p}` attribute' setattr(__A , __A , getattr(__A , __A ) ) lowercase : Optional[Any] =AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowercase : Dict =AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__A , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__A , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: lowercase : Dict =model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__A , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__A , __A ): lowercase : int =tokenizer.lang_code_to_id[data_args.tgt_lang] else: lowercase : Tuple =tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__A ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) lowercase : int =SeqaSeqDataset # Get datasets lowercase : Optional[Any] =( dataset_class( __A , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) lowercase : Optional[int] =( dataset_class( __A , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) lowercase : Any =( dataset_class( __A , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer lowercase : Dict =( build_compute_metrics_fn(data_args.task , __A ) if training_args.predict_with_generate else None ) lowercase : Optional[int] =SeqaSeqTrainer( model=__A , args=__A , data_args=__A , train_dataset=__A , eval_dataset=__A , data_collator=SeqaSeqDataCollator( __A , __A , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__A , tokenizer=__A , ) lowercase : List[str] ={} # Training if training_args.do_train: logger.info('''*** Train ***''' ) lowercase : Optional[int] =trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) lowercase : Any =train_result.metrics lowercase : Optional[int] =data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , __A , training_args.output_dir ) 
all_metrics.update(__A ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) lowercase : int =trainer.evaluate(metric_key_prefix='''val''' ) lowercase : str =data_args.n_val lowercase : Tuple =round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , __A , training_args.output_dir ) all_metrics.update(__A ) if training_args.do_predict: logger.info('''*** Predict ***''' ) lowercase : Union[str, Any] =trainer.predict(test_dataset=__A , metric_key_prefix='''test''' ) lowercase : List[Any] =test_output.metrics lowercase : Tuple =data_args.n_test if trainer.is_world_process_zero(): lowercase : Union[str, Any] =round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , __A , training_args.output_dir ) all_metrics.update(__A ) if training_args.predict_with_generate: lowercase : Any =tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__A , clean_up_tokenization_spaces=__A ) lowercase : Tuple =lmap(str.strip , __A ) write_txt_file(__A , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(__A , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def lowercase_ ( __A : str ) -> Dict: """simple docstring""" main() if __name__ == "__main__": main()
import tensorflow as tf from ...tf_utils import shape_list class a ( tf.keras.layers.Layer ): def __init__( self :Tuple ,__lowercase :Optional[int] ,__lowercase :List[Any] ,__lowercase :int ,__lowercase :str ,__lowercase :List[str]=1 ,__lowercase :Optional[Any]=False ,**__lowercase :str ): super().__init__(**__lowercase ) snake_case__ : Union[str, Any] = vocab_size snake_case__ : Dict = d_embed snake_case__ : Dict = d_proj snake_case__ : str = cutoffs + [vocab_size] snake_case__ : Tuple = [0] + self.cutoffs snake_case__ : Optional[int] = div_val snake_case__ : Optional[int] = self.cutoffs[0] snake_case__ : int = len(self.cutoffs ) - 1 snake_case__ : Any = self.shortlist_size + self.n_clusters snake_case__ : List[str] = keep_order snake_case__ : Tuple = [] snake_case__ : str = [] def __lowerCamelCase ( self :str ,__lowercase :Optional[int] ): if self.n_clusters > 0: snake_case__ : Tuple = self.add_weight( shape=(self.n_clusters, self.d_embed) ,initializer='''zeros''' ,trainable=__lowercase ,name='''cluster_weight''' ) snake_case__ : Optional[int] = self.add_weight( shape=(self.n_clusters,) ,initializer='''zeros''' ,trainable=__lowercase ,name='''cluster_bias''' ) if self.div_val == 1: for i in range(len(self.cutoffs ) ): if self.d_proj != self.d_embed: snake_case__ : int = self.add_weight( shape=(self.d_embed, self.d_proj) ,initializer='''zeros''' ,trainable=__lowercase ,name=F"""out_projs_._{i}""" ,) self.out_projs.append(__lowercase ) else: self.out_projs.append(__lowercase ) snake_case__ : Optional[int] = self.add_weight( shape=(self.vocab_size, self.d_embed) ,initializer='''zeros''' ,trainable=__lowercase ,name=F"""out_layers_._{i}_._weight""" ,) snake_case__ : int = self.add_weight( shape=(self.vocab_size,) ,initializer='''zeros''' ,trainable=__lowercase ,name=F"""out_layers_._{i}_._bias""" ,) self.out_layers.append((weight, bias) ) else: for i in range(len(self.cutoffs ) ): snake_case__ , snake_case__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1] snake_case__ : List[str] = self.d_embed // (self.div_val**i) snake_case__ : str = self.add_weight( shape=(d_emb_i, self.d_proj) ,initializer='''zeros''' ,trainable=__lowercase ,name=F"""out_projs_._{i}""" ) self.out_projs.append(__lowercase ) snake_case__ : Optional[int] = self.add_weight( shape=(r_idx - l_idx, d_emb_i) ,initializer='''zeros''' ,trainable=__lowercase ,name=F"""out_layers_._{i}_._weight""" ,) snake_case__ : Optional[int] = self.add_weight( shape=(r_idx - l_idx,) ,initializer='''zeros''' ,trainable=__lowercase ,name=F"""out_layers_._{i}_._bias""" ,) self.out_layers.append((weight, bias) ) super().build(__lowercase ) @staticmethod def __lowerCamelCase ( __lowercase :List[str] ,__lowercase :int ,__lowercase :str ,__lowercase :Dict=None ): snake_case__ : Dict = x if proj is not None: snake_case__ : str = tf.einsum('''ibd,ed->ibe''' ,__lowercase ,__lowercase ) return tf.einsum('''ibd,nd->ibn''' ,__lowercase ,__lowercase ) + b @staticmethod def __lowerCamelCase ( __lowercase :int ,__lowercase :Any ): snake_case__ : Union[str, Any] = shape_list(__lowercase ) snake_case__ : int = tf.range(lp_size[0] ,dtype=target.dtype ) snake_case__ : Tuple = tf.stack([r, target] ,1 ) return tf.gather_nd(__lowercase ,__lowercase ) def __lowerCamelCase ( self :str ,__lowercase :Optional[int] ,__lowercase :Tuple ,__lowercase :Union[str, Any]=True ,__lowercase :str=False ): snake_case__ : Any = 0 if self.n_clusters == 0: snake_case__ : int = self._logit(__lowercase ,self.out_layers[0][0] ,self.out_layers[0][1] ,self.out_projs[0] ) if 
target is not None: snake_case__ : List[Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__lowercase ,logits=__lowercase ) snake_case__ : Dict = tf.nn.log_softmax(__lowercase ,axis=-1 ) else: snake_case__ : Any = shape_list(__lowercase ) snake_case__ : Dict = [] snake_case__ : Tuple = tf.zeros(hidden_sizes[:2] ) for i in range(len(self.cutoffs ) ): snake_case__ , snake_case__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: snake_case__ : List[str] = (target >= l_idx) & (target < r_idx) snake_case__ : Dict = tf.where(__lowercase ) snake_case__ : Any = tf.boolean_mask(__lowercase ,__lowercase ) - l_idx if self.div_val == 1: snake_case__ : List[Any] = self.out_layers[0][0][l_idx:r_idx] snake_case__ : Dict = self.out_layers[0][1][l_idx:r_idx] else: snake_case__ : Optional[int] = self.out_layers[i][0] snake_case__ : Optional[int] = self.out_layers[i][1] if i == 0: snake_case__ : Dict = tf.concat([cur_W, self.cluster_weight] ,0 ) snake_case__ : Union[str, Any] = tf.concat([cur_b, self.cluster_bias] ,0 ) snake_case__ : List[str] = self._logit(__lowercase ,__lowercase ,__lowercase ,self.out_projs[0] ) snake_case__ : Tuple = tf.nn.log_softmax(__lowercase ) out.append(head_logprob[..., : self.cutoffs[0]] ) if target is not None: snake_case__ : Any = tf.boolean_mask(__lowercase ,__lowercase ) snake_case__ : Union[str, Any] = self._gather_logprob(__lowercase ,__lowercase ) else: snake_case__ : Tuple = self._logit(__lowercase ,__lowercase ,__lowercase ,self.out_projs[i] ) snake_case__ : Dict = tf.nn.log_softmax(__lowercase ) snake_case__ : Optional[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster snake_case__ : Union[str, Any] = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(__lowercase ) if target is not None: snake_case__ : Dict = tf.boolean_mask(__lowercase ,__lowercase ) snake_case__ : Tuple = tf.boolean_mask(__lowercase ,__lowercase ) snake_case__ : str = self._gather_logprob(__lowercase ,__lowercase ) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(__lowercase ,-cur_logprob ,shape_list(__lowercase ) ) snake_case__ : Any = tf.concat(__lowercase ,axis=-1 ) if target is not None: if return_mean: snake_case__ : List[str] = tf.reduce_mean(__lowercase ) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(__lowercase ) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(__lowercase ,name=self.name ,aggregation='''mean''' if return_mean else '''''' ) return out
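The build step above allocates one output weight per vocabulary cluster and shrinks the embedding width by a factor of `div_val` per cluster (`d_embed // div_val**i`), with cluster boundaries taken from `[0] + cutoffs + [vocab_size]`. A minimal standalone sketch of the resulting per-cluster shapes, using made-up numbers that do not come from the layer above:

# Hypothetical values purely for illustration.
vocab_size, d_embed, div_val = 50_000, 1024, 4
cutoffs = [2_000, 10_000, vocab_size]   # mirrors `cutoffs + [vocab_size]`
cutoff_ends = [0] + cutoffs             # mirrors `[0] + self.cutoffs`

for i in range(len(cutoffs)):
    l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]
    d_emb_i = d_embed // (div_val**i)   # mirrors `self.d_embed // (self.div_val**i)`
    print(f"cluster {i}: tokens [{l_idx}, {r_idx}), embedding width {d_emb_i}")
# cluster 0: tokens [0, 2000), embedding width 1024
# cluster 1: tokens [2000, 10000), embedding width 256
# cluster 2: tokens [10000, 50000), embedding width 64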
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    # Validation: every node must be a TreeNode whose data is castable to float.
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    # BST property: each node's data lies strictly between the bounds inherited
    # from its ancestors.
    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
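A quick usage sketch of the checker above, using the names as reconstructed in that block:

# Valid BST:   5          Invalid tree: 5
#             / \                       / \
#            3   8                     8   3
root = TreeNode(5, TreeNode(3), TreeNode(8))
print(is_binary_search_tree(root))  # True

bad = TreeNode(5, TreeNode(8), TreeNode(3))
print(is_binary_search_tree(bad))   # False (left child 8 violates the upper bound 5)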
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __lowerCamelCase : List[Any] = logging.get_logger(__name__) class __magic_name__ ( A__ ): lowercase : Tuple =['''pixel_values'''] def __init__( self : Any , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : float = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 2_55 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : List[str] , ) -> None: '''simple docstring''' super().__init__(**UpperCamelCase__ ) UpperCAmelCase = size if size is not None else {"shortest_edge": 3_84} UpperCAmelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) UpperCAmelCase = do_resize UpperCAmelCase = size # Default value set here for backwards compatibility where the value in config is None UpperCAmelCase = crop_pct if crop_pct is not None else 2_24 / 2_56 UpperCAmelCase = resample UpperCAmelCase = do_rescale UpperCAmelCase = rescale_factor UpperCAmelCase = do_normalize UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : float , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ) -> np.ndarray: '''simple docstring''' UpperCAmelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}' ) UpperCAmelCase = size["shortest_edge"] if shortest_edge < 3_84: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct UpperCAmelCase = int(shortest_edge / crop_pct ) UpperCAmelCase = get_resize_output_image_size(UpperCamelCase__ , size=UpperCamelCase__ , default_to_square=UpperCamelCase__ ) UpperCAmelCase = resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=UpperCamelCase__ , size=(shortest_edge, shortest_edge) , data_format=UpperCamelCase__ , **UpperCamelCase__ ) else: # warping (no cropping) when evaluated at 384 or larger return resize( UpperCamelCase__ , size=(shortest_edge, shortest_edge) , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] , ) -> List[str]: '''simple docstring''' return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] , ) -> np.ndarray: '''simple docstring''' return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : float = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Any , ) -> PIL.Image.Image: '''simple docstring''' UpperCAmelCase = do_resize if do_resize is not None else self.do_resize UpperCAmelCase = crop_pct if crop_pct is not None else self.crop_pct UpperCAmelCase = resample if resample is not None else self.resample UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase = image_mean if image_mean is not None else self.image_mean UpperCAmelCase = image_std if image_std is not None else self.image_std UpperCAmelCase = size if size is not None else self.size UpperCAmelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) UpperCAmelCase = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." 
) if do_resize and size["shortest_edge"] < 3_84 and crop_pct is None: raise ValueError("crop_pct must be specified if size < 384." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. UpperCAmelCase = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: UpperCAmelCase = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , crop_pct=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images] if do_rescale: UpperCAmelCase = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_normalize: UpperCAmelCase = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images] UpperCAmelCase = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] UpperCAmelCase = {"pixel_values": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
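The resize logic above distinguishes two regimes: below 384 pixels the shortest edge is first resized to `shortest_edge / crop_pct` and then center-cropped back to the requested size, while at 384 and above the image is warped directly without cropping. A small standalone sketch of that branch with illustrative numbers (the helper below is hypothetical, not part of the processor):

def resize_plan(shortest_edge: int, crop_pct: float) -> str:
    # Mirrors the branch in the resize method above.
    if shortest_edge < 384:
        resize_to = int(shortest_edge / crop_pct)
        return f"resize shortest edge to {resize_to}, then center-crop to {shortest_edge}x{shortest_edge}"
    return f"resize (warp) directly to {shortest_edge}x{shortest_edge}"


print(resize_plan(224, 224 / 256))  # resize shortest edge to 256, then center-crop to 224x224
print(resize_plan(384, 224 / 256))  # resize (warp) directly to 384x384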
"""simple docstring""" import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated A_ : List[str] =collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ A_ : Any ="""https://storage.googleapis.com/cvdf-datasets/mnist/""" def SCREAMING_SNAKE_CASE_ ( snake_case : Tuple )-> Optional[int]: _lowerCamelCase = numpy.dtype(numpy.uintaa ).newbyteorder('>' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=snake_case )[0] @deprecated(snake_case , 'Please use tf.data to implement this functionality.' ) def SCREAMING_SNAKE_CASE_ ( snake_case : Union[str, Any] )-> Dict: print('Extracting' , f.name ) with gzip.GzipFile(fileobj=snake_case ) as bytestream: _lowerCamelCase = _readaa(snake_case ) if magic != 2_051: raise ValueError( 'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) ) _lowerCamelCase = _readaa(snake_case ) _lowerCamelCase = _readaa(snake_case ) _lowerCamelCase = _readaa(snake_case ) _lowerCamelCase = bytestream.read(rows * cols * num_images ) _lowerCamelCase = numpy.frombuffer(snake_case , dtype=numpy.uinta ) _lowerCamelCase = data.reshape(snake_case , snake_case , snake_case , 1 ) return data @deprecated(snake_case , 'Please use tf.one_hot on tensors.' ) def SCREAMING_SNAKE_CASE_ ( snake_case : List[Any] , snake_case : List[str] )-> Tuple: _lowerCamelCase = labels_dense.shape[0] _lowerCamelCase = numpy.arange(snake_case ) * num_classes _lowerCamelCase = numpy.zeros((num_labels, num_classes) ) _lowerCamelCase = 1 return labels_one_hot @deprecated(snake_case , 'Please use tf.data to implement this functionality.' ) def SCREAMING_SNAKE_CASE_ ( snake_case : str , snake_case : Optional[int]=False , snake_case : str=10 )-> List[Any]: print('Extracting' , f.name ) with gzip.GzipFile(fileobj=snake_case ) as bytestream: _lowerCamelCase = _readaa(snake_case ) if magic != 2_049: raise ValueError( 'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) ) _lowerCamelCase = _readaa(snake_case ) _lowerCamelCase = bytestream.read(snake_case ) _lowerCamelCase = numpy.frombuffer(snake_case , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(snake_case , snake_case ) return labels class __a : @deprecated( a__ , 'Please use alternatives such as official/mnist/_DataSet.py' ' from tensorflow/models.' , ) def __init__( self , a__ , a__ , a__=False , a__=False , a__=dtypes.floataa , a__=True , a__=None , ): _lowerCamelCase , _lowerCamelCase = random_seed.get_seed(a__ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) _lowerCamelCase = dtypes.as_dtype(a__ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype ) if fake_data: _lowerCamelCase = 1_00_00 _lowerCamelCase = one_hot else: assert ( images.shape[0] == labels.shape[0] ), F'images.shape: {images.shape} labels.shape: {labels.shape}' _lowerCamelCase = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 _lowerCamelCase = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. 
_lowerCamelCase = images.astype(numpy.floataa ) _lowerCamelCase = numpy.multiply(a__ , 1.0 / 255.0 ) _lowerCamelCase = images _lowerCamelCase = labels _lowerCamelCase = 0 _lowerCamelCase = 0 @property def snake_case_ ( self ): return self._images @property def snake_case_ ( self ): return self._labels @property def snake_case_ ( self ): return self._num_examples @property def snake_case_ ( self ): return self._epochs_completed def snake_case_ ( self , a__ , a__=False , a__=True ): if fake_data: _lowerCamelCase = [1] * 7_84 _lowerCamelCase = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(a__ )], [fake_label for _ in range(a__ )], ) _lowerCamelCase = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: _lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(a__ ) _lowerCamelCase = self.images[perma] _lowerCamelCase = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch _lowerCamelCase = self._num_examples - start _lowerCamelCase = self._images[start : self._num_examples] _lowerCamelCase = self._labels[start : self._num_examples] # Shuffle the data if shuffle: _lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(a__ ) _lowerCamelCase = self.images[perm] _lowerCamelCase = self.labels[perm] # Start next epoch _lowerCamelCase = 0 _lowerCamelCase = batch_size - rest_num_examples _lowerCamelCase = self._index_in_epoch _lowerCamelCase = self._images[start:end] _lowerCamelCase = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size _lowerCamelCase = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(snake_case , 'Please write your own downloading logic.' ) def SCREAMING_SNAKE_CASE_ ( snake_case : List[Any] , snake_case : Union[str, Any] , snake_case : int )-> int: if not gfile.Exists(snake_case ): gfile.MakeDirs(snake_case ) _lowerCamelCase = os.path.join(snake_case , snake_case ) if not gfile.Exists(snake_case ): urllib.request.urlretrieve(snake_case , snake_case ) # noqa: S310 with gfile.GFile(snake_case ) as f: _lowerCamelCase = f.size() print('Successfully downloaded' , snake_case , snake_case , 'bytes.' 
) return filepath @deprecated( snake_case , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' ) def SCREAMING_SNAKE_CASE_ ( snake_case : Optional[int] , snake_case : Dict=False , snake_case : Tuple=False , snake_case : str=dtypes.floataa , snake_case : str=True , snake_case : Union[str, Any]=5_000 , snake_case : List[Any]=None , snake_case : str=DEFAULT_SOURCE_URL , )-> Tuple: if fake_data: def fake(): return _DataSet( [] , [] , fake_data=snake_case , one_hot=snake_case , dtype=snake_case , seed=snake_case ) _lowerCamelCase = fake() _lowerCamelCase = fake() _lowerCamelCase = fake() return _Datasets(train=snake_case , validation=snake_case , test=snake_case ) if not source_url: # empty string check _lowerCamelCase = DEFAULT_SOURCE_URL _lowerCamelCase = 'train-images-idx3-ubyte.gz' _lowerCamelCase = 'train-labels-idx1-ubyte.gz' _lowerCamelCase = 't10k-images-idx3-ubyte.gz' _lowerCamelCase = 't10k-labels-idx1-ubyte.gz' _lowerCamelCase = _maybe_download( snake_case , snake_case , source_url + train_images_file ) with gfile.Open(snake_case , 'rb' ) as f: _lowerCamelCase = _extract_images(snake_case ) _lowerCamelCase = _maybe_download( snake_case , snake_case , source_url + train_labels_file ) with gfile.Open(snake_case , 'rb' ) as f: _lowerCamelCase = _extract_labels(snake_case , one_hot=snake_case ) _lowerCamelCase = _maybe_download( snake_case , snake_case , source_url + test_images_file ) with gfile.Open(snake_case , 'rb' ) as f: _lowerCamelCase = _extract_images(snake_case ) _lowerCamelCase = _maybe_download( snake_case , snake_case , source_url + test_labels_file ) with gfile.Open(snake_case , 'rb' ) as f: _lowerCamelCase = _extract_labels(snake_case , one_hot=snake_case ) if not 0 <= validation_size <= len(snake_case ): _lowerCamelCase = ( 'Validation size should be between 0 and ' f'{len(snake_case )}. Received: {validation_size}.' ) raise ValueError(snake_case ) _lowerCamelCase = train_images[:validation_size] _lowerCamelCase = train_labels[:validation_size] _lowerCamelCase = train_images[validation_size:] _lowerCamelCase = train_labels[validation_size:] _lowerCamelCase = {'dtype': dtype, 'reshape': reshape, 'seed': seed} _lowerCamelCase = _DataSet(snake_case , snake_case , **snake_case ) _lowerCamelCase = _DataSet(snake_case , snake_case , **snake_case ) _lowerCamelCase = _DataSet(snake_case , snake_case , **snake_case ) return _Datasets(train=snake_case , validation=snake_case , test=snake_case )
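The `_dense_to_one_hot` helper above builds the one-hot matrix through flat indexing: row i starts at flat offset i * num_classes, so adding the label value gives the flat position of the 1. The assignment target of its final `= 1` is not preserved above; the standalone sketch below assumes it writes into the flat view of the zero matrix:

import numpy as np

labels_dense = np.array([2, 0, 3])
num_classes = 4
num_labels = labels_dense.shape[0]

index_offset = np.arange(num_labels) * num_classes        # [0, 4, 8]
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1

print(labels_one_hot)
# [[0. 0. 1. 0.]
#  [1. 0. 0. 0.]
#  [0. 0. 0. 1.]]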
"""simple docstring""" import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class __a ( lowerCAmelCase__ ): def __init__( self , a__ , a__=None , a__=True , a__=None , **a__ ): _lowerCamelCase = parent _lowerCamelCase = config_class _lowerCamelCase = has_text_modality _lowerCamelCase = kwargs _lowerCamelCase = common_properties def snake_case_ ( self ): _lowerCamelCase = self.config_class(**self.inputs_dict ) _lowerCamelCase = ( ['hidden_size', 'num_attention_heads', 'num_hidden_layers'] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(['vocab_size'] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(a__ , a__ ) , msg=F'`{prop}` does not exist' ) # Test that config has the common properties as setter for idx, name in enumerate(a__ ): try: setattr(a__ , a__ , a__ ) self.parent.assertEqual( getattr(a__ , a__ ) , a__ , msg=F'`{name} value {idx} expected, but was {getattr(a__ , a__ )}' ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) for idx, name in enumerate(a__ ): try: _lowerCamelCase = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(a__ , a__ ) , a__ , msg=F'`{name} value {idx} expected, but was {getattr(a__ , a__ )}' ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def snake_case_ ( self ): _lowerCamelCase = self.config_class(**self.inputs_dict ) _lowerCamelCase = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , a__ ) def snake_case_ ( self ): _lowerCamelCase = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _lowerCamelCase = os.path.join(a__ , 'config.json' ) config_first.to_json_file(a__ ) _lowerCamelCase = self.config_class.from_json_file(a__ ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def snake_case_ ( self ): _lowerCamelCase = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(a__ ) _lowerCamelCase = self.config_class.from_pretrained(a__ ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def snake_case_ ( self ): _lowerCamelCase = self.config_class(**self.inputs_dict ) _lowerCamelCase = 'test' with tempfile.TemporaryDirectory() as tmpdirname: _lowerCamelCase = os.path.join(a__ , a__ ) config_first.save_pretrained(a__ ) _lowerCamelCase = self.config_class.from_pretrained(a__ , subfolder=a__ ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def snake_case_ ( self ): _lowerCamelCase = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) self.parent.assertEqual(len(config.labelaid ) , 5 ) _lowerCamelCase = 3 self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def snake_case_ ( self ): if self.config_class.is_composition: return _lowerCamelCase = self.config_class() self.parent.assertIsNotNone(a__ ) def snake_case_ ( self ): _lowerCamelCase = copy.deepcopy(a__ ) 
_lowerCamelCase = self.config_class(**a__ ) _lowerCamelCase = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(('torch_dtype', config.torch_dtype, torch.floataa) ) elif getattr(a__ , a__ ) != value: wrong_values.append((key, getattr(a__ , a__ ), value) ) if len(a__ ) > 0: _lowerCamelCase = '\n'.join([F'- {v[0]}: got {v[1]} instead of {v[2]}' for v in wrong_values] ) raise ValueError(F'The following keys were not properly set in the config:\n{errors}' ) def snake_case_ ( self ): self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
'''simple docstring''' import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def lowercase_ ( _lowercase ) -> Dict: '''simple docstring''' lowerCamelCase_ : Dict = checkpoints.load_tax_checkpoint(_lowercase ) lowerCamelCase_ : Dict = flatten_dict(_lowercase ) return flax_params def lowercase_ ( _lowercase ) -> Optional[int]: '''simple docstring''' lowerCamelCase_ : List[str] = {} lowerCamelCase_ : str = { '''token_embedder''': '''embeddings''', '''encoder_norm''': '''layernorm''', '''kernel''': '''weight''', '''.out''': '''.output''', '''scale''': '''weight''', '''embedders_0.pos_embedding''': '''row_embedder.weight''', '''embedders_1.pos_embedding''': '''column_embedder.weight''', } lowerCamelCase_ : Tuple = { '''query''': '''attention.query''', '''key''': '''attention.key''', '''value''': '''attention.value''', '''output.dense''': '''output''', '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''', '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''', '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''', '''mlp.''': '''mlp.DenseReluDense.''', '''pre_mlp_layer_norm''': '''mlp.layer_norm''', '''self_attention.o''': '''self_attention.attention.o''', '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''', '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''', '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''', } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key lowerCamelCase_ : Union[str, Any] = '''.'''.join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): lowerCamelCase_ : Dict = new_key.replace(_lowercase , _lowercase ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): lowerCamelCase_ : int = new_key.replace(_lowercase , _lowercase ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number lowerCamelCase_ : List[Any] = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , _lowercase ) lowerCamelCase_ : Optional[int] = new_key.replace('''encoder''' , '''encoder.encoder''' ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number lowerCamelCase_ : List[str] = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , _lowercase ) lowerCamelCase_ : Optional[int] = flax_dict[key] lowerCamelCase_ : str = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): lowerCamelCase_ : str = torch.from_numpy(converted_dict[key].T ) else: lowerCamelCase_ : Tuple = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def lowercase_ ( _lowercase , _lowercase , _lowercase=False , _lowercase=False ) -> List[str]: '''simple docstring''' lowerCamelCase_ : str = get_flax_param(_lowercase ) if not use_large: lowerCamelCase_ : List[str] = PixaStructVisionConfig() lowerCamelCase_ : List[str] = PixaStructTextConfig() else: lowerCamelCase_ : Any = PixaStructVisionConfig( hidden_size=1_536 , d_ff=3_968 , num_attention_heads=24 , num_hidden_layers=18 ) lowerCamelCase_ : Any = 
PixaStructTextConfig(hidden_size=1_536 , d_ff=3_968 , num_heads=24 , num_layers=18 ) lowerCamelCase_ : Union[str, Any] = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=_lowercase ) lowerCamelCase_ : Optional[Any] = PixaStructForConditionalGeneration(_lowercase ) lowerCamelCase_ : str = rename_and_convert_flax_params(_lowercase ) model.load_state_dict(_lowercase ) lowerCamelCase_ : List[str] = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' ) lowerCamelCase_ : List[str] = PixaStructImageProcessor() lowerCamelCase_ : int = PixaStructProcessor(image_processor=_lowercase , tokenizer=_lowercase ) if use_large: lowerCamelCase_ : List[Any] = 4_096 lowerCamelCase_ : Tuple = True # mkdir if needed os.makedirs(_lowercase , exist_ok=_lowercase ) model.save_pretrained(_lowercase ) processor.save_pretrained(_lowercase ) print('''Model saved in {}'''.format(_lowercase ) ) if __name__ == "__main__": __lowercase : List[Any] = argparse.ArgumentParser() parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''') parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''') __lowercase : int = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
def binary_multiply(a: int, b: int) -> int:
    # Russian-peasant multiplication: double `a`, halve `b`, and add `a` on set bits of `b`.
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    # Same doubling scheme, but every addition is reduced modulo `c`.
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
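A quick worked check of the two functions above (names as reconstructed):

# 9 = 0b1001, so the doubling loop adds the 1st and 8th multiples of 3: 3 + 24 = 27.
assert binary_multiply(3, 9) == 27
# Same accumulation, reduced modulo c at every addition: 27 % 5 == 2.
assert binary_mod_multiply(3, 9, 5) == 2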
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert in descending order so the resulting list reads ascending from head.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    # Stacks compare by their top (last) element so bisect can place new elements.
    def __lt__(self, other) -> bool:
        return self[-1] < other[-1]

    def __eq__(self, other) -> bool:
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
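A quick usage check of the reconstructed `patience_sort` above:

print(patience_sort([1, 9, 5, 21, 17, 6]))  # [1, 5, 6, 9, 17, 21]
print(patience_sort([-1, -56, 7, 0]))       # [-56, -1, 0, 7]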
from __future__ import annotations

from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        # Follow parent pointers until a node is its own component representative.
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        # Re-point every node at its current component representative.
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        # Attach the smaller component to the larger one.
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            # Record the cheapest edge leaving each component.
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            # Add every selected edge that still joins two different components.
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes

        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """Doctest hook; exercised via `doctest.testmod()` below."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
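A small usage sketch of the reconstructed graph class above; the example graph is hypothetical:

graph = Graph(4)
for u, v, w in [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 4)]:
    graph.add_edge(u, v, w)

# Prints each added edge and finishes with:
# "The total weight of the minimal spanning tree is: 6"
graph.boruvka()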
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    # Plain recursion: count ordered ways to reach `target` using values in `array`.
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    # Top-down recursion with memoization in `dp_array`.
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    # Bottom-up tabulation: dp_array[i] counts ordered ways to form the sum i.
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
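A worked check for the demo values above (array=[1, 2, 5], target=5), using the names as reconstructed:

# Ordered decompositions of 5: 1+1+1+1+1 (1), 2+1+1+1 (4 orderings),
# 2+2+1 (3 orderings), and 5 itself (1), for 1 + 4 + 3 + 1 = 9 in total.
assert combination_sum_iv(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_dp_array(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9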
import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () a_ : List[str] = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). a_ : List[Any] = [0, 2_5, 5_0] a_ : Any = [2_5, 5_0, 7_5] a_ : str = fuzz.membership.trimf(X, abca) a_ : int = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. a_ : List[str] = np.ones(7_5) a_ : List[str] = np.zeros((7_5,)) # 1. Union = max(µA(x), µB(x)) a_ : Optional[Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) a_ : Optional[Any] = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) a_ : int = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) a_ : Tuple = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] a_ : int = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) a_ : Optional[int] = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] a_ : Optional[int] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] a_ : Union[str, Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title('Young') plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title('Middle aged') plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title('union') plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title('intersection') plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title('complement_a') plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title('difference a/b') plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title('alg_sum') plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title('alg_product') plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title('bdd_sum') plt.grid(True) plt.subplot(4, 3, 1_0) plt.plot(X, bdd_difference) plt.title('bdd_difference') plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
"""simple docstring""" import unittest from dataclasses import dataclass import pytest from accelerate.commands.config.config_args import SageMakerConfig from accelerate.utils import ComputeEnvironment from accelerate.utils.launch import _convert_nargs_to_dict @dataclass class snake_case_ ( lowerCamelCase_ ): """simple docstring""" A_ = ComputeEnvironment.AMAZON_SAGEMAKER A_ = True A_ = '''ml.p3.2xlarge''' A_ = '''accelerate_sagemaker_execution_role''' A_ = '''hf-sm''' A_ = '''us-east-1''' A_ = 1 A_ = '''accelerate-sagemaker-1''' A_ = '''1.6''' A_ = '''4.4''' A_ = '''train.py''' A_ = [ '''--model_name_or_path''', '''bert''', '''--do_train''', '''False''', '''--epochs''', '''3''', '''--learning_rate''', '''5e-5''', '''--max_steps''', '''50.5''', ] A_ = [ '''--model_name_or_path''', '''bert''', '''--do_train''', '''--do_test''', '''False''', '''--do_predict''', '''--epochs''', '''3''', '''--learning_rate''', '''5e-5''', '''--max_steps''', '''50.5''', ] class snake_case_ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self) -> List[Any]: # If no defaults are changed, `to_kwargs` returns an empty dict. UpperCamelCase = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args) assert isinstance(converted_args['''model_name_or_path'''] , lowerCamelCase_) assert isinstance(converted_args['''do_train'''] , lowerCamelCase_) assert isinstance(converted_args['''epochs'''] , lowerCamelCase_) assert isinstance(converted_args['''learning_rate'''] , lowerCamelCase_) assert isinstance(converted_args['''max_steps'''] , lowerCamelCase_) with pytest.raises(lowerCamelCase_): _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
'''simple docstring''' import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) A_ : Dict = logging.getLogger(__name__) A_ : int = "Hello world! cécé herlolip" A_ : Dict = namedtuple( "BertAbsConfig", [ "temp_dir", "large", "use_bert_emb", "finetune_bert", "encoder", "share_emb", "max_pos", "enc_layers", "enc_hidden_size", "enc_heads", "enc_ff_size", "enc_dropout", "dec_layers", "dec_hidden_size", "dec_heads", "dec_ff_size", "dec_dropout", ], ) def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : List[str] ) -> List[Any]: '''simple docstring''' snake_case__ : str = BertAbsConfig( temp_dir=""".""" , finetune_bert=__magic_name__ , large=__magic_name__ , share_emb=__magic_name__ , use_bert_emb=__magic_name__ , encoder="""bert""" , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , ) snake_case__ : Optional[Any] = torch.load(__magic_name__ , lambda __magic_name__ , __magic_name__ : storage ) snake_case__ : int = AbsSummarizer(__magic_name__ , torch.device("""cpu""" ) , __magic_name__ ) original.eval() snake_case__ : Optional[int] = BertAbsSummarizer(__magic_name__ , torch.device("""cpu""" ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info("""convert the model""" ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info("""Make sure that the models' outputs are identical""" ) snake_case__ : str = BertTokenizer.from_pretrained("""bert-base-uncased""" ) # prepare the model inputs snake_case__ : Any = tokenizer.encode("""This is sample éàalj'-.""" ) encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(__magic_name__ )) ) snake_case__ : int = torch.tensor(__magic_name__ ).unsqueeze(0 ) snake_case__ : int = tokenizer.encode("""This is sample 3 éàalj'-.""" ) decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(__magic_name__ )) ) snake_case__ : Tuple = torch.tensor(__magic_name__ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass snake_case__ : Optional[int] = encoder_input_ids snake_case__ : Tuple = decoder_input_ids snake_case__ : int = None snake_case__ : Optional[Any] = None snake_case__ : Optional[int] = None snake_case__ : Optional[Any] = None snake_case__ : str = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. 
# We make sure that the outputs of the full stack are identical snake_case__ : Union[str, Any] = original(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )[0] snake_case__ : Dict = original.generator(__magic_name__ ) snake_case__ : Optional[Any] = new_model( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )[0] snake_case__ : Union[str, Any] = new_model.generator(__magic_name__ ) snake_case__ : int = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print("""Maximum absolute difference beween weights: {:.2f}""".format(__magic_name__ ) ) snake_case__ : Optional[Any] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print("""Maximum absolute difference beween weights: {:.2f}""".format(__magic_name__ ) ) snake_case__ : Dict = torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) if are_identical: logging.info("""all weights are equal up to 1e-3""" ) else: raise ValueError("""the weights are different. The new model is likely different from the original one.""" ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info("""saving the model's state dictionary""" ) torch.save( new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" ) if __name__ == "__main__": A_ : Optional[Any] = argparse.ArgumentParser() parser.add_argument( "--bertabs_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model.", ) A_ : Union[str, Any] = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
'''simple docstring''' import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def UpperCamelCase__ ( __magic_name__ : Union[str, Any] , __magic_name__ : str , __magic_name__ : Any ) -> Optional[Any]: '''simple docstring''' snake_case__ : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ ) snake_case__ : Optional[Any] = FlaxAutoModelForSeqaSeqLM.from_config(config=__magic_name__ ) snake_case__ : str = checkpoints.load_tax_checkpoint(__magic_name__ ) snake_case__ : Optional[int] = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""] if config.model_type == "t5": snake_case__ : Tuple = """SelfAttention""" if config.model_type == "longt5" and config.encoder_attention_type == "local": snake_case__ : List[Any] = """LocalSelfAttention""" elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": snake_case__ : List[str] = """TransientGlobalSelfAttention""" else: raise ValueError( """Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`""" """ attribute with a value from ['local', 'transient-global].""" ) # Encoder for layer_index in range(config.num_layers ): snake_case__ : Union[str, Any] = f"layers_{str(__magic_name__ )}" # Self-Attention snake_case__ : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""] snake_case__ : List[Any] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""] snake_case__ : Optional[Any] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""] snake_case__ : Optional[int] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": snake_case__ : Optional[Any] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""] # Layer Normalization snake_case__ : Any = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""] if split_mlp_wi: snake_case__ : List[Any] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""] snake_case__ : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""] else: snake_case__ : Union[str, Any] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""] snake_case__ : Optional[Any] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""] # Layer Normalization snake_case__ : Any = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""] # Assigning snake_case__ : int = flax_model.params["""encoder"""]["""block"""][str(__magic_name__ )]["""layer"""] snake_case__ : int = tax_attention_key snake_case__ : Optional[int] = tax_attention_out snake_case__ : Union[str, Any] = tax_attention_query snake_case__ : Tuple = tax_attention_value snake_case__ : Dict = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": snake_case__ : Optional[int] = tax_global_layer_norm if split_mlp_wi: snake_case__ : List[str] = tax_mlp_wi_a snake_case__ : Any = tax_mlp_wi_a else: snake_case__ : Union[str, Any] = tax_mlp_wi snake_case__ : Optional[Any] = tax_mlp_wo snake_case__ : List[str] = 
tax_mlp_layer_norm snake_case__ : List[Any] = flax_model_encoder_layer_block # Only for layer 0: snake_case__ : Optional[Any] = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T snake_case__ : str = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": snake_case__ : str = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T snake_case__ : Dict = tax_encoder_global_rel_embedding # Assigning snake_case__ : List[Any] = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""] snake_case__ : Dict = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): snake_case__ : Optional[Any] = f"layers_{str(__magic_name__ )}" # Self-Attention snake_case__ : Union[str, Any] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""] snake_case__ : str = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""] snake_case__ : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""] snake_case__ : Any = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""] # Layer Normalization snake_case__ : Union[str, Any] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][ """scale""" ] # Encoder-Decoder-Attention snake_case__ : Optional[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""] snake_case__ : int = tax_enc_dec_attention_module["""key"""]["""kernel"""] snake_case__ : Any = tax_enc_dec_attention_module["""out"""]["""kernel"""] snake_case__ : Union[str, Any] = tax_enc_dec_attention_module["""query"""]["""kernel"""] snake_case__ : int = tax_enc_dec_attention_module["""value"""]["""kernel"""] # Layer Normalization snake_case__ : Dict = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""] # MLP if split_mlp_wi: snake_case__ : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""] snake_case__ : int = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""] else: snake_case__ : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""] snake_case__ : int = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""] # Layer Normalization snake_case__ : Any = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""] # Assigning snake_case__ : Union[str, Any] = flax_model.params["""decoder"""]["""block"""][str(__magic_name__ )]["""layer"""] snake_case__ : Dict = tax_attention_key snake_case__ : int = tax_attention_out snake_case__ : str = tax_attention_query snake_case__ : Any = tax_attention_value snake_case__ : List[Any] = tax_pre_attention_layer_norm snake_case__ : Any = tax_enc_dec_attention_key snake_case__ : Any = tax_enc_dec_attention_out snake_case__ : Any = tax_enc_dec_attention_query snake_case__ : str = tax_enc_dec_attention_value snake_case__ : Dict = tax_cross_layer_norm if split_mlp_wi: snake_case__ : Tuple = tax_mlp_wi_a snake_case__ : Dict = tax_mlp_wi_a else: snake_case__ : int = tax_mlp_wi snake_case__ : List[Any] = tax_mlp_wo snake_case__ : Union[str, Any] = txa_mlp_layer_norm snake_case__ : int = 
flax_model_decoder_layer_block # Decoder Normalization snake_case__ : str = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""] snake_case__ : List[str] = txa_decoder_norm # Only for layer 0: snake_case__ : List[str] = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T snake_case__ : Any = tax_decoder_rel_embedding # Token Embeddings snake_case__ : Optional[Any] = tax_model["""target"""]["""token_embedder"""]["""embedding"""] snake_case__ : Optional[int] = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: snake_case__ : str = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""] flax_model.save_pretrained(__magic_name__ ) print("""T5X Model was sucessfully converted!""" ) if __name__ == "__main__": A_ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint." ) parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.") parser.add_argument( "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model." ) A_ : Dict = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
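The converter above rebuilds the Flax parameter tree from the T5X arrays and finishes with flax_model.save_pretrained. Below is a minimal, hypothetical smoke test of the resulting dump folder; the folder path and the tokenizer checkpoint are placeholders and must match whatever --flax_dump_folder_path and --config_name were actually passed to the script.

# Hypothetical sanity check of a converted LongT5/T5 dump; adjust the placeholders.
from transformers import AutoTokenizer, FlaxAutoModelForSeq2SeqLM

dump_folder = "./longt5-flax-dump"  # the --flax_dump_folder_path used above
model = FlaxAutoModelForSeq2SeqLM.from_pretrained(dump_folder)
tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base")  # assumed to match --config_name

inputs = tokenizer("summarize: T5X stores weights as nested dictionaries of arrays.", return_tensors="np")
sequences = model.generate(inputs.input_ids, max_length=32).sequences
print(tokenizer.batch_decode(sequences, skip_special_tokens=True))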
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __lowercase : Any = logging.get_logger(__name__) def lowercase ( __A : Optional[Any] ) -> Dict: '''simple docstring''' snake_case : Dict = """huggingface/label-files""" snake_case : int = """imagenet-1k-id2label.json""" snake_case : Tuple = json.load(open(hf_hub_download(__A , __A , repo_type="""dataset""" ) , """r""" ) ) snake_case : Any = {int(__A ): v for k, v in idalabel.items()} snake_case : Dict = {v: k for k, v in idalabel.items()} snake_case : Any = """std_conv""" if """bit""" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" snake_case : List[Any] = BitConfig( conv_layer=__A , num_labels=1000 , idalabel=__A , labelaid=__A , ) return config def lowercase ( __A : Optional[int] ) -> Union[str, Any]: '''simple docstring''' if "stem.conv" in name: snake_case : List[str] = name.replace("""stem.conv""" , """bit.embedder.convolution""" ) if "blocks" in name: snake_case : List[str] = name.replace("""blocks""" , """layers""" ) if "head.fc" in name: snake_case : Optional[int] = name.replace("""head.fc""" , """classifier.1""" ) if name.startswith("""norm""" ): snake_case : Optional[Any] = """bit.""" + name if "bit" not in name and "classifier" not in name: snake_case : Tuple = """bit.encoder.""" + name return name def lowercase ( ) -> Optional[int]: '''simple docstring''' snake_case : int = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case : Optional[Any] = Image.open(requests.get(__A , stream=__A ).raw ) return im @torch.no_grad() def lowercase ( __A : Any , __A : Union[str, Any] , __A : str=False ) -> Optional[int]: '''simple docstring''' snake_case : str = get_config(__A ) # load original model from timm snake_case : Tuple = create_model(__A , pretrained=__A ) timm_model.eval() # load state_dict of original model snake_case : List[str] = timm_model.state_dict() for key in state_dict.copy().keys(): snake_case : List[Any] = state_dict.pop(__A ) snake_case : Union[str, Any] = val.squeeze() if """head""" in key else val # load HuggingFace model snake_case : List[Any] = BitForImageClassification(__A ) model.eval() model.load_state_dict(__A ) # create image processor snake_case : Dict = create_transform(**resolve_data_config({} , model=__A ) ) snake_case : Optional[Any] = transform.transforms snake_case : List[Any] = { """bilinear""": PILImageResampling.BILINEAR, """bicubic""": PILImageResampling.BICUBIC, """nearest""": PILImageResampling.NEAREST, } snake_case : Union[str, Any] = BitImageProcessor( do_resize=__A , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__A , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__A , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) snake_case : Dict = prepare_img() snake_case : List[str] = 
transform(__A ).unsqueeze(0 ) snake_case : int = processor(__A , return_tensors="""pt""" ).pixel_values # verify pixel values assert torch.allclose(__A , __A ) # verify logits with torch.no_grad(): snake_case : Optional[int] = model(__A ) snake_case : Dict = outputs.logits print("""Logits:""" , logits[0, :3] ) print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] ) snake_case : int = timm_model(__A ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__A , outputs.logits , atol=1E-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(__A ).mkdir(exist_ok=__A ) print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(__A ) processor.save_pretrained(__A ) if push_to_hub: print(f"""Pushing model {model_name} and processor to the hub""" ) model.push_to_hub(f"""ybelkada/{model_name}""" ) processor.push_to_hub(f"""ybelkada/{model_name}""" ) if __name__ == "__main__": __lowercase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''resnetv2_50x1_bitm''', type=str, help='''Name of the BiT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub.''', ) __lowercase : Union[str, Any] = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
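Once the conversion and the logit comparison against the timm model have passed, the dump behaves like any other transformers image classifier. A hedged sketch of reloading it; the folder path is a placeholder for --pytorch_dump_folder_path, and the test image URL is the same COCO image the script's prepare_img uses.

# Reload a converted BiT dump and classify the reference image.
import requests
import torch
from PIL import Image
from transformers import BitForImageClassification, BitImageProcessor

dump_folder = "./bit-dump"  # placeholder for --pytorch_dump_folder_path
processor = BitImageProcessor.from_pretrained(dump_folder)
model = BitForImageClassification.from_pretrained(dump_folder)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])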
"""simple docstring""" def A__ ( A__ ) -> list[int]: '''simple docstring''' if length <= 0 or not isinstance(A__ , A__ ): raise ValueError("Length must be a positive integer." ) return [n * (2 * n - 1) for n in range(A__ )] if __name__ == "__main__": print(hexagonal_numbers(length=5)) print(hexagonal_numbers(length=10))
'''simple docstring''' from collections import deque class __SCREAMING_SNAKE_CASE : def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->None: '''simple docstring''' __a = process_name # process name __a = arrival_time # arrival time of the process # completion time of finished process or last interrupted time __a = arrival_time __a = burst_time # remaining burst time __a = 0 # total time of the process wait in ready queue __a = 0 # time from arrival time to completion time class __SCREAMING_SNAKE_CASE : def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) ->None: '''simple docstring''' __a = number_of_queues # time slice of queues that round robin algorithm applied __a = time_slices # unfinished process is in this ready_queue __a = queue # current time __a = current_time # finished process is in this sequence queue __a = deque() def __UpperCamelCase ( self ) ->list[str]: '''simple docstring''' __a = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def __UpperCamelCase ( self , lowerCamelCase ) ->list[int]: '''simple docstring''' __a = [] for i in range(len(lowerCamelCase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def __UpperCamelCase ( self , lowerCamelCase ) ->list[int]: '''simple docstring''' __a = [] for i in range(len(lowerCamelCase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def __UpperCamelCase ( self , lowerCamelCase ) ->list[int]: '''simple docstring''' __a = [] for i in range(len(lowerCamelCase ) ): completion_times.append(queue[i].stop_time ) return completion_times def __UpperCamelCase ( self , lowerCamelCase ) ->list[int]: '''simple docstring''' return [q.burst_time for q in queue] def __UpperCamelCase ( self , lowerCamelCase ) ->int: '''simple docstring''' process.waiting_time += self.current_time - process.stop_time return process.waiting_time def __UpperCamelCase ( self , lowerCamelCase ) ->deque[Process]: '''simple docstring''' __a = deque() # sequence deque of finished process while len(lowerCamelCase ) != 0: __a = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(lowerCamelCase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 __a = 0 # set the process's turnaround time because it is finished __a = self.current_time - cp.arrival_time # set the completion time __a = self.current_time # add the process to queue that has finished queue finished.append(lowerCamelCase ) self.finish_queue.extend(lowerCamelCase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase ) ->tuple[deque[Process], deque[Process]]: '''simple docstring''' __a = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(lowerCamelCase ) ): __a = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(lowerCamelCase ) # if the burst time of process is bigger than 
time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time __a = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(lowerCamelCase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished __a = 0 # set the finish time __a = self.current_time # update the process' turnaround time because it is finished __a = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(lowerCamelCase ) self.finish_queue.extend(lowerCamelCase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def __UpperCamelCase ( self ) ->deque[Process]: '''simple docstring''' # all queues except last one have round_robin algorithm for i in range(self.number_of_queues - 1 ): __a , __a = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest __UpperCamelCase : str = Process("""P1""", 0, 53) __UpperCamelCase : Union[str, Any] = Process("""P2""", 0, 17) __UpperCamelCase : Union[str, Any] = Process("""P3""", 0, 68) __UpperCamelCase : Optional[Any] = Process("""P4""", 0, 24) __UpperCamelCase : int = 3 __UpperCamelCase : Any = [17, 25] __UpperCamelCase : Tuple = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])}) __UpperCamelCase : Optional[int] = Process("""P1""", 0, 53) __UpperCamelCase : Union[str, Any] = Process("""P2""", 0, 17) __UpperCamelCase : Union[str, Any] = Process("""P3""", 0, 68) __UpperCamelCase : Optional[int] = Process("""P4""", 0, 24) __UpperCamelCase : Optional[Any] = 3 __UpperCamelCase : Union[str, Any] = [17, 25] __UpperCamelCase : Optional[Any] = deque([Pa, Pa, Pa, Pa]) __UpperCamelCase : int = MLFQ(number_of_queues, time_slices, queue, 0) __UpperCamelCase : Optional[Any] = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( f"""waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print completion times of processes(P1, P2, P3, P4) print( f"""completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print total turnaround times of processes(P1, P2, P3, P4) print( f"""turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print sequence of finished processes print( f"""sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}""" )
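The __main__ demo above was damaged in extraction: the four Process instances all collapsed onto the same variable name, so the block no longer runs as written. Below is a minimal sketch of the intended driver, assuming the two classes are the Process and MLFQ named in that demo and that the method names it prints (multi_level_feedback_queue, calculate_sequence_of_finish_queue, calculate_waiting_time, calculate_completion_time, calculate_turnaround_time) are the real ones hidden behind the garbled definitions.

# Reconstructed driver for the multi-level feedback queue above (names assumed as noted).
from collections import deque

P1 = Process("P1", 0, 53)
P2 = Process("P2", 0, 17)
P3 = Process("P3", 0, 68)
P4 = Process("P4", 0, 24)

number_of_queues = 3
time_slices = [17, 25]  # round-robin quanta for the first two queues; the last queue runs FCFS
ready_queue = deque([P1, P2, P3, P4])

mlfq = MLFQ(number_of_queues, time_slices, ready_queue, 0)
mlfq.multi_level_feedback_queue()

print("sequence of finished processes:", mlfq.calculate_sequence_of_finish_queue())
print("waiting times:   ", mlfq.calculate_waiting_time([P1, P2, P3, P4]))
print("completion times:", mlfq.calculate_completion_time([P1, P2, P3, P4]))
print("turnaround times:", mlfq.calculate_turnaround_time([P1, P2, P3, P4]))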
'''simple docstring''' import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: List[Any] ) -> Dict: """simple docstring""" __a , __a = image.size __a , __a = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 __a = image.resize((w, h), resample=PIL_INTERPOLATION['lanczos'] ) __a = np.array(SCREAMING_SNAKE_CASE__ ).astype(np.floataa ) / 2_5_5.0 __a = image[None].transpose(0, 3, 1, 2 ) __a = torch.from_numpy(SCREAMING_SNAKE_CASE__ ) return 2.0 * image - 1.0 class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ): def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) ->Dict: '''simple docstring''' super().__init__() self.register_modules(vqvae=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase ) @torch.no_grad() def __call__( self , lowerCamelCase = None , lowerCamelCase = 1 , lowerCamelCase = 100 , lowerCamelCase = 0.0 , lowerCamelCase = None , lowerCamelCase = "pil" , lowerCamelCase = True , ) ->Union[Tuple, ImagePipelineOutput]: '''simple docstring''' if isinstance(lowerCamelCase , PIL.Image.Image ): __a = 1 elif isinstance(lowerCamelCase , torch.Tensor ): __a = image.shape[0] else: raise ValueError(F"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowerCamelCase )}""" ) if isinstance(lowerCamelCase , PIL.Image.Image ): __a = preprocess(lowerCamelCase ) __a , __a = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image __a = (batch_size, self.unet.config.in_channels // 2, height, width) __a = next(self.unet.parameters() ).dtype __a = randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase ) __a = image.to(device=self.device , dtype=lowerCamelCase ) # set timesteps and move to the correct device self.scheduler.set_timesteps(lowerCamelCase , device=self.device ) __a = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler __a = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __a = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __a = {} if accepts_eta: __a = eta for t in self.progress_bar(lowerCamelCase ): # concat latents and low resolution image in the channel dimension. 
__a = torch.cat([latents, image] , dim=1 ) __a = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase ) # predict the noise residual __a = self.unet(lowerCamelCase , lowerCamelCase ).sample # compute the previous noisy sample x_t -> x_t-1 __a = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample # decode the image latents with the VQVAE __a = self.vqvae.decode(lowerCamelCase ).sample __a = torch.clamp(lowerCamelCase , -1.0 , 1.0 ) __a = image / 2 + 0.5 __a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __a = self.numpy_to_pil(lowerCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCamelCase )
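At every denoising step the pipeline concatenates the current latents with the low-resolution image along the channel axis, then decodes the result through the VQ-VAE. A hedged usage sketch follows, assuming the class above is diffusers' LDMSuperResolutionPipeline and that the public checkpoint named below is the intended one.

# Upscale a small image with the latent-diffusion super-resolution pipeline (assumptions as noted above).
import requests
import torch
from PIL import Image
from diffusers import LDMSuperResolutionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages").to(device)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
low_res = Image.open(requests.get(url, stream=True).raw).convert("RGB").resize((128, 128))

upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")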
'''simple docstring''' import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class UpperCamelCase__ ( unittest.TestCase ): def __a ( self : Any ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() def __a ( self : Union[str, Any] ): '''simple docstring''' a__ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) a__ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) a__ = "xvjiarui/stable-diffusion-2-inpainting" a__ , a__ = FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase , safety_checker=lowerCamelCase ) a__ = "Face of a yellow cat, high resolution, sitting on a park bench" a__ = jax.random.PRNGKey(0 ) a__ = 5_0 a__ = jax.device_count() a__ = num_samples * [prompt] a__ = num_samples * [init_image] a__ = num_samples * [mask_image] a__ , a__ , a__ = pipeline.prepare_inputs(lowerCamelCase , lowerCamelCase , lowerCamelCase ) # shard inputs and rng a__ = replicate(lowerCamelCase ) a__ = jax.random.split(lowerCamelCase , jax.device_count() ) a__ = shard(lowerCamelCase ) a__ = shard(lowerCamelCase ) a__ = shard(lowerCamelCase ) a__ = pipeline( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , jit=lowerCamelCase ) a__ = output.images.reshape(lowerCamelCase , 5_1_2 , 5_1_2 , 3 ) a__ = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] a__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) a__ = jnp.array( [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
"""Graph coloring (m-coloring) via backtracking."""


def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A color is valid if no already-colored neighbour uses the same color.
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(
    graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int
) -> bool:
    # Base case: every vertex has been assigned a color.
    if index == len(graph):
        return True

    # Recursive step: try each color for the current vertex.
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring of the remaining vertices
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    # Return one valid assignment of colors, or an empty list if none exists.
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
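A tiny illustration of the backtracking colorer on a 5-vertex adjacency matrix. The names valid_coloring and util_color come from the calls inside the snippet itself; the top-level entry point is reconstructed as color, and the expected outputs in the comments were worked out by hand.

# 5-vertex graph given as an adjacency matrix (1 = edge).
graph = [
    [0, 1, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
    [0, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
]
print(color(graph, max_colors=3))  # [0, 1, 0, 1, 0] -- a valid 3-coloring
print(color(graph, max_colors=1))  # []              -- impossible with a single color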
import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse('''0.8.3'''): raise Exception('''requires gluonnlp == 0.8.3''') if version.parse(mx.__version__) != version.parse('''1.5.0'''): raise Exception('''requires mxnet == 1.5.0''') logging.set_verbosity_info() __lowerCamelCase : str = logging.get_logger(__name__) __lowerCamelCase : Dict = """The Nymphenburg Palace is a beautiful palace in Munich!""" def lowercase__ ( __A: Optional[int] ,__A: int ): '''simple docstring''' __magic_name__ : Union[str, Any] = { "attention_cell": "multi_head", "num_layers": 4, "units": 1_0_2_4, "hidden_size": 7_6_8, "max_length": 5_1_2, "num_heads": 8, "scaled": True, "dropout": 0.1, "use_residual": True, "embed_size": 1_0_2_4, "embed_dropout": 0.1, "word_embed": None, "layer_norm_eps": 1e-5, "token_type_vocab_size": 2, } __magic_name__ : int = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py __magic_name__ : List[Any] = BERTEncoder( attention_cell=predefined_args['''attention_cell'''] ,num_layers=predefined_args['''num_layers'''] ,units=predefined_args['''units'''] ,hidden_size=predefined_args['''hidden_size'''] ,max_length=predefined_args['''max_length'''] ,num_heads=predefined_args['''num_heads'''] ,scaled=predefined_args['''scaled'''] ,dropout=predefined_args['''dropout'''] ,output_attention=__A ,output_all_encodings=__A ,use_residual=predefined_args['''use_residual'''] ,activation=predefined_args.get('''activation''' ,'''gelu''' ) ,layer_norm_eps=predefined_args.get('''layer_norm_eps''' ,__A ) ,) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later __magic_name__ : List[str] = "openwebtext_ccnews_stories_books_cased" # Specify download folder to Gluonnlp's vocab __magic_name__ : Tuple = os.path.join(get_home_dir() ,'''models''' ) __magic_name__ : Tuple = _load_vocab(__A ,__A ,__A ,cls=__A ) __magic_name__ : Any = nlp.model.BERTModel( __A ,len(__A ) ,units=predefined_args['''units'''] ,embed_size=predefined_args['''embed_size'''] ,embed_dropout=predefined_args['''embed_dropout'''] ,word_embed=predefined_args['''word_embed'''] ,use_pooler=__A ,use_token_type_embed=__A ,token_type_vocab_size=predefined_args['''token_type_vocab_size'''] ,use_classifier=__A ,use_decoder=__A ,) original_bort.load_parameters(__A ,cast_dtype=__A ,ignore_extra=__A ) __magic_name__ : str = original_bort._collect_params_with_prefix() # Build our config 🤗 __magic_name__ : Union[str, Any] = { "architectures": ["BertForMaskedLM"], "attention_probs_dropout_prob": predefined_args["dropout"], "hidden_act": "gelu", "hidden_dropout_prob": predefined_args["dropout"], "hidden_size": predefined_args["embed_size"], "initializer_range": 0.02, "intermediate_size": predefined_args["hidden_size"], "layer_norm_eps": predefined_args["layer_norm_eps"], "max_position_embeddings": predefined_args["max_length"], 
"model_type": "bort", "num_attention_heads": predefined_args["num_heads"], "num_hidden_layers": predefined_args["num_layers"], "pad_token_id": 1, # 2 = BERT, 1 = RoBERTa "type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa "vocab_size": len(__A ), } __magic_name__ : str = BertConfig.from_dict(__A ) __magic_name__ : List[str] = BertForMaskedLM(__A ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(__A: Optional[Any] ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(__A: Any ,__A: Union[str, Any] ): __magic_name__ : Optional[int] = hf_param.shape __magic_name__ : int = to_torch(params[gluon_param] ) __magic_name__ : List[str] = gluon_param.shape assert ( shape_hf == shape_gluon ), F'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers''' return gluon_param __magic_name__ : Union[str, Any] = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight ,'''word_embed.0.weight''' ) __magic_name__ : int = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight ,'''encoder.position_weight''' ) __magic_name__ : int = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias 
,'''encoder.layer_norm.beta''' ) __magic_name__ : Any = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight ,'''encoder.layer_norm.gamma''' ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) __magic_name__ : Union[str, Any] = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): __magic_name__ : BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention __magic_name__ : BertSelfAttention = layer.attention.self __magic_name__ : Any = check_and_map_params( self_attn.key.bias.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' ) __magic_name__ : Union[str, Any] = check_and_map_params( self_attn.key.weight.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' ) __magic_name__ : str = check_and_map_params( self_attn.query.bias.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' ) __magic_name__ : Union[str, Any] = check_and_map_params( self_attn.query.weight.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' ) __magic_name__ : Tuple = check_and_map_params( self_attn.value.bias.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' ) __magic_name__ : Optional[int] = check_and_map_params( self_attn.value.weight.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' ) # self attention output __magic_name__ : BertSelfOutput = layer.attention.output __magic_name__ : int = check_and_map_params( self_output.dense.bias ,F'''encoder.transformer_cells.{i}.proj.bias''' ) __magic_name__ : Union[str, Any] = check_and_map_params( self_output.dense.weight ,F'''encoder.transformer_cells.{i}.proj.weight''' ) __magic_name__ : Optional[Any] = check_and_map_params( self_output.LayerNorm.bias ,F'''encoder.transformer_cells.{i}.layer_norm.beta''' ) __magic_name__ : Tuple = check_and_map_params( self_output.LayerNorm.weight ,F'''encoder.transformer_cells.{i}.layer_norm.gamma''' ) # intermediate __magic_name__ : BertIntermediate = layer.intermediate __magic_name__ : Union[str, Any] = check_and_map_params( intermediate.dense.bias ,F'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' ) __magic_name__ : List[Any] = check_and_map_params( intermediate.dense.weight ,F'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' ) # output __magic_name__ : BertOutput = layer.output __magic_name__ : List[Any] = check_and_map_params( bert_output.dense.bias ,F'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' ) __magic_name__ : List[Any] = check_and_map_params( bert_output.dense.weight ,F'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' ) __magic_name__ : Optional[int] = check_and_map_params( bert_output.LayerNorm.bias ,F'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' ) __magic_name__ : Union[str, Any] = check_and_map_params( bert_output.LayerNorm.weight ,F'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models __magic_name__ : Dict = RobertaTokenizer.from_pretrained('''roberta-base''' ) __magic_name__ : Tuple = tokenizer.encode_plus(__A )["input_ids"] # Get gluon output __magic_name__ : Optional[Any] = mx.nd.array([input_ids] ) __magic_name__ : Any = original_bort(inputs=__A ,token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(__A ) __magic_name__ : List[Any] = BertModel.from_pretrained(__A ) hf_bort_model.eval() __magic_name__ : 
Optional[int] = tokenizer.encode_plus(__A ,return_tensors='''pt''' ) __magic_name__ : Tuple = hf_bort_model(**__A )[0] __magic_name__ : Tuple = output_gluon[0].asnumpy() __magic_name__ : Optional[Any] = output_hf[0].detach().numpy() __magic_name__ : Tuple = np.max(np.abs(hf_layer - gluon_layer ) ).item() __magic_name__ : Optional[int] = np.allclose(__A ,__A ,atol=1e-3 ) if success: print('''✔️ Both model do output the same tensors''' ) else: print('''❌ Both model do **NOT** output the same tensors''' ) print('''Absolute difference is:''' ,__A ) if __name__ == "__main__": __lowerCamelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) __lowerCamelCase : Optional[Any] = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
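After the converter has written the PyTorch dump and the parity check against the Gluon model has passed, the result loads like any BERT-style encoder. A hypothetical follow-up: the dump path stands in for --pytorch_dump_folder_path, while the tokenizer and the sample sentence are the ones the script itself uses.

# Reload a converted Bort dump and run the script's sample sentence through it.
from transformers import BertModel, RobertaTokenizer

dump_folder = "./bort-dump"  # placeholder for --pytorch_dump_folder_path
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")  # Bort reuses the RoBERTa vocabulary
model = BertModel.from_pretrained(dump_folder)

inputs = tokenizer("The Nymphenburg Palace is a beautiful palace in Munich!", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)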
import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class lowerCamelCase ( _lowerCamelCase ,unittest.TestCase ): '''simple docstring''' UpperCamelCase__ =BlenderbotSmallTokenizer UpperCamelCase__ =False def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: super().setUp() __magic_name__ : Union[str, Any] = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] __magic_name__ : Tuple = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) ) __magic_name__ : Tuple = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] __magic_name__ : List[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} __magic_name__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __magic_name__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCamelCase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowerCamelCase_ ) ) def UpperCAmelCase__ ( self : List[str] , **lowerCamelCase_ : Optional[Any] ) -> List[Any]: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ ) def UpperCAmelCase__ ( self : Dict , lowerCamelCase_ : Union[str, Any] ) -> Optional[int]: __magic_name__ : Union[str, Any] = '''adapt act apte''' __magic_name__ : Dict = '''adapt act apte''' return input_text, output_text def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any: __magic_name__ : int = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) __magic_name__ : str = '''adapt act apte''' __magic_name__ : Any = ['''adapt''', '''act''', '''ap@@''', '''te'''] __magic_name__ : List[str] = tokenizer.tokenize(lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) __magic_name__ : Union[str, Any] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] __magic_name__ : List[Any] = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , lowerCamelCase_ ) def UpperCAmelCase__ ( self : int ) -> int: __magic_name__ : Any = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) assert tok('''sam''' ).input_ids == [1384] __magic_name__ : Dict = '''I am a small frog.''' __magic_name__ : Tuple = tok([src_text] , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )['''input_ids'''] __magic_name__ : Tuple = tok.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def UpperCAmelCase__ ( self : Tuple ) -> Dict: __magic_name__ : Tuple = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) __magic_name__ : Any = '''I am a small frog .''' __magic_name__ : List[str] = '''.''' __magic_name__ : Tuple = tok(lowerCamelCase_ )['''input_ids'''] __magic_name__ : Optional[Any] = tok(lowerCamelCase_ )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
from __future__ import annotations from typing import Any class SCREAMING_SNAKE_CASE__ : def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float = 0 ) -> None: a_ , a_ : List[Any] = row, column a_ : List[Any] = [[default_value for c in range(SCREAMING_SNAKE_CASE__ )] for r in range(SCREAMING_SNAKE_CASE__ )] def __str__( self : List[Any] ) -> str: a_ : List[str] = F"""Matrix consist of {self.row} rows and {self.column} columns\n""" # Make string identifier a_ : List[Any] = 0 for row_vector in self.array: for obj in row_vector: a_ : str = max(SCREAMING_SNAKE_CASE__ , len(str(SCREAMING_SNAKE_CASE__ ) ) ) a_ : Optional[int] = F"""%{max_element_length}s""" # Make string and return def single_line(SCREAMING_SNAKE_CASE__ : list[float] ) -> str: nonlocal string_format_identifier a_ : List[str] = '[' line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(SCREAMING_SNAKE_CASE__ ) for row_vector in self.array ) return s def __repr__( self : List[Any] ) -> str: return str(self ) def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : tuple[int, int] ) -> bool: if not (isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ) and len(SCREAMING_SNAKE_CASE__ ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : tuple[int, int] ) -> Any: assert self.validate_indicies(SCREAMING_SNAKE_CASE__ ) return self.array[loc[0]][loc[1]] def __setitem__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : tuple[int, int] , SCREAMING_SNAKE_CASE__ : float ) -> None: assert self.validate_indicies(SCREAMING_SNAKE_CASE__ ) a_ : List[str] = value def __add__( self : List[str] , SCREAMING_SNAKE_CASE__ : Matrix ) -> Matrix: assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert self.row == another.row and self.column == another.column # Add a_ : List[Any] = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): a_ : Any = self[r, c] + another[r, c] return result def __neg__( self : Optional[int] ) -> Matrix: a_ : Any = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): a_ : Tuple = -self[r, c] return result def __sub__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Matrix ) -> Matrix: return self + (-another) def __mul__( self : int , SCREAMING_SNAKE_CASE__ : int | float | Matrix ) -> Matrix: if isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ): # Scalar multiplication a_ : int = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): a_ : List[Any] = self[r, c] * another return result elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # Matrix multiplication assert self.column == another.row a_ : List[Any] = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: a_ : Any = F"""Unsupported type given for another ({type(SCREAMING_SNAKE_CASE__ )})""" raise TypeError(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : str ) -> Matrix: a_ : List[Any] = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): a_ : Any = self[r, c] return result def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : Matrix , SCREAMING_SNAKE_CASE__ : 
Matrix ) -> Any: assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate a_ : List[Any] = v.transpose() a_ : Union[str, Any] = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def SCREAMING_SNAKE_CASE_ ( ) -> None: """simple docstring""" a_ : Any = Matrix(3 , 3 , 0 ) for i in range(3 ): a_ : int = 1 print(F"""a^(-1) is {ainv}""" ) # u, v a_ : Dict = Matrix(3 , 1 , 0 ) a_ , a_ , a_ : Tuple = 1, 2, -3 a_ : List[Any] = Matrix(3 , 1 , 0 ) a_ , a_ , a_ : Union[str, Any] = 4, -2, 5 print(F"""u is {u}""" ) print(F"""v is {v}""" ) print(F"""uv^T is {u * v.transpose()}""" ) # Sherman Morrison print(F"""(a + uv^T)^(-1) is {ainv.sherman_morrison(__A , __A )}""" ) def SCREAMING_SNAKE_CASE_ ( ) -> None: """simple docstring""" import doctest doctest.testmod() testa()
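For reference, the identity the sherman_morrison method implements. The method expects self to already hold A^{-1} (which is why the demo fills ainv with an identity matrix) and returns None when 1 + vᵀA⁻¹u is zero, i.e. when A + uvᵀ is singular:

\[ (A + u v^{\top})^{-1} \;=\; A^{-1} \;-\; \frac{A^{-1} u \, v^{\top} A^{-1}}{1 + v^{\top} A^{-1} u} \]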
from ..utils import DummyObject, requires_backends


# NOTE: the class and method names in this dummy-object module were garbled during
# extraction. The reconstruction below assumes the standard diffusers pattern for a
# placeholder that raises until the `onnx` backend is installed; the class name
# OnnxRuntimeModel is an assumption based on that pattern.
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
'''simple docstring''' import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class lowerCAmelCase ( unittest.TestCase ): a : Union[str, Any] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def lowercase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ): _SCREAMING_SNAKE_CASE = hf_hub_download( repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" ) _SCREAMING_SNAKE_CASE = VideoClassificationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase , top_k=2 ) _SCREAMING_SNAKE_CASE = [ example_video_filepath, "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4", ] return video_classifier, examples def lowercase ( self , UpperCamelCase , UpperCamelCase ): for example in examples: _SCREAMING_SNAKE_CASE = video_classifier(UpperCamelCase ) self.assertEqual( UpperCamelCase , [ {"score": ANY(UpperCamelCase ), "label": ANY(UpperCamelCase )}, {"score": ANY(UpperCamelCase ), "label": ANY(UpperCamelCase )}, ] , ) @require_torch def lowercase ( self ): _SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification" _SCREAMING_SNAKE_CASE = VideoMAEFeatureExtractor( size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10} ) _SCREAMING_SNAKE_CASE = pipeline( "video-classification" , model=UpperCamelCase , feature_extractor=UpperCamelCase , frame_sampling_rate=4 ) _SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" ) _SCREAMING_SNAKE_CASE = video_classifier(UpperCamelCase , top_k=2 ) self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [{"score": 0.51_99, "label": "LABEL_0"}, {"score": 0.48_01, "label": "LABEL_1"}] , ) _SCREAMING_SNAKE_CASE = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ [{"score": 0.51_99, "label": "LABEL_0"}, {"score": 0.48_01, "label": "LABEL_1"}], [{"score": 0.51_99, "label": "LABEL_0"}, {"score": 0.48_01, "label": "LABEL_1"}], ] , ) @require_tf def lowercase ( self ): pass
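Outside the test harness, the same behaviour is reached through the public pipeline factory. A hedged sketch using the exact model id, preprocessing overrides, and video URL from the test above; decoding the clip additionally requires the decord package.

# Video classification through the high-level pipeline API (values copied from the test).
from transformers import VideoMAEFeatureExtractor, pipeline

feature_extractor = VideoMAEFeatureExtractor(size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10})
classifier = pipeline(
    "video-classification",
    model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
    feature_extractor=feature_extractor,
    frame_sampling_rate=4,
)
predictions = classifier(
    "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
    top_k=2,
)
print(predictions)  # e.g. [{"score": 0.52, "label": "LABEL_0"}, {"score": 0.48, "label": "LABEL_1"}]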
'''simple docstring''' import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging _snake_case : Optional[int] = ( """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py""" ) _snake_case : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name def _a ( ): _SCREAMING_SNAKE_CASE = "https://pypi.org/pypi/diffusers/json" _SCREAMING_SNAKE_CASE = json.loads(request.urlopen(_SCREAMING_SNAKE_CASE ).read() )["releases"].keys() return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : version.Version(_SCREAMING_SNAKE_CASE ) ) def _a ( ): # This function has already been executed if HF_MODULES_CACHE already is in the Python path. if HF_MODULES_CACHE in sys.path: return sys.path.append(_SCREAMING_SNAKE_CASE ) os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE = Path(_SCREAMING_SNAKE_CASE ) / "__init__.py" if not init_path.exists(): init_path.touch() def _a ( _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] ): init_hf_modules() _SCREAMING_SNAKE_CASE = Path(_SCREAMING_SNAKE_CASE ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE = dynamic_module_path / "__init__.py" if not init_path.exists(): init_path.touch() def _a ( _SCREAMING_SNAKE_CASE : Optional[Any] ): with open(_SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" ) as f: _SCREAMING_SNAKE_CASE = f.read() # Imports of the form `import .xxx` _SCREAMING_SNAKE_CASE = re.findall("^\s*import\s+\.(\S+)\s*$" , _SCREAMING_SNAKE_CASE , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , _SCREAMING_SNAKE_CASE , flags=re.MULTILINE ) # Unique-ify return list(set(_SCREAMING_SNAKE_CASE ) ) def _a ( _SCREAMING_SNAKE_CASE : List[str] ): _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = [module_file] _SCREAMING_SNAKE_CASE = [] # Let's recurse through all relative imports while not no_change: _SCREAMING_SNAKE_CASE = [] for f in files_to_check: new_imports.extend(get_relative_imports(_SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE = Path(_SCREAMING_SNAKE_CASE ).parent _SCREAMING_SNAKE_CASE = [str(module_path / m ) for m in new_imports] _SCREAMING_SNAKE_CASE = [f for f in new_import_files if f not in all_relative_imports] _SCREAMING_SNAKE_CASE = [F'{f}.py' for f in new_import_files] _SCREAMING_SNAKE_CASE = len(_SCREAMING_SNAKE_CASE ) == 0 all_relative_imports.extend(_SCREAMING_SNAKE_CASE ) return all_relative_imports def _a ( _SCREAMING_SNAKE_CASE : str ): with open(_SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" ) as f: _SCREAMING_SNAKE_CASE = f.read() # Imports of the form `import xxx` _SCREAMING_SNAKE_CASE = re.findall("^\s*import\s+(\S+)\s*$" , _SCREAMING_SNAKE_CASE , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall("^\s*from\s+(\S+)\s+import" , _SCREAMING_SNAKE_CASE , flags=re.MULTILINE ) # Only keep the top-level module _SCREAMING_SNAKE_CASE = [imp.split("." )[0] for imp in imports if not imp.startswith("." 
)] # Unique-ify and test we got them all _SCREAMING_SNAKE_CASE = list(set(_SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE = [] for imp in imports: try: importlib.import_module(_SCREAMING_SNAKE_CASE ) except ImportError: missing_packages.append(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: raise ImportError( "This modeling file requires the following packages that were not found in your environment: " F'{", ".join(_SCREAMING_SNAKE_CASE )}. Run `pip install {" ".join(_SCREAMING_SNAKE_CASE )}`' ) return get_relative_imports(_SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int ): _SCREAMING_SNAKE_CASE = module_path.replace(os.path.sep , "." ) _SCREAMING_SNAKE_CASE = importlib.import_module(_SCREAMING_SNAKE_CASE ) if class_name is None: return find_pipeline_class(_SCREAMING_SNAKE_CASE ) return getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE : List[Any] ): from ..pipelines import DiffusionPipeline _SCREAMING_SNAKE_CASE = dict(inspect.getmembers(_SCREAMING_SNAKE_CASE , inspect.isclass ) ) _SCREAMING_SNAKE_CASE = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , _SCREAMING_SNAKE_CASE ) and cls.__module__.split("." )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:' F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in' F' {loaded_module}.' ) _SCREAMING_SNAKE_CASE = cls return pipeline_class def _a ( _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : bool = False , ): _SCREAMING_SNAKE_CASE = str(_SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if os.path.isfile(_SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE = module_file_or_url _SCREAMING_SNAKE_CASE = "local" elif pretrained_model_name_or_path.count("/" ) == 0: _SCREAMING_SNAKE_CASE = get_diffusers_versions() # cut ".dev0" _SCREAMING_SNAKE_CASE = "v" + ".".join(__version__.split("." )[:3] ) # retrieve github version that matches if revision is None: _SCREAMING_SNAKE_CASE = latest_version if latest_version[1:] in available_versions else "main" logger.info(F'Defaulting to latest_version: {revision}.' ) elif revision in available_versions: _SCREAMING_SNAKE_CASE = F'v{revision}' elif revision == "main": _SCREAMING_SNAKE_CASE = revision else: raise ValueError( F'`custom_revision`: {revision} does not exist. Please make sure to choose one of' F' {", ".join(available_versions + ["main"] )}.' 
) # community pipeline on GitHub _SCREAMING_SNAKE_CASE = COMMUNITY_PIPELINES_URL.format(revision=_SCREAMING_SNAKE_CASE , pipeline=_SCREAMING_SNAKE_CASE ) try: _SCREAMING_SNAKE_CASE = cached_download( _SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , ) _SCREAMING_SNAKE_CASE = "git" _SCREAMING_SNAKE_CASE = pretrained_model_name_or_path + ".py" except EnvironmentError: logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' ) raise else: try: # Load from URL or cache if already cached _SCREAMING_SNAKE_CASE = hf_hub_download( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , ) _SCREAMING_SNAKE_CASE = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) ) except EnvironmentError: logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' ) raise # Check we have all the requirements in our environment _SCREAMING_SNAKE_CASE = check_imports(_SCREAMING_SNAKE_CASE ) # Now we move the module inside our cached dynamic modules. _SCREAMING_SNAKE_CASE = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(_SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE = Path(_SCREAMING_SNAKE_CASE ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(_SCREAMING_SNAKE_CASE , submodule_path / module_file ) for module_needed in modules_needed: _SCREAMING_SNAKE_CASE = F'{module_needed}.py' shutil.copy(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE = use_auth_token elif use_auth_token is True: _SCREAMING_SNAKE_CASE = HfFolder.get_token() else: _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = model_info(_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. 
_SCREAMING_SNAKE_CASE = submodule_path / commit_hash _SCREAMING_SNAKE_CASE = full_submodule + os.path.sep + commit_hash create_dynamic_module(_SCREAMING_SNAKE_CASE ) if not (submodule_path / module_file).exists(): shutil.copy(_SCREAMING_SNAKE_CASE , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( _SCREAMING_SNAKE_CASE , F'{module_needed}.py' , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , ) return os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _a ( _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : bool = False , **_SCREAMING_SNAKE_CASE : Tuple , ): _SCREAMING_SNAKE_CASE = get_cached_module_file( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , ) return get_class_in_module(_SCREAMING_SNAKE_CASE , final_module.replace(".py" , "" ) )
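These helpers are what sit behind loading community pipelines by name: get_cached_module_file fetches the .py file (from the GitHub community folder or from a Hub repo), copies it and its relative imports into the dynamic-modules cache, and get_class_from_dynamic_module imports the pipeline class from the cached copy. A hedged sketch of the public entry point built on top of them; the base model and community pipeline names are illustrative, not taken from this file.

# Loading a community pipeline resolves its source through the helpers above.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="lpw_stable_diffusion",  # fetched from the community folder and cached locally
)
image = pipe("a watercolor painting of a lighthouse at dawn", num_inference_steps=25).images[0]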
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
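With the lazy module registered, the UperNet classes resolve on first attribute access and nothing torch-specific is imported before that. A small sketch (requires torch, per the availability guard above; it is assumed the default UperNetConfig instantiates without arguments):

# Lazy imports resolve transparently at attribute access time.
from transformers import UperNetConfig, UperNetForSemanticSegmentation

config = UperNetConfig()
model = UperNetForSemanticSegmentation(config)
print(sum(p.numel() for p in model.parameters()), "parameters")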
import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def _A ( SCREAMING_SNAKE_CASE : List[str] ): """simple docstring""" a__ : Optional[Any] =SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: a__ : Optional[int] =4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: a__ : int =4 a__ : Optional[int] =48 a__ : str ="pixelshuffle_aux" elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: a__ : str =[6, 6, 6, 6] a__ : Optional[int] =60 a__ : Any =[6, 6, 6, 6] a__ : int ="pixelshuffledirect" elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: a__ : List[str] =4 a__ : Union[str, Any] ="nearest+conv" elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: a__ : str =1 a__ : Optional[Any] =1 a__ : str =126 a__ : Optional[Any] =7 a__ : Optional[int] =2_5_5.0 a__ : str ="" return config def _A ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any ): """simple docstring""" if "patch_embed.proj" in name and "layers" not in name: a__ : Any =name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: a__ : str =name.replace("patch_embed.norm" , "embeddings.patch_embeddings.layernorm" ) if "layers" in name: a__ : Union[str, Any] =name.replace("layers" , "encoder.stages" ) if "residual_group.blocks" in name: a__ : List[Any] =name.replace("residual_group.blocks" , "layers" ) if "attn.proj" in name: a__ : Union[str, Any] =name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: a__ : int =name.replace("attn" , "attention.self" ) if "norm1" in name: a__ : List[Any] =name.replace("norm1" , "layernorm_before" ) if "norm2" in name: a__ : Optional[int] =name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: a__ : Dict =name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: a__ : Optional[int] =name.replace("mlp.fc2" , "output.dense" ) if "q_bias" in name: a__ : List[Any] =name.replace("q_bias" , "query.bias" ) if "k_bias" in name: a__ : Optional[int] =name.replace("k_bias" , "key.bias" ) if "v_bias" in name: a__ : Optional[Any] =name.replace("v_bias" , "value.bias" ) if "cpb_mlp" in name: a__ : List[str] =name.replace("cpb_mlp" , "continuous_position_bias_mlp" ) if "patch_embed.proj" in name: a__ : List[Any] =name.replace("patch_embed.proj" , "patch_embed.projection" ) if name == "norm.weight": a__ : Dict ="layernorm.weight" if name == "norm.bias": a__ : Any ="layernorm.bias" if "conv_first" in name: a__ : Tuple =name.replace("conv_first" , "first_convolution" ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: a__ : List[str] =name.replace("conv_last" , "final_convolution" ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: a__ : str =name.replace("conv_before_upsample.0" , "conv_before_upsample" ) if "upsample.0" in name: a__ : Any =name.replace("upsample.0" , "upsample.convolution_0" ) if "upsample.2" in name: a__ : Optional[int] =name.replace("upsample.2" , "upsample.convolution_1" ) a__ : Any ="upsample." 
+ name elif config.upsampler == "pixelshuffledirect": a__ : str =name.replace("upsample.0.weight" , "upsample.conv.weight" ) a__ : Any =name.replace("upsample.0.bias" , "upsample.conv.bias" ) else: pass else: a__ : Dict ="swin2sr." + name return name def _A ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any ): """simple docstring""" for key in orig_state_dict.copy().keys(): a__ : Dict =orig_state_dict.pop(SCREAMING_SNAKE_CASE ) if "qkv" in key: a__ : str =key.split("." ) a__ : Optional[int] =int(key_split[1] ) a__ : Dict =int(key_split[4] ) a__ : List[Any] =config.embed_dim if "weight" in key: a__ : List[Any] =val[:dim, :] a__ : List[str] =val[dim : dim * 2, :] a__ : Dict =val[-dim:, :] else: a__ : int =val[:dim] a__ : Union[str, Any] =val[dim : dim * 2] a__ : Tuple =val[-dim:] pass else: a__ : Union[str, Any] =val return orig_state_dict def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str ): """simple docstring""" a__ : Optional[Any] =get_config(SCREAMING_SNAKE_CASE ) a__ : Union[str, Any] =SwinaSRForImageSuperResolution(SCREAMING_SNAKE_CASE ) model.eval() a__ : Union[str, Any] =torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location="cpu" ) a__ : Dict =convert_state_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) a__ , a__ : List[Any] =model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE ) if len(SCREAMING_SNAKE_CASE ) > 0: raise ValueError("Missing keys when converting: {}".format(SCREAMING_SNAKE_CASE ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f'''Unexpected key {key} in state_dict''' ) # verify values a__ : str ="https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true" a__ : List[Any] =Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ).convert("RGB" ) a__ : Dict =SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values a__ : List[str] =126 if "Jpeg" in checkpoint_url else 256 a__ : Optional[Any] =Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) a__ : Dict =transforms(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) if config.num_channels == 1: a__ : Tuple =pixel_values[:, 0, :, :].unsqueeze(1 ) a__ : Union[str, Any] =model(SCREAMING_SNAKE_CASE ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: a__ : str =torch.Size([1, 3, 512, 512] ) a__ : List[str] =torch.tensor( [[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: a__ : List[Any] =torch.Size([1, 3, 1_024, 1_024] ) a__ : List[str] =torch.tensor( [[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here a__ : Tuple =torch.Size([1, 3, 1_024, 1_024] ) a__ : Optional[int] =torch.tensor( [[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: a__ : Tuple =torch.Size([1, 3, 512, 512] ) a__ : str =torch.tensor( [[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in 
checkpoint_url: a__ : Optional[int] =torch.Size([1, 3, 1_024, 1_024] ) a__ : Optional[Any] =torch.tensor( [[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] ) assert ( outputs.reconstruction.shape == expected_shape ), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}''' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-3 ) print("Looks ok!" ) a__ : int ={ "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": ( "swin2SR-classical-sr-x2-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": ( "swin2SR-classical-sr-x4-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": ( "swin2SR-compressed-sr-x4-48" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": ( "swin2SR-lightweight-x2-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": ( "swin2SR-realworld-sr-x4-64-bsrgan-psnr" ), } a__ : Any =url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) if push_to_hub: model.push_to_hub(f'''caidas/{model_name}''' ) processor.push_to_hub(f'''caidas/{model_name}''' ) if __name__ == "__main__": UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""", type=str, help="""URL of the original Swin2SR checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""") UpperCAmelCase : Optional[Any] = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
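Once a checkpoint has been converted and pushed by the script above, it can be used directly for super-resolution. A hedged usage sketch: the Swin2SRForImageSuperResolution / Swin2SRImageProcessor class names and the caidas/swin2SR-classical-sr-x2-64 repo come from the conversion script; the rest is an illustrative assumption, not the script's own verification path.

# Hedged sketch: run 2x super-resolution with a converted checkpoint.
import requests
import torch
from PIL import Image
from transformers import Swin2SRForImageSuperResolution, Swin2SRImageProcessor

url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

processor = Swin2SRImageProcessor()
model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-classical-sr-x2-64")

inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.reconstruction.shape)  # roughly 2x the input height and width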
'''Modified Euler (Heun) method for the initial value problem y' = f(x, y).'''
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    '''Integrate from x0 to x_end with a predictor-corrector (trapezoidal) update.'''
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor: a plain explicit Euler step.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slopes at both ends of the interval.
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y_predict)
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
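A short usage sketch with the function and argument names as reconstructed above: for dy/dx = y with y(0) = 1, the numerical solution at x = 1 should approach e.

import numpy as np

# dy/dx = y, y(0) = 1 has the exact solution y = exp(x).
solution = euler_modified(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
print(solution[-1], np.e)  # the final sample should be close to e ≈ 2.71828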
'''Greatest common divisor and modular multiplicative inverse (extended Euclid).'''


def greatest_common_divisor(a: int, b: int) -> int:
    '''Iterative Euclidean algorithm.'''
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    '''Return x with (a * x) % m == 1, raising ValueError when no inverse exists.'''
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # Extended Euclidean algorithm: track Bezout coefficients alongside the remainders.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
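A short usage sketch with small numbers that can be checked by hand:

# 3 * 9 = 27 ≡ 1 (mod 26), so 9 is the modular inverse of 3 modulo 26.
print(find_mod_inverse(3, 26))          # 9
print(greatest_common_divisor(12, 18))  # 6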
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __lowerCAmelCase : List[str] =get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class UpperCAmelCase ( __a , unittest.TestCase ): __lowercase = DebertaVaTokenizer __lowercase = DebertaVaTokenizerFast __lowercase = True __lowercase = True def UpperCAmelCase_ ( self :str )-> str: super().setUp() # We have a SentencePiece fixture for testing A__ = DebertaVaTokenizer(a__ , unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase_ ( self :str , lowercase_ :int )-> List[str]: A__ = "this is a test" A__ = "this is a test" return input_text, output_text def UpperCAmelCase_ ( self :Tuple )-> List[Any]: A__ = "<pad>" A__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ ) def UpperCAmelCase_ ( self :str )-> Optional[int]: A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "[PAD]" ) self.assertEqual(len(a__ ) , 3_00_01 ) def UpperCAmelCase_ ( self :Tuple )-> Any: self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 ) def UpperCAmelCase_ ( self :List[Any] )-> str: # fmt: off A__ = " \tHeLLo!how \n Are yoU? " A__ = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on A__ = DebertaVaTokenizer(a__ , do_lower_case=a__ ) A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) A__ = DebertaVaTokenizerFast(a__ , do_lower_case=a__ ) A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def UpperCAmelCase_ ( self :Optional[int] )-> Optional[Any]: pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def UpperCAmelCase_ ( self :List[Any] )-> int: pass def UpperCAmelCase_ ( self :Dict )-> List[str]: # fmt: off A__ = "I was born in 92000, and this is falsé." A__ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on A__ = DebertaVaTokenizer(a__ , split_by_punct=a__ ) A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) A__ = DebertaVaTokenizerFast(a__ , split_by_punct=a__ ) A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) def UpperCAmelCase_ ( self :List[Any] )-> List[Any]: # fmt: off A__ = "I was born in 92000, and this is falsé." 
A__ = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on A__ = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__ ) A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) A__ = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__ ) A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) def UpperCAmelCase_ ( self :Any )-> Optional[int]: # fmt: off A__ = "I was born in 92000, and this is falsé." A__ = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on A__ = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__ ) A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) A__ = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__ ) A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) def UpperCAmelCase_ ( self :Optional[Any] )-> str: # fmt: off A__ = "I was born in 92000, and this is falsé." A__ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on A__ = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__ ) A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) A__ = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__ ) A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) def UpperCAmelCase_ ( self :Union[str, Any] )-> Dict: # fmt: off A__ = " \tHeLLo!how \n Are yoU? " A__ = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on A__ = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__ ) A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) A__ = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__ ) A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) def UpperCAmelCase_ ( self :str )-> Optional[Any]: A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = "I was born in 92000, and this is falsé." 
A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) ) A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) ) self.assertListEqual(a__ , a__ ) A__ = tokenizer.encode(a__ , add_special_tokens=a__ ) A__ = rust_tokenizer.encode(a__ , add_special_tokens=a__ ) self.assertListEqual(a__ , a__ ) A__ = self.get_rust_tokenizer() A__ = tokenizer.encode(a__ ) A__ = rust_tokenizer.encode(a__ ) self.assertListEqual(a__ , a__ ) def UpperCAmelCase_ ( self :Tuple )-> Optional[Any]: A__ = "This is a test" A__ = [13, 1, 43_98, 25, 21, 12_89] A__ = ["▁", "T", "his", "▁is", "▁a", "▁test"] A__ = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] A__ = DebertaVaTokenizer(a__ , keep_accents=a__ ) A__ = DebertaVaTokenizerFast(a__ , keep_accents=a__ ) A__ = tokenizer.encode(a__ , add_special_tokens=a__ ) self.assertListEqual(a__ , a__ ) A__ = tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) A__ = tokenizer.convert_ids_to_tokens(a__ ) self.assertListEqual(a__ , a__ ) A__ = rust_tokenizer.encode(a__ , add_special_tokens=a__ ) self.assertListEqual(a__ , a__ ) A__ = rust_tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) A__ = rust_tokenizer.convert_ids_to_tokens(a__ ) self.assertListEqual(a__ , a__ ) # fmt: off A__ = "I was born in 92000, and this is falsé." A__ = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] A__ = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] A__ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on A__ = tokenizer.encode(a__ , add_special_tokens=a__ ) self.assertListEqual(a__ , a__ ) A__ = tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) A__ = tokenizer.convert_ids_to_tokens(a__ ) self.assertListEqual(a__ , a__ ) A__ = rust_tokenizer.encode(a__ , add_special_tokens=a__ ) self.assertListEqual(a__ , a__ ) A__ = rust_tokenizer.tokenize(a__ ) self.assertListEqual(a__ , a__ ) A__ = rust_tokenizer.convert_ids_to_tokens(a__ ) self.assertListEqual(a__ , a__ ) def UpperCAmelCase_ ( self :List[Any] )-> Tuple: A__ = DebertaVaTokenizer(a__ ) A__ = tokenizer.encode("sequence builders" ) A__ = tokenizer.encode("multi-sequence build" ) A__ = tokenizer.build_inputs_with_special_tokens(a__ ) A__ = tokenizer.build_inputs_with_special_tokens(a__ , a__ ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , a__ ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , a__ , ) @slow def UpperCAmelCase_ ( self :Tuple )-> str: # fmt: off A__ = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a__ , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
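A hedged usage sketch for the tokenizer exercised by the integration test above (the microsoft/deberta-v2-xlarge checkpoint name comes from that test; the surrounding call is illustrative):

# Hedged sketch: tokenize the sample sentence used throughout the tests.
from transformers import DebertaV2Tokenizer

tokenizer = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
print(tokenizer.tokenize("I was born in 92000, and this is falsé."))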
'''Audio/text processor class for CLAP.'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    '''Wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor.'''

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            # Merge the audio features into the text encoding when both modalities are given.
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forwarded to the underlying tokenizer.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the underlying tokenizer.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
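A hedged usage sketch: the laion/clap-htsat-unfused checkpoint name and the 48 kHz sampling rate are assumptions; any CLAP checkpoint that ships this processor should behave the same way.

# Hedged sketch: encode a text/audio pair in a single processor call.
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")  # assumed checkpoint
audio = np.random.randn(48_000)  # one second of placeholder audio at 48 kHz
inputs = processor(text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt")
print(sorted(inputs.keys()))  # input_features plus the tokenizer outputs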
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class FeatureExtractionSavingTestMixin:
    # Subclasses provide `feature_extraction_class` and `feat_extract_dict`.
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
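A hedged sketch of how a concrete test case plugs into the mixin; the feature extractor class and constructor kwargs here are illustrative assumptions.

# Hedged sketch: a concrete test case only supplies the class under test and its kwargs;
# the mixin contributes the JSON and save/load round-trip tests.
import unittest

from transformers import Wav2Vec2FeatureExtractor


class Wav2Vec2FeatureExtractionSavingTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor
    feat_extract_dict = {"feature_size": 1, "sampling_rate": 16_000, "padding_value": 0.0}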
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    '''Scrape the IMDb Top 250 chart and return a mapping of title to rating.'''
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    '''Write the scraped chart to a CSV file with a title column and a rating column.'''
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
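A short usage sketch; note the scraper depends on the current IMDb page layout, which may change.

# Print the five highest-rated titles from the scraped chart.
movies = get_imdb_top_250_movies()
for title, rating in sorted(movies.items(), key=lambda item: item[1], reverse=True)[:5]:
    print(f"{rating:.1f}  {title}")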
import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class _A : def __init__( self : Tuple , _A : int , _A : int=13 , _A : Any=30 , _A : int=2 , _A : str=3 , _A : Tuple=True , _A : Union[str, Any]=True , _A : int=32 , _A : Union[str, Any]=5 , _A : Union[str, Any]=4 , _A : List[Any]=37 , _A : Tuple="gelu" , _A : Union[str, Any]=0.1 , _A : Any=0.1 , _A : Optional[Any]=10 , _A : List[str]=0.02 , _A : Dict=3 , _A : Tuple=None , _A : str=2 , ) -> Optional[Any]: """simple docstring""" lowercase : Dict = parent lowercase : Optional[int] = batch_size lowercase : Any = image_size lowercase : Any = patch_size lowercase : List[str] = num_channels lowercase : Optional[Any] = is_training lowercase : List[Any] = use_labels lowercase : Optional[Any] = hidden_size lowercase : int = num_hidden_layers lowercase : Union[str, Any] = num_attention_heads lowercase : Union[str, Any] = intermediate_size lowercase : Union[str, Any] = hidden_act lowercase : Optional[Any] = hidden_dropout_prob lowercase : List[Any] = attention_probs_dropout_prob lowercase : Tuple = type_sequence_label_size lowercase : str = initializer_range lowercase : Union[str, Any] = scope lowercase : int = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowercase : List[str] = (image_size // patch_size) ** 2 lowercase : Union[str, Any] = num_patches + 2 def __a ( self : Any ) -> Union[str, Any]: """simple docstring""" lowercase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase : List[Any] = None if self.use_labels: lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : str = self.get_config() return config, pixel_values, labels def __a ( self : Tuple ) -> Tuple: """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __a ( self : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : List[Any] ) -> Dict: """simple docstring""" lowercase : Any = DeiTModel(config=_A ) model.to(_A ) model.eval() lowercase : Union[str, Any] = model(_A ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : Union[str, Any] , _A : Any , _A : Optional[int] , _A : List[str] ) -> Union[str, Any]: """simple docstring""" lowercase : Optional[int] = DeiTForMaskedImageModeling(config=_A ) model.to(_A ) model.eval() lowercase : Any = model(_A ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowercase : List[Any] = 1 lowercase : Union[str, Any] = DeiTForMaskedImageModeling(_A ) model.to(_A ) model.eval() lowercase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase : Optional[Any] = model(_A ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __a ( self : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : List[str] ) -> Any: """simple docstring""" lowercase : Union[str, Any] = self.type_sequence_label_size lowercase : List[Any] = DeiTForImageClassification(_A ) model.to(_A ) model.eval() lowercase : Tuple = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowercase : Any = 1 lowercase : Optional[Any] = DeiTForImageClassification(_A ) model.to(_A ) model.eval() lowercase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase : Optional[int] = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Optional[int] ) -> Tuple: """simple docstring""" lowercase : str = self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Union[str, Any] = config_and_inputs lowercase : List[str] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _A ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): _UpperCamelCase : int = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) _UpperCamelCase : Tuple = ( { '''feature-extraction''': DeiTModel, '''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) _UpperCamelCase : Optional[int] = False _UpperCamelCase : Any = False _UpperCamelCase : Any = False def __a ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowercase : Optional[int] = DeiTModelTester(self ) lowercase : Optional[Any] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 ) def __a ( self : List[Any] ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def __a ( self : Optional[Any] ) -> List[str]: """simple docstring""" pass def __a ( self : int ) -> str: """simple docstring""" lowercase , lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : int = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , nn.Linear ) ) def __a ( self : Tuple ) -> Union[str, Any]: """simple docstring""" lowercase , lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: lowercase : Union[str, Any] = model_class(_A ) lowercase : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase : List[Any] = [*signature.parameters.keys()] lowercase : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _A ) def __a ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __a ( self : int ) -> int: """simple docstring""" lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_A ) def __a ( self : List[Any] ) -> int: """simple docstring""" lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) def __a ( self : Union[str, Any] , _A : List[Any] , _A : List[str] , _A : str=False ) -> int: """simple docstring""" lowercase : Optional[int] = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def __a ( self : int ) -> Dict: """simple docstring""" if not self.model_tester.is_training: return lowercase , lowercase : str = self.model_tester.prepare_config_and_inputs_for_common() lowercase : Union[str, Any] = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(_A ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue lowercase : Optional[Any] = model_class(_A ) model.to(_A ) model.train() lowercase : Optional[int] = self._prepare_for_class(_A , _A , return_labels=_A ) lowercase : List[Any] = model(**_A ).loss loss.backward() def __a ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" lowercase , lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return lowercase : List[Any] = False lowercase : List[str] = True for model_class in self.all_model_classes: if model_class in get_values(_A ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue lowercase : List[str] = model_class(_A ) model.gradient_checkpointing_enable() model.to(_A ) model.train() lowercase : Optional[Any] = self._prepare_for_class(_A , _A , return_labels=_A ) lowercase : Tuple = model(**_A ).loss loss.backward() def __a ( self : Optional[int] ) -> Dict: """simple docstring""" lowercase , lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowercase : Union[str, Any] = [ {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float}, {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long}, {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(_A ), *get_values(_A ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ): lowercase : List[Any] = problem_type['''title'''] lowercase : List[Any] = 
problem_type['''num_labels'''] lowercase : Optional[int] = model_class(_A ) model.to(_A ) model.train() lowercase : List[Any] = self._prepare_for_class(_A , _A , return_labels=_A ) if problem_type["num_labels"] > 1: lowercase : Optional[Any] = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] ) lowercase : Union[str, Any] = inputs['''labels'''].to(problem_type['''dtype'''] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=_A ) as warning_list: lowercase : List[str] = model(**_A ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def __a ( self : Any ) -> Optional[int]: """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : str = DeiTModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def snake_case( ) -> Union[str, Any]: '''simple docstring''' lowercase : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _A ( unittest.TestCase ): @cached_property def __a ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def __a ( self : Union[str, Any] ) -> Tuple: """simple docstring""" lowercase : str = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to( _A ) lowercase : Any = self.default_image_processor lowercase : List[str] = prepare_img() lowercase : str = image_processor(images=_A , return_tensors='''pt''' ).to(_A ) # forward pass with torch.no_grad(): lowercase : int = model(**_A ) # verify the logits lowercase : Any = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , _A ) lowercase : Dict = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(_A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def __a ( self : List[Any] ) -> str: """simple docstring""" lowercase : Any = DeiTModel.from_pretrained( '''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''' ) lowercase : str = self.default_image_processor lowercase : List[str] = prepare_img() lowercase : List[Any] = image_processor(images=_A , return_tensors='''pt''' ) lowercase : List[str] = inputs.pixel_values.to(_A ) # forward pass to make sure inference works in fp16 with torch.no_grad(): lowercase : List[Any] = model(_A )
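A hedged inference sketch for the checkpoint used in the integration test above; the facebook/deit-base-distilled-patch16-224 name and the fixture image path come from that test, while the surrounding call is illustrative.

# Hedged sketch: classify an image with the distilled DeiT checkpoint.
import torch
from PIL import Image
from transformers import DeiTForImageClassificationWithTeacher, DeiTImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])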
import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class _A ( _lowerCamelCase , unittest.TestCase ): _UpperCamelCase : Tuple = AlbertTokenizer _UpperCamelCase : Dict = AlbertTokenizerFast _UpperCamelCase : Optional[Any] = True _UpperCamelCase : Union[str, Any] = True _UpperCamelCase : List[str] = True def __a ( self : int ) -> int: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowercase : List[Any] = AlbertTokenizer(_A ) tokenizer.save_pretrained(self.tmpdirname ) def __a ( self : List[Any] , _A : Tuple ) -> Any: """simple docstring""" lowercase : int = '''this is a test''' lowercase : List[str] = '''this is a test''' return input_text, output_text def __a ( self : str ) -> List[Any]: """simple docstring""" lowercase : Optional[int] = '''<pad>''' lowercase : Any = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def __a ( self : int ) -> List[Any]: """simple docstring""" lowercase : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''▁eloquent''' ) self.assertEqual(len(_A ) , 30_000 ) def __a ( self : List[str] ) -> Optional[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 30_000 ) def __a ( self : Optional[int] ) -> int: """simple docstring""" if not self.test_rust_tokenizer: return lowercase : List[Any] = self.get_tokenizer() lowercase : str = self.get_rust_tokenizer() lowercase : List[str] = '''I was born in 92000, and this is falsé.''' lowercase : int = tokenizer.tokenize(_A ) lowercase : List[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) lowercase : List[str] = tokenizer.encode(_A , add_special_tokens=_A ) lowercase : Tuple = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) lowercase : List[Any] = self.get_rust_tokenizer() lowercase : Dict = tokenizer.encode(_A ) lowercase : List[Any] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def __a ( self : List[str] ) -> Any: """simple docstring""" lowercase : Union[str, Any] = AlbertTokenizer(_A , keep_accents=_A ) lowercase : Union[str, Any] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_A , ['''▁this''', '''▁is''', '''▁a''', '''▁test'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [48, 25, 21, 1_289] ) lowercase : Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _A , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''] ) lowercase : Union[str, Any] = tokenizer.convert_tokens_to_ids(_A ) self.assertListEqual(_A , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] ) lowercase : Any = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual( _A , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''] , ) def __a ( self : Dict ) -> Union[str, Any]: """simple docstring""" lowercase : Tuple = AlbertTokenizer(_A 
) lowercase : List[str] = tokenizer.encode('''sequence builders''' ) lowercase : int = tokenizer.encode('''multi-sequence build''' ) lowercase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_A ) lowercase : Dict = tokenizer.build_inputs_with_special_tokens(_A , _A ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def __a ( self : List[Any] ) -> Optional[Any]: """simple docstring""" lowercase : Any = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_A , model_name='''albert-base-v2''' , revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''' , )
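A hedged usage sketch: the albert-base-v2 checkpoint name and the sample sentence come from the tests above; the comparison itself is illustrative.

# Hedged sketch: compare the slow and fast ALBERT tokenizers on a public checkpoint.
from transformers import AlbertTokenizer, AlbertTokenizerFast

slow_tok = AlbertTokenizer.from_pretrained("albert-base-v2")
fast_tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")

text = "I was born in 92000, and this is falsé."
print(slow_tok.tokenize(text))
print(fast_tok.tokenize(text))  # expected to match the slow tokenizer, as in the tests above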
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class lowercase__( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self :int ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowerCamelCase_ ) ) def __lowerCAmelCase ( self :Union[str, Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowerCamelCase_ ) ) def __lowerCAmelCase ( self :Dict ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(lowerCamelCase_ ) ) def __lowerCAmelCase ( self :List[str] ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(lowerCamelCase_ ) ) def __lowerCAmelCase ( self :Any ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(lowerCamelCase_ ) ) def __lowerCAmelCase ( self :Optional[int] ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE : str = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) ) def __lowerCAmelCase ( self :str ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE : Dict = '''fp16''' self.assertTrue(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) ) def __lowerCAmelCase ( self :int ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE : int = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] SCREAMING_SNAKE_CASE : Union[str, Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) ) def __lowerCAmelCase ( self :Tuple ) -> List[str]: 
'''simple docstring''' SCREAMING_SNAKE_CASE : Dict = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] SCREAMING_SNAKE_CASE : Dict = '''fp16''' self.assertFalse(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) ) def __lowerCAmelCase ( self :Optional[int] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE : Optional[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) ) def __lowerCAmelCase ( self :Dict ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] SCREAMING_SNAKE_CASE : str = '''fp16''' self.assertTrue(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) ) def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE : int = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] SCREAMING_SNAKE_CASE : Optional[Any] = '''fp16''' self.assertFalse(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) )
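A hedged usage sketch of the helper these tests target: for the diffusers version under test, a repo listing is compatible only when every .bin weight has a .safetensors counterpart (per variant, when one is given); the file list below is illustrative.

# Hedged sketch: a missing vae safetensors file makes the listing incompatible.
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible

filenames = [
    "unet/diffusion_pytorch_model.bin",
    "unet/diffusion_pytorch_model.safetensors",
    "vae/diffusion_pytorch_model.bin",  # no safetensors counterpart
]
print(is_safetensors_compatible(filenames))  # False for the tested behavior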
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def __A ( a_ : float , a_ : float , a_ : bool = False )-> list[float]: '''simple docstring''' if radian_mode: return [magnitude * cos(a_ ), magnitude * sin(a_ )] return [magnitude * cos(radians(a_ ) ), magnitude * sin(radians(a_ ) )] def __A ( a_ : NDArray[floataa] , a_ : NDArray[floataa] , a_ : float = 10**-1 )-> bool: '''simple docstring''' SCREAMING_SNAKE_CASE : NDArray[floataa] = cross(a_ , a_ ) SCREAMING_SNAKE_CASE : float = sum(a_ ) return abs(a_ ) < eps if __name__ == "__main__": # Test to check if it works lowerCamelCase__ : Optional[Any] = array( [ polar_force(7_1_8.4, 180 - 30), polar_force(8_7_9.5_4, 45), polar_force(100, -90), ] ) lowerCamelCase__ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg lowerCamelCase__ : Union[str, Any] = array( [ polar_force(30 * 9.8_1, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) lowerCamelCase__ : Any = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg lowerCamelCase__ : Union[str, Any] = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) lowerCamelCase__ : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { 'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json', # See all Marian models at https://huggingface.co/models?filter=marian } class UpperCamelCase ( _UpperCAmelCase ): __UpperCamelCase = '''marian''' __UpperCamelCase = ['''past_key_values'''] __UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : Optional[Any] ,_lowerCAmelCase : str=58_101 ,_lowerCAmelCase : Union[str, Any]=None ,_lowerCAmelCase : Tuple=1_024 ,_lowerCAmelCase : List[Any]=12 ,_lowerCAmelCase : int=4_096 ,_lowerCAmelCase : int=16 ,_lowerCAmelCase : str=12 ,_lowerCAmelCase : List[str]=4_096 ,_lowerCAmelCase : Tuple=16 ,_lowerCAmelCase : List[Any]=0.0 ,_lowerCAmelCase : Any=0.0 ,_lowerCAmelCase : List[Any]=True ,_lowerCAmelCase : Dict=True ,_lowerCAmelCase : Union[str, Any]="gelu" ,_lowerCAmelCase : int=1_024 ,_lowerCAmelCase : Optional[Any]=0.1 ,_lowerCAmelCase : List[Any]=0.0 ,_lowerCAmelCase : Optional[int]=0.0 ,_lowerCAmelCase : str=0.0_2 ,_lowerCAmelCase : Tuple=58_100 ,_lowerCAmelCase : int=False ,_lowerCAmelCase : Any=58_100 ,_lowerCAmelCase : Tuple=0 ,_lowerCAmelCase : Tuple=0 ,_lowerCAmelCase : List[Any]=True ,**_lowerCAmelCase : int ,): """simple docstring""" __snake_case = vocab_size __snake_case = decoder_vocab_size or vocab_size __snake_case = max_position_embeddings __snake_case = d_model __snake_case = encoder_ffn_dim __snake_case = encoder_layers __snake_case = encoder_attention_heads __snake_case = decoder_ffn_dim __snake_case = decoder_layers __snake_case = decoder_attention_heads __snake_case = dropout __snake_case = attention_dropout __snake_case = activation_dropout __snake_case = activation_function __snake_case = init_std __snake_case = encoder_layerdrop __snake_case = decoder_layerdrop __snake_case = use_cache __snake_case = encoder_layers __snake_case = scale_embedding # scale factor will be sqrt(d_model) if True __snake_case = share_encoder_decoder_embeddings super().__init__( pad_token_id=_lowercase ,eos_token_id=_lowercase ,is_encoder_decoder=_lowercase ,decoder_start_token_id=_lowercase ,forced_eos_token_id=_lowercase ,**_lowercase ,) class UpperCamelCase ( _UpperCAmelCase ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def UpperCamelCase_ ( self : Tuple ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __snake_case = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: __snake_case = {0: '''batch'''} __snake_case = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: __snake_case = {0: '''batch''', 1: '''decoder_sequence'''} __snake_case = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(_lowercase ,direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
__snake_case = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: __snake_case = self.num_layers for i in range(_lowercase ): __snake_case = {0: '''batch''', 2: '''past_sequence + sequence'''} __snake_case = {0: '''batch''', 2: '''past_sequence + sequence'''} else: __snake_case = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def UpperCamelCase_ ( self : Tuple ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __snake_case = super().outputs else: __snake_case = super(_lowercase ,self ).outputs if self.use_past: __snake_case = self.num_layers for i in range(_lowercase ): __snake_case = {0: '''batch''', 2: '''past_sequence + sequence'''} __snake_case = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def UpperCamelCase_ ( self : Optional[Any] ,_lowerCAmelCase : PreTrainedTokenizer ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : bool = False ,_lowerCAmelCase : Optional[TensorType] = None ,): """simple docstring""" __snake_case = self._generate_dummy_inputs_for_encoder_and_decoder( _lowercase ,_lowercase ,_lowercase ,_lowercase ,_lowercase ) # Generate decoder inputs __snake_case = seq_length if not self.use_past else 1 __snake_case = self._generate_dummy_inputs_for_encoder_and_decoder( _lowercase ,_lowercase ,_lowercase ,_lowercase ,_lowercase ) __snake_case = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} __snake_case = dict(**_lowercase ,**_lowercase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch __snake_case = common_inputs['''input_ids'''].shape __snake_case = common_inputs['''decoder_input_ids'''].shape[1] __snake_case = self.num_attention_heads __snake_case = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __snake_case = decoder_seq_length + 3 __snake_case = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __snake_case = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(_lowercase ,_lowercase )] ,dim=1 ) __snake_case = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __snake_case = self.num_layers __snake_case = min(_lowercase ,_lowercase ) __snake_case = max(_lowercase ,_lowercase ) - min_num_layers __snake_case = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(_lowercase ): common_inputs["past_key_values"].append( ( torch.zeros(_lowercase ), torch.zeros(_lowercase ), torch.zeros(_lowercase ), torch.zeros(_lowercase ), ) ) # TODO: test this. 
__snake_case = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(_lowercase ,_lowercase ): common_inputs["past_key_values"].append((torch.zeros(_lowercase ), torch.zeros(_lowercase )) ) return common_inputs def UpperCamelCase_ ( self : int ,_lowerCAmelCase : PreTrainedTokenizer ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : bool = False ,_lowerCAmelCase : Optional[TensorType] = None ,): """simple docstring""" __snake_case = self._generate_dummy_inputs_for_encoder_and_decoder( _lowercase ,_lowercase ,_lowercase ,_lowercase ,_lowercase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch __snake_case = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values __snake_case = seqlen + 2 __snake_case = self.num_layers __snake_case = self.num_attention_heads __snake_case = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __snake_case = common_inputs['''attention_mask'''].dtype __snake_case = torch.cat( [common_inputs["attention_mask"], torch.ones(_lowercase ,_lowercase ,dtype=_lowercase )] ,dim=1 ) __snake_case = [ (torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(_lowercase ) ] return common_inputs def UpperCamelCase_ ( self : Optional[Any] ,_lowerCAmelCase : PreTrainedTokenizer ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : bool = False ,_lowerCAmelCase : Optional[TensorType] = None ,): """simple docstring""" __snake_case = compute_effective_axis_dimension( _lowercase ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __snake_case = tokenizer.num_special_tokens_to_add(_lowercase ) __snake_case = compute_effective_axis_dimension( _lowercase ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=_lowercase ) # Generate dummy inputs according to compute batch and sequence __snake_case = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size __snake_case = dict(tokenizer(_lowercase ,return_tensors=_lowercase ) ) return common_inputs def UpperCamelCase_ ( self : List[str] ,_lowerCAmelCase : PreTrainedTokenizer ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : bool = False ,_lowerCAmelCase : Optional[TensorType] = None ,): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __snake_case = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _lowercase ,batch_size=_lowercase ,seq_length=_lowercase ,is_pair=_lowercase ,framework=_lowercase ) else: __snake_case = self._generate_dummy_inputs_for_causal_lm( _lowercase ,batch_size=_lowercase ,seq_length=_lowercase ,is_pair=_lowercase ,framework=_lowercase ) return common_inputs def UpperCamelCase_ ( self : str ,_lowerCAmelCase : Union[str, Any] ,_lowerCAmelCase : Any ,_lowerCAmelCase : Optional[int] ,_lowerCAmelCase : Union[str, Any] ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __snake_case = super()._flatten_past_key_values_(_lowercase ,_lowercase ,_lowercase ,_lowercase ) else: __snake_case = super(_lowercase ,self )._flatten_past_key_values_( _lowercase ,_lowercase ,_lowercase ,_lowercase ) @property def UpperCamelCase_ ( self : Dict ): """simple docstring""" return 1E-4
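# A hedged export sketch for the configuration above. It assumes the ONNX config class defined
# here corresponds to transformers' `MarianOnnxConfig` and that the (since deprecated)
# `transformers.onnx.export` helper is available; checkpoint name, task and opset are illustrative.
from pathlib import Path

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.onnx import export

checkpoint = "Helsinki-NLP/opus-mt-en-de"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

onnx_config = MarianOnnxConfig(model.config, task="seq2seq-lm")
onnx_inputs, onnx_outputs = export(
    tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("marian-en-de.onnx")
)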
import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class lowercase : @staticmethod def lowercase__ ( *_lowercase : Optional[Any] , **_lowercase : str ): pass def a ( A__ ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class lowercase ( unittest.TestCase ): lowerCamelCase : int = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : List[str] ): SCREAMING_SNAKE_CASE__ : List[str] = DepthEstimationPipeline(model=_lowercase , image_processor=_lowercase ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : int ): SCREAMING_SNAKE_CASE__ : Optional[int] = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , _lowercase ) import datasets SCREAMING_SNAKE_CASE__ : List[str] = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' ) SCREAMING_SNAKE_CASE__ : Dict = depth_estimator( [ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ] ) self.assertEqual( [ {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, ] , _lowercase , ) @require_tf @unittest.skip('''Depth estimation is not implemented in TF''' ) def lowercase__ ( self : Optional[int] ): pass @slow @require_torch def lowercase__ ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE__ : List[str] = '''Intel/dpt-large''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline('''depth-estimation''' , model=_lowercase ) SCREAMING_SNAKE_CASE__ : Dict = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) SCREAMING_SNAKE_CASE__ : List[str] = hashimage(outputs['''depth'''] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 ) @require_torch def lowercase__ ( self : str ): # This is highly irregular to have no small tests. self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
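# A minimal usage sketch of the pipeline exercised by the tests above; the model id and the
# "predicted_depth"/"depth" output keys come from the slow test, exact values vary by version.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
print(outputs["predicted_depth"].shape)  # torch.Tensor with per-pixel depth predictions
outputs["depth"].save("depth.png")  # PIL.Image visualization of the depth map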
from torch import nn


class ClassificationHead(nn.Module):
    """Classification head mapping a transformer hidden state to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = nn.Linear(embed_size, class_size)
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
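# Usage sketch for the head above: project a (batch, embed_size) hidden state to class logits.
import torch

head = ClassificationHead(class_size=5, embed_size=768)
hidden_state = torch.randn(2, 768)
logits = head(hidden_state)  # shape (2, 5)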
def solution() -> int:
    """Return the product a * b * c of the Pythagorean triplet with a + b + c == 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if a * a + b * b == (1000 - a - b) ** 2
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
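# Worked check: the only Pythagorean triplet summing to 1000 is (200, 375, 425), since
# 200**2 + 375**2 == 40000 + 140625 == 180625 == 425**2, so solution() == 200 * 375 * 425 == 31875000.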
import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) __A : Tuple = { "iou_prediction_head.layers.0": "iou_prediction_head.proj_in", "iou_prediction_head.layers.1": "iou_prediction_head.layers.0", "iou_prediction_head.layers.2": "iou_prediction_head.proj_out", "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1", "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm", "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2", "mask_downscaling.0": "mask_embed.conv1", "mask_downscaling.1": "mask_embed.layer_norm1", "mask_downscaling.3": "mask_embed.conv2", "mask_downscaling.4": "mask_embed.layer_norm2", "mask_downscaling.6": "mask_embed.conv3", "point_embeddings": "point_embed", "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding", "image_encoder": "vision_encoder", "neck.0": "neck.conv1", "neck.1": "neck.layer_norm1", "neck.2": "neck.conv2", "neck.3": "neck.layer_norm2", "patch_embed.proj": "patch_embed.projection", ".norm": ".layer_norm", "blocks": "layers", } def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" _A = {} state_dict.pop('pixel_mean' , _SCREAMING_SNAKE_CASE ) state_dict.pop('pixel_std' , _SCREAMING_SNAKE_CASE ) _A = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*' for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _A = key.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if re.match(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _A = int(re.match(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).group(2 ) ) if layer_nb == 0: _A = key.replace('layers.0' , 'proj_in' ) elif layer_nb == 1: _A = key.replace('layers.1' , 'layers.0' ) elif layer_nb == 2: _A = key.replace('layers.2' , 'proj_out' ) _A = value _A = model_state_dict[ 'prompt_encoder.shared_embedding.positional_embedding' ] return model_state_dict def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="ybelkada/segment-anything" ) -> Optional[int]: """simple docstring""" _A = hf_hub_download(_SCREAMING_SNAKE_CASE , F"checkpoints/{model_name}.pth" ) if "sam_vit_b" in model_name: _A = SamConfig() elif "sam_vit_l" in model_name: _A = SamVisionConfig( hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) _A = SamConfig( vision_config=_SCREAMING_SNAKE_CASE , ) elif "sam_vit_h" in model_name: _A = SamVisionConfig( hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) _A = SamConfig( vision_config=_SCREAMING_SNAKE_CASE , ) _A = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) _A = replace_keys(_SCREAMING_SNAKE_CASE ) _A = SamImageProcessor() _A = SamProcessor(image_processor=_SCREAMING_SNAKE_CASE ) _A = SamModel(_SCREAMING_SNAKE_CASE ) hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) _A = hf_model.to('cuda' ) _A = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png' _A = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ).convert('RGB' ) _A = [[[400, 650]]] _A = [[1]] _A = processor(images=np.array(_SCREAMING_SNAKE_CASE ) , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): _A = hf_model(**_SCREAMING_SNAKE_CASE ) _A = 
output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.579_8902_5115_9668 _A = processor( images=np.array(_SCREAMING_SNAKE_CASE ) , input_points=_SCREAMING_SNAKE_CASE , input_labels=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): _A = hf_model(**_SCREAMING_SNAKE_CASE ) _A = output.iou_scores.squeeze() assert scores[-1].item() == 0.9712_6030_9219_3604 _A = ((75, 275, 1_725, 850),) _A = processor(images=np.array(_SCREAMING_SNAKE_CASE ) , input_boxes=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): _A = hf_model(**_SCREAMING_SNAKE_CASE ) _A = output.iou_scores.squeeze() assert scores[-1].item() == 0.8686_0156_0592_6514 # Test with 2 points and 1 image. _A = [[[400, 650], [800, 650]]] _A = [[1, 1]] _A = processor( images=np.array(_SCREAMING_SNAKE_CASE ) , input_points=_SCREAMING_SNAKE_CASE , input_labels=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): _A = hf_model(**_SCREAMING_SNAKE_CASE ) _A = output.iou_scores.squeeze() assert scores[-1].item() == 0.9936_0477_9243_4692 if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() __A : List[str] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"] parser.add_argument( "--model_name", default="sam_vit_h_4b8939", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) parser.add_argument( "--model_hub_id", default="ybelkada/segment-anything", choices=choices, type=str, help="Path to hf config.json of model to convert", ) __A : Union[str, Any] = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
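# A hedged inference sketch with a converted checkpoint, mirroring the point-prompt check above.
# The "facebook/sam-vit-huge" hub id is an assumption; any converted SAM checkpoint should work.
import requests
import torch
from PIL import Image
from transformers import SamModel, SamProcessor

processor = SamProcessor.from_pretrained("facebook/sam-vit-huge")
model = SamModel.from_pretrained("facebook/sam-vit-huge")

url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
inputs = processor(images=image, input_points=[[[400, 650]]], return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.iou_scores)  # one IoU score per predicted mask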
import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def __UpperCAmelCase( lowercase_ ): # vision encoder if "img_encoder.pos_embed" in name: _lowerCamelCase : Tuple = name.replace('''img_encoder.pos_embed''' , '''vision_model.embeddings.position_embeddings''' ) if "img_encoder.patch_embed.proj" in name: _lowerCamelCase : List[str] = name.replace('''img_encoder.patch_embed.proj''' , '''vision_model.embeddings.patch_embeddings.projection''' ) if "img_encoder.patch_embed.norm" in name: _lowerCamelCase : Dict = name.replace('''img_encoder.patch_embed.norm''' , '''vision_model.embeddings.layernorm''' ) if "img_encoder.layers" in name: _lowerCamelCase : Dict = name.replace('''img_encoder.layers''' , '''vision_model.encoder.stages''' ) if "blocks" in name and "res" not in name: _lowerCamelCase : Optional[Any] = name.replace('''blocks''' , '''layers''' ) if "attn" in name and "pre_assign" not in name: _lowerCamelCase : Tuple = name.replace('''attn''' , '''self_attn''' ) if "proj" in name and "self_attn" in name and "text" not in name: _lowerCamelCase : Optional[int] = name.replace('''proj''' , '''out_proj''' ) if "pre_assign_attn.attn.proj" in name: _lowerCamelCase : List[Any] = name.replace('''pre_assign_attn.attn.proj''' , '''pre_assign_attn.attn.out_proj''' ) if "norm1" in name: _lowerCamelCase : int = name.replace('''norm1''' , '''layer_norm1''' ) if "norm2" in name and "pre_assign" not in name: _lowerCamelCase : Optional[int] = name.replace('''norm2''' , '''layer_norm2''' ) if "img_encoder.norm" in name: _lowerCamelCase : Dict = name.replace('''img_encoder.norm''' , '''vision_model.layernorm''' ) # text encoder if "text_encoder.token_embedding" in name: _lowerCamelCase : List[Any] = name.replace('''text_encoder.token_embedding''' , '''text_model.embeddings.token_embedding''' ) if "text_encoder.positional_embedding" in name: _lowerCamelCase : Optional[Any] = name.replace('''text_encoder.positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' ) if "text_encoder.transformer.resblocks." in name: _lowerCamelCase : Dict = name.replace('''text_encoder.transformer.resblocks.''' , '''text_model.encoder.layers.''' ) if "ln_1" in name: _lowerCamelCase : Tuple = name.replace('''ln_1''' , '''layer_norm1''' ) if "ln_2" in name: _lowerCamelCase : List[str] = name.replace('''ln_2''' , '''layer_norm2''' ) if "c_fc" in name: _lowerCamelCase : List[str] = name.replace('''c_fc''' , '''fc1''' ) if "c_proj" in name: _lowerCamelCase : str = name.replace('''c_proj''' , '''fc2''' ) if "text_encoder" in name: _lowerCamelCase : Tuple = name.replace('''text_encoder''' , '''text_model''' ) if "ln_final" in name: _lowerCamelCase : str = name.replace('''ln_final''' , '''final_layer_norm''' ) # projection layers if "img_projector.linear_hidden." in name: _lowerCamelCase : str = name.replace('''img_projector.linear_hidden.''' , '''visual_projection.''' ) if "img_projector.linear_out." 
in name: _lowerCamelCase : Union[str, Any] = name.replace('''img_projector.linear_out.''' , '''visual_projection.3.''' ) if "text_projector.linear_hidden" in name: _lowerCamelCase : List[str] = name.replace('''text_projector.linear_hidden''' , '''text_projection''' ) if "text_projector.linear_out" in name: _lowerCamelCase : Any = name.replace('''text_projector.linear_out''' , '''text_projection.3''' ) return name def __UpperCAmelCase( lowercase_ , lowercase_ ): for key in orig_state_dict.copy().keys(): _lowerCamelCase : Optional[int] = orig_state_dict.pop(lowercase_ ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors _lowerCamelCase : Tuple = key.split('''.''' ) _lowerCamelCase, _lowerCamelCase : Dict = int(key_split[2] ), int(key_split[4] ) _lowerCamelCase : Optional[int] = config.vision_config.hidden_size if "weight" in key: _lowerCamelCase : List[Any] = val[:dim, :] _lowerCamelCase : List[Any] = val[dim : dim * 2, :] _lowerCamelCase : Optional[Any] = val[-dim:, :] else: _lowerCamelCase : Union[str, Any] = val[:dim] _lowerCamelCase : Optional[Any] = val[dim : dim * 2] _lowerCamelCase : Any = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors _lowerCamelCase : Optional[int] = key.split('''.''' ) _lowerCamelCase : Optional[Any] = int(key_split[3] ) _lowerCamelCase : Union[str, Any] = config.text_config.hidden_size if "weight" in key: _lowerCamelCase : Any = val[:dim, :] _lowerCamelCase : Optional[Any] = val[ dim : dim * 2, : ] _lowerCamelCase : Dict = val[-dim:, :] else: _lowerCamelCase : List[Any] = val[:dim] _lowerCamelCase : Tuple = val[dim : dim * 2] _lowerCamelCase : str = val[-dim:] else: _lowerCamelCase : Optional[Any] = rename_key(lowercase_ ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): _lowerCamelCase : List[str] = val.squeeze_() else: _lowerCamelCase : Tuple = val return orig_state_dict def __UpperCAmelCase( ): _lowerCamelCase : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _lowerCamelCase : Any = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ) return im @torch.no_grad() def __UpperCAmelCase( lowercase_ , lowercase_ , lowercase_="groupvit-gcc-yfcc" , lowercase_=False ): _lowerCamelCase : Optional[Any] = GroupViTConfig() _lowerCamelCase : Any = GroupViTModel(lowercase_ ).eval() _lowerCamelCase : Optional[Any] = torch.load(lowercase_ , map_location='''cpu''' )['''model'''] _lowerCamelCase : List[str] = convert_state_dict(lowercase_ , lowercase_ ) _lowerCamelCase, _lowerCamelCase : List[str] = model.load_state_dict(lowercase_ , strict=lowercase_ ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowercase_ ) == 0) # verify result _lowerCamelCase : Optional[Any] = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) _lowerCamelCase : Dict = prepare_img() _lowerCamelCase : str = processor(text=['''a photo of a cat''', '''a photo of a dog'''] , images=lowercase_ , padding=lowercase_ , return_tensors='''pt''' ) with torch.no_grad(): _lowerCamelCase : Optional[int] = model(**lowercase_ ) if model_name == 
"groupvit-gcc-yfcc": _lowerCamelCase : Dict = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] ) elif model_name == "groupvit-gcc-redcaps": _lowerCamelCase : Tuple = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] ) else: raise ValueError(F"""Model name {model_name} not supported.""" ) assert torch.allclose(outputs.logits_per_image , lowercase_ , atol=1e-3 ) processor.save_pretrained(lowercase_ ) model.save_pretrained(lowercase_ ) print('''Successfully saved processor and model to''' , lowercase_ ) if push_to_hub: print('''Pushing to the hub...''' ) processor.push_to_hub(lowercase_ , organization='''nielsr''' ) model.push_to_hub(lowercase_ , organization='''nielsr''' ) if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.' ) parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint') parser.add_argument( '--model_name', default='groupvit-gccy-fcc', type=str, help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.', ) _lowerCamelCase = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
def check_bouncy(n: int) -> bool:
    """Return True if the digits of n are neither monotonically increasing nor decreasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the smallest number at which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
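# Quick sanity checks for the helpers above: 101 is bouncy (its digits are neither sorted
# ascending nor descending), while 1233 (increasing) and 3221 (decreasing) are not.
assert check_bouncy(101)
assert not check_bouncy(1233)
assert not check_bouncy(3221)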
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string as Ascii85 (base85) bytes."""
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85 (base85) bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
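# Round-trip check for the helpers above.
encoded = base85_encode("Hello World!")
assert base85_decode(encoded) == "Hello World!"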
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _lowerCAmelCase : int = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") _lowerCAmelCase : Union[str, Any] = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) _lowerCAmelCase : Tuple = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class UpperCAmelCase_ : __SCREAMING_SNAKE_CASE : Optional[str] = field( default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=_UpperCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=_UpperCamelCase , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , ) __SCREAMING_SNAKE_CASE : Optional[str] = field(default=_UpperCamelCase , metadata={'help': 'A folder containing the training data.'} ) __SCREAMING_SNAKE_CASE : Optional[str] = field(default=_UpperCamelCase , metadata={'help': 'A folder containing the validation data.'} ) __SCREAMING_SNAKE_CASE : Optional[float] = field( default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} ) __SCREAMING_SNAKE_CASE : int = field(default=3_2 , metadata={'help': 'The size of the square patches to use for masking.'} ) __SCREAMING_SNAKE_CASE : float = field( default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , ) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=_UpperCamelCase , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=_UpperCamelCase , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) def snake_case_ ( self : str ): _UpperCAmelCase : List[str] = {} if self.train_dir is not None: _UpperCAmelCase : Optional[int] = self.train_dir if self.validation_dir is not None: _UpperCAmelCase : Union[str, Any] = self.validation_dir _UpperCAmelCase : str = data_files if data_files else None @dataclass class UpperCAmelCase_ : __SCREAMING_SNAKE_CASE : str = field( default=_UpperCamelCase , metadata={ 'help': ( 'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ' 'checkpoint identifier on the hub. ' 'Don\'t set if you want to train a model from scratch.' 
) } , ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=_UpperCamelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_UpperCamelCase )} , ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=_UpperCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=_UpperCamelCase , metadata={ 'help': ( 'Override some existing default config settings when a model is trained from scratch. Example: ' 'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index' ) } , ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=_UpperCamelCase , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , ) __SCREAMING_SNAKE_CASE : str = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) __SCREAMING_SNAKE_CASE : str = field(default=_UpperCamelCase , metadata={'help': 'Name or path of preprocessor config.'} ) __SCREAMING_SNAKE_CASE : bool = field( default=_UpperCamelCase , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=_UpperCamelCase , metadata={ 'help': ( 'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.' ) } , ) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=_UpperCamelCase , metadata={ 'help': ( 'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.' ) } , ) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=_UpperCamelCase , metadata={'help': 'Stride to use for the encoder.'} , ) class UpperCAmelCase_ : def __init__( self : Tuple , A : Dict=1_9_2 , A : Tuple=3_2 , A : str=4 , A : Any=0.6 ): _UpperCAmelCase : Union[str, Any] = input_size _UpperCAmelCase : str = mask_patch_size _UpperCAmelCase : Any = model_patch_size _UpperCAmelCase : List[Any] = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError("Input size must be divisible by mask patch size" ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError("Mask patch size must be divisible by model patch size" ) _UpperCAmelCase : int = self.input_size // self.mask_patch_size _UpperCAmelCase : int = self.mask_patch_size // self.model_patch_size _UpperCAmelCase : List[Any] = self.rand_size**2 _UpperCAmelCase : Any = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : List[Any] ): _UpperCAmelCase : Optional[Any] = np.random.permutation(self.token_count )[: self.mask_count] _UpperCAmelCase : List[str] = np.zeros(self.token_count , dtype=A ) _UpperCAmelCase : Union[str, Any] = 1 _UpperCAmelCase : Union[str, Any] = mask.reshape((self.rand_size, self.rand_size) ) _UpperCAmelCase : Any = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def __snake_case ( SCREAMING_SNAKE_CASE__ : str ) -> int: '''simple docstring''' _UpperCAmelCase : Dict = torch.stack([example["pixel_values"] for example in examples] ) _UpperCAmelCase : int = torch.stack([example["mask"] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def __snake_case ( ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, 
TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_mim" , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _UpperCAmelCase : List[str] = training_args.get_process_log_level() logger.setLevel(SCREAMING_SNAKE_CASE__ ) transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. _UpperCAmelCase : Optional[int] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase : List[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset. _UpperCAmelCase : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _UpperCAmelCase : int = None if "validation" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE__ ) and data_args.train_val_split > 0.0: _UpperCAmelCase : int = ds["train"].train_test_split(data_args.train_val_split ) _UpperCAmelCase : List[Any] = split["train"] _UpperCAmelCase : Any = split["test"] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase : str = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name_or_path: _UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(model_args.config_name_or_path , **SCREAMING_SNAKE_CASE__ ) elif model_args.model_name_or_path: _UpperCAmelCase : Any = AutoConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ ) else: _UpperCAmelCase : Union[str, Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch." ) if model_args.config_overrides is not None: logger.info(f'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(f'New config: {config}' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(SCREAMING_SNAKE_CASE__ , "decoder_type" ): _UpperCAmelCase : Tuple = "simmim" # adapt config _UpperCAmelCase : Any = model_args.image_size if model_args.image_size is not None else config.image_size _UpperCAmelCase : str = model_args.patch_size if model_args.patch_size is not None else config.patch_size _UpperCAmelCase : Dict = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { "image_size": model_args.image_size, "patch_size": model_args.patch_size, "encoder_stride": model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: _UpperCAmelCase : int = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **SCREAMING_SNAKE_CASE__ ) elif model_args.model_name_or_path: _UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ ) else: _UpperCAmelCase : Optional[int] = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } _UpperCAmelCase : List[str] = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: _UpperCAmelCase : Tuple = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("Training new model from scratch" ) _UpperCAmelCase : int = AutoModelForMaskedImageModeling.from_config(SCREAMING_SNAKE_CASE__ ) if training_args.do_train: _UpperCAmelCase : Tuple = ds["train"].column_names else: _UpperCAmelCase : Union[str, Any] = ds["validation"].column_names if data_args.image_column_name is not None: _UpperCAmelCase : Any = data_args.image_column_name elif "image" in column_names: _UpperCAmelCase : List[Any] = "image" elif "img" in column_names: _UpperCAmelCase : List[Any] = "img" else: _UpperCAmelCase : List[Any] = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py _UpperCAmelCase : Dict = Compose( [ Lambda(lambda SCREAMING_SNAKE_CASE__ : img.convert("RGB" ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator _UpperCAmelCase : str = MaskGenerator( 
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(SCREAMING_SNAKE_CASE__ : List[str] ): _UpperCAmelCase : List[Any] = [transforms(SCREAMING_SNAKE_CASE__ ) for image in examples[image_column_name]] _UpperCAmelCase : Dict = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: _UpperCAmelCase : Optional[Any] = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(SCREAMING_SNAKE_CASE__ ) if training_args.do_eval: if "validation" not in ds: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: _UpperCAmelCase : List[Any] = ( ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(SCREAMING_SNAKE_CASE__ ) # Initialize our trainer _UpperCAmelCase : Tuple = Trainer( model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE__ , data_collator=SCREAMING_SNAKE_CASE__ , ) # Training if training_args.do_train: _UpperCAmelCase : int = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase : Dict = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase : Tuple = last_checkpoint _UpperCAmelCase : Union[str, Any] = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _UpperCAmelCase : Optional[int] = trainer.evaluate() trainer.log_metrics("eval" , SCREAMING_SNAKE_CASE__ ) trainer.save_metrics("eval" , SCREAMING_SNAKE_CASE__ ) # Write model card and (optionally) push to hub _UpperCAmelCase : Tuple = { "finetuned_from": model_args.model_name_or_path, "tasks": "masked-image-modeling", "dataset": data_args.dataset_name, "tags": ["masked-image-modeling"], } if training_args.push_to_hub: trainer.push_to_hub(**SCREAMING_SNAKE_CASE__ ) else: trainer.create_model_card(**SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
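# A small sketch of the MaskGenerator defined above, using its default sizes: it returns a
# flattened 0/1 tensor over (input_size // model_patch_size) ** 2 positions, with roughly
# mask_ratio of the mask patches set to 1; the collator feeds this in as `bool_masked_pos`.
mask_generator = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
mask = mask_generator()
print(mask.shape)  # torch.Size([2304]) == (192 // 4) ** 2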
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase : List[Any] = { "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : int = [ "LILT_PRETRAINED_MODEL_ARCHIVE_LIST", "LiltForQuestionAnswering", "LiltForSequenceClassification", "LiltForTokenClassification", "LiltModel", "LiltPreTrainedModel", ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys _lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def __lowerCamelCase ( A__ : Dict , A__ : str , A__ : str , A__ : Any ) -> int: if isinstance(A__ , A__ ): lowerCamelCase_ : str = np.full((len(A__ ), sequence_length, 2) , A__ ) else: lowerCamelCase_ : Union[str, Any] = np.full((len(A__ ), sequence_length) , A__ ) for i, tensor in enumerate(A__ ): if padding_side == "right": if isinstance(A__ , A__ ): lowerCamelCase_ : List[str] = tensor[:sequence_length] else: lowerCamelCase_ : List[str] = tensor[:sequence_length] else: if isinstance(A__ , A__ ): lowerCamelCase_ : List[Any] = tensor[:sequence_length] else: lowerCamelCase_ : str = tensor[:sequence_length] return out_tensor.tolist() def __lowerCamelCase ( A__ : Tuple ) -> List[Any]: lowerCamelCase_ : Dict = ord(A__ ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True lowerCamelCase_ : int = unicodedata.category(A__ ) if cat.startswith("""P""" ): return True return False @dataclass class SCREAMING_SNAKE_CASE_ (a__ ): '''simple docstring''' _a = 42 _a = True _a = None _a = None _a = -100 _a = "pt" def _lowerCAmelCase ( self : List[Any] , __a : List[str] ) ->List[str]: import torch lowerCamelCase_ : str = """label""" if """label""" in features[0].keys() else """labels""" lowerCamelCase_ : Tuple = [feature[label_name] for feature in features] if label_name in features[0].keys() else None lowerCamelCase_ : List[Any] = self.tokenizer.pad( __a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , ) if labels is None: return batch lowerCamelCase_ : Tuple = torch.tensor(batch["""entity_ids"""] ).shape[1] lowerCamelCase_ : Union[str, Any] = self.tokenizer.padding_side if padding_side == "right": lowerCamelCase_ : Any = [ list(__a ) + [self.label_pad_token_id] * (sequence_length - len(__a )) for label in labels ] else: lowerCamelCase_ : Any = [ [self.label_pad_token_id] * (sequence_length - len(__a )) + list(__a ) for label in labels ] lowerCamelCase_ : List[str] = [feature["""ner_tags"""] for feature in features] lowerCamelCase_ : Tuple = padding_tensor(__a , -1 , __a , __a ) lowerCamelCase_ : int = [feature["""original_entity_spans"""] for feature in features] lowerCamelCase_ : Optional[Any] = padding_tensor(__a , (-1, -1) , __a , __a ) lowerCamelCase_ : Optional[Any] = {k: torch.tensor(__a , dtype=torch.intaa ) for k, v in batch.items()} return batch
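# Hedged illustration of the label padding done in the collator above: variable-length label
# lists are padded on the tokenizer's padding side with the -100 ignore index so the loss
# skips the padded positions.
labels = [[1, 2, 3], [4]]
sequence_length = 4
padded = [row + [-100] * (sequence_length - len(row)) for row in labels]  # right padding
assert padded == [[1, 2, 3, -100], [4, -100, -100, -100]]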
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    """Configuration for a VAN (Visual Attention Network) model."""

    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
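# A hedged usage sketch, assuming this module is transformers' VAN configuration: build a
# config and a randomly initialized VanModel from it.
from transformers import VanModel

config = VanConfig(hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3])
model = VanModel(config)
print(model.config.hidden_sizes)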
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class snake_case_ ( __A ): '''simple docstring''' lowerCamelCase = 42 lowerCamelCase = 42 class snake_case_ ( nn.Module ): '''simple docstring''' lowerCamelCase = 42 lowerCamelCase = (16, 32, 96, 2_56) lowerCamelCase = jnp.floataa def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: lowerCamelCase_ : List[str] = nn.Conv( self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) lowerCamelCase_ : Optional[int] = [] for i in range(len(self.block_out_channels ) - 1 ): lowerCamelCase_ : List[Any] = self.block_out_channels[i] lowerCamelCase_ : str = self.block_out_channels[i + 1] lowerCamelCase_ : Tuple = nn.Conv( __magic_name__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(__magic_name__ ) lowerCamelCase_ : List[str] = nn.Conv( __magic_name__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(__magic_name__ ) lowerCamelCase_ : List[Any] = blocks lowerCamelCase_ : List[Any] = nn.Conv( self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self : str , __magic_name__ : Tuple ) -> Optional[Any]: lowerCamelCase_ : Dict = self.conv_in(__magic_name__ ) lowerCamelCase_ : Optional[int] = nn.silu(__magic_name__ ) for block in self.blocks: lowerCamelCase_ : Optional[int] = block(__magic_name__ ) lowerCamelCase_ : List[str] = nn.silu(__magic_name__ ) lowerCamelCase_ : Tuple = self.conv_out(__magic_name__ ) return embedding @flax_register_to_config class snake_case_ ( nn.Module , __A , __A ): '''simple docstring''' lowerCamelCase = 32 lowerCamelCase = 4 lowerCamelCase = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) lowerCamelCase = False lowerCamelCase = (3_20, 6_40, 12_80, 12_80) lowerCamelCase = 2 lowerCamelCase = 8 lowerCamelCase = None lowerCamelCase = 12_80 lowerCamelCase = 0.0 lowerCamelCase = False lowerCamelCase = jnp.floataa lowerCamelCase = True lowerCamelCase = 0 lowerCamelCase = "rgb" lowerCamelCase = (16, 32, 96, 2_56) def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : jax.random.KeyArray ) -> FrozenDict: # init input tensors lowerCamelCase_ : Union[str, Any] = (1, self.in_channels, self.sample_size, self.sample_size) lowerCamelCase_ : Tuple = jnp.zeros(__magic_name__ , dtype=jnp.floataa ) lowerCamelCase_ : Optional[int] = jnp.ones((1,) , dtype=jnp.intaa ) lowerCamelCase_ : Any = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) lowerCamelCase_ : Tuple = (1, 3, self.sample_size * 8, self.sample_size * 8) lowerCamelCase_ : Dict = jnp.zeros(__magic_name__ , dtype=jnp.floataa ) lowerCamelCase_ , lowerCamelCase_ : Any = jax.random.split(__magic_name__ ) lowerCamelCase_ : Optional[Any] = {"params": params_rng, "dropout": dropout_rng} return self.init(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )["params"] def 
__SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: lowerCamelCase_ : Optional[Any] = self.block_out_channels lowerCamelCase_ : Dict = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. lowerCamelCase_ : str = self.num_attention_heads or self.attention_head_dim # input lowerCamelCase_ : Union[str, Any] = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time lowerCamelCase_ : Tuple = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) lowerCamelCase_ : Dict = FlaxTimestepEmbedding(__magic_name__ , dtype=self.dtype ) lowerCamelCase_ : List[Any] = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , ) lowerCamelCase_ : Optional[Any] = self.only_cross_attention if isinstance(__magic_name__ , __magic_name__ ): lowerCamelCase_ : int = (only_cross_attention,) * len(self.down_block_types ) if isinstance(__magic_name__ , __magic_name__ ): lowerCamelCase_ : str = (num_attention_heads,) * len(self.down_block_types ) # down lowerCamelCase_ : str = [] lowerCamelCase_ : List[Any] = [] lowerCamelCase_ : Union[str, Any] = block_out_channels[0] lowerCamelCase_ : Optional[int] = nn.Conv( __magic_name__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(__magic_name__ ) for i, down_block_type in enumerate(self.down_block_types ): lowerCamelCase_ : List[Any] = output_channel lowerCamelCase_ : List[Any] = block_out_channels[i] lowerCamelCase_ : str = i == len(__magic_name__ ) - 1 if down_block_type == "CrossAttnDownBlock2D": lowerCamelCase_ : List[Any] = FlaxCrossAttnDownBlockaD( in_channels=__magic_name__ , out_channels=__magic_name__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , ) else: lowerCamelCase_ : Union[str, Any] = FlaxDownBlockaD( in_channels=__magic_name__ , out_channels=__magic_name__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(__magic_name__ ) for _ in range(self.layers_per_block ): lowerCamelCase_ : int = nn.Conv( __magic_name__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(__magic_name__ ) if not is_final_block: lowerCamelCase_ : Any = nn.Conv( __magic_name__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(__magic_name__ ) 
lowerCamelCase_ : List[Any] = down_blocks lowerCamelCase_ : Union[str, Any] = controlnet_down_blocks # mid lowerCamelCase_ : int = block_out_channels[-1] lowerCamelCase_ : Optional[Any] = FlaxUNetMidBlockaDCrossAttn( in_channels=__magic_name__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , ) lowerCamelCase_ : Any = nn.Conv( __magic_name__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self : int , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : float = 1.0 , __magic_name__ : bool = True , __magic_name__ : bool = False , ) -> Union[FlaxControlNetOutput, Tuple]: lowerCamelCase_ : int = self.controlnet_conditioning_channel_order if channel_order == "bgr": lowerCamelCase_ : Dict = jnp.flip(__magic_name__ , axis=1 ) # 1. time if not isinstance(__magic_name__ , jnp.ndarray ): lowerCamelCase_ : Optional[int] = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(__magic_name__ , jnp.ndarray ) and len(timesteps.shape ) == 0: lowerCamelCase_ : Any = timesteps.astype(dtype=jnp.floataa ) lowerCamelCase_ : Optional[Any] = jnp.expand_dims(__magic_name__ , 0 ) lowerCamelCase_ : Union[str, Any] = self.time_proj(__magic_name__ ) lowerCamelCase_ : str = self.time_embedding(__magic_name__ ) # 2. pre-process lowerCamelCase_ : Union[str, Any] = jnp.transpose(__magic_name__ , (0, 2, 3, 1) ) lowerCamelCase_ : Union[str, Any] = self.conv_in(__magic_name__ ) lowerCamelCase_ : Tuple = jnp.transpose(__magic_name__ , (0, 2, 3, 1) ) lowerCamelCase_ : int = self.controlnet_cond_embedding(__magic_name__ ) sample += controlnet_cond # 3. down lowerCamelCase_ : List[str] = (sample,) for down_block in self.down_blocks: if isinstance(__magic_name__ , __magic_name__ ): lowerCamelCase_ , lowerCamelCase_ : Dict = down_block(__magic_name__ , __magic_name__ , __magic_name__ , deterministic=not train ) else: lowerCamelCase_ , lowerCamelCase_ : Tuple = down_block(__magic_name__ , __magic_name__ , deterministic=not train ) down_block_res_samples += res_samples # 4. mid lowerCamelCase_ : List[str] = self.mid_block(__magic_name__ , __magic_name__ , __magic_name__ , deterministic=not train ) # 5. contronet blocks lowerCamelCase_ : List[str] = () for down_block_res_sample, controlnet_block in zip(__magic_name__ , self.controlnet_down_blocks ): lowerCamelCase_ : Optional[int] = controlnet_block(__magic_name__ ) controlnet_down_block_res_samples += (down_block_res_sample,) lowerCamelCase_ : Union[str, Any] = controlnet_down_block_res_samples lowerCamelCase_ : Tuple = self.controlnet_mid_block(__magic_name__ ) # 6. scaling lowerCamelCase_ : Any = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=__magic_name__ , mid_block_res_sample=__magic_name__ )
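# A hedged loading sketch using diffusers' public FlaxControlNetModel wrapper around modules
# like the ones above; the checkpoint id is an assumption.
import jax.numpy as jnp
from diffusers import FlaxControlNetModel

controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32
)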
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    """Build a list of Things from parallel name/value/weight lists."""
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    """Greedily pick items ordered by key_func while the total weight stays within max_cost."""
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    """Exercise build_menu and greedy via doctests (examples omitted in the source)."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
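# Example run of the greedy helpers above: pick items by value while staying under a weight budget.
food = ["Burger", "Pizza", "Coca Cola", "Rice"]
value = [80, 100, 60, 70]
weight = [40, 10, 20, 70]
menu = build_menu(food, value, weight)
chosen, total_value = greedy(menu, max_cost=60, key_func=Things.get_value)
print(chosen, total_value)  # Pizza and Burger fit the budget, total value 180.0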
from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __a : str = logging.get_logger(__name__) __a : Tuple = { '''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''', } class UpperCAmelCase( __UpperCAmelCase ): """simple docstring""" a : Union[str, Any] = '''efficientnet''' def __init__( self , lowerCamelCase = 3 , lowerCamelCase = 600 , lowerCamelCase = 2.0 , lowerCamelCase = 3.1 , lowerCamelCase = 8 , lowerCamelCase = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase = [32, 16, 24, 40, 80, 112, 192] , lowerCamelCase = [16, 24, 40, 80, 112, 192, 320] , lowerCamelCase = [] , lowerCamelCase = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase = 0.25 , lowerCamelCase = "swish" , lowerCamelCase = 2560 , lowerCamelCase = "mean" , lowerCamelCase = 0.02 , lowerCamelCase = 0.0_01 , lowerCamelCase = 0.99 , lowerCamelCase = 0.5 , lowerCamelCase = 0.2 , **lowerCamelCase , ) -> Dict: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) lowercase__ : List[Any] = num_channels lowercase__ : List[str] = image_size lowercase__ : List[Any] = width_coefficient lowercase__ : Optional[int] = depth_coefficient lowercase__ : List[Any] = depth_divisor lowercase__ : Tuple = kernel_sizes lowercase__ : Dict = in_channels lowercase__ : Union[str, Any] = out_channels lowercase__ : Dict = depthwise_padding lowercase__ : Dict = strides lowercase__ : Any = num_block_repeats lowercase__ : List[str] = expand_ratios lowercase__ : str = squeeze_expansion_ratio lowercase__ : List[Any] = hidden_act lowercase__ : Optional[int] = hidden_dim lowercase__ : Dict = pooling_type lowercase__ : int = initializer_range lowercase__ : Union[str, Any] = batch_norm_eps lowercase__ : Optional[int] = batch_norm_momentum lowercase__ : List[Any] = dropout_rate lowercase__ : Union[str, Any] = drop_connect_rate lowercase__ : str = sum(__SCREAMING_SNAKE_CASE ) * 4 class UpperCAmelCase( __UpperCAmelCase ): """simple docstring""" a : Any = version.parse("""1.11""" ) @property def __a ( self ) -> Dict: """simple docstring""" return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def __a ( self ) -> str: """simple docstring""" return 1E-5
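# A hedged usage sketch, assuming the configuration class above is transformers'
# EfficientNetConfig: pair it with the corresponding model class for a random-weight backbone.
from transformers import EfficientNetConfig, EfficientNetModel

config = EfficientNetConfig(num_channels=3, image_size=600)
model = EfficientNetModel(config)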
import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def snake_case_ ( *SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=2 ) -> Dict: from .. import __version__ lowercase__ : List[str] = take_from lowercase__ : Optional[Any] = () if not isinstance(args[0] ,SCREAMING_SNAKE_CASE_ ): lowercase__ : Optional[Any] = (args,) for attribute, version_name, message in args: if version.parse(version.parse(SCREAMING_SNAKE_CASE_ ).base_version ) >= version.parse(SCREAMING_SNAKE_CASE_ ): raise ValueError( F"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'""" F""" version {__version__} is >= {version_name}""" ) lowercase__ : Tuple = None if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(SCREAMING_SNAKE_CASE_ ),) lowercase__ : Optional[Any] = F"""The `{attribute}` argument is deprecated and will be removed in version {version_name}.""" elif hasattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ): values += (getattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ),) lowercase__ : str = F"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}.""" elif deprecated_kwargs is None: lowercase__ : Any = F"""`{attribute}` is deprecated and will be removed in version {version_name}.""" if warning is not None: lowercase__ : Union[str, Any] = warning + " " if standard_warn else "" warnings.warn(warning + message ,SCREAMING_SNAKE_CASE_ ,stacklevel=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) > 0: lowercase__ : List[str] = inspect.getouterframes(inspect.currentframe() )[1] lowercase__ : Union[str, Any] = call_frame.filename lowercase__ : Optional[Any] = call_frame.lineno lowercase__ : str = call_frame.function lowercase__ , lowercase__ : List[str] = next(iter(deprecated_kwargs.items() ) ) raise TypeError(F"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" ) if len(SCREAMING_SNAKE_CASE_ ) == 0: return elif len(SCREAMING_SNAKE_CASE_ ) == 1: return values[0] return values
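The snippet above is diffusers' internal deprecation helper. A hedged usage sketch follows; the public entry point is diffusers.utils.deprecate, and the `h_w` keyword here is a hypothetical argument being phased out.

from diffusers.utils import deprecate

def resize(image, size=None, **kwargs):
    # Hypothetical example: accept the deprecated keyword `h_w` for now, emit a
    # deprecation warning, and fall back to its value if the caller still passes it.
    # The removal version ("1.0.0") must stay ahead of the installed diffusers
    # version, otherwise the helper raises instead of warning.
    h_w = deprecate("h_w", "1.0.0", "Please pass `size` instead of `h_w`.", take_from=kwargs)
    if h_w is not None:
        size = h_w
    return size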
'''simple docstring''' import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : Optional[Any]=10 ): '''simple docstring''' _a = [] for _ in range(UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def snake_case_ (UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple=10 ): '''simple docstring''' _a = [] for step in range(UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: _a = os.path.join(UpperCamelCase , '''schedule.bin''' ) torch.save(scheduler.state_dict() , UpperCamelCase ) _a = torch.load(UpperCamelCase ) scheduler.load_state_dict(UpperCamelCase ) return lrs @require_torch class A ( unittest.TestCase ): def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] ) -> int: """simple docstring""" self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) ) for a, b in zip(lowerCAmelCase_ , lowerCAmelCase_ ): self.assertAlmostEqual(lowerCAmelCase_ , lowerCAmelCase_ , delta=lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[str] ) -> Any: """simple docstring""" _a = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCAmelCase_ ) _a = torch.tensor([0.4, 0.2, -0.5] ) _a = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _a = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(1_00 ): _a = criterion(lowerCAmelCase_ , lowerCAmelCase_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" _a = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCAmelCase_ ) _a = torch.tensor([0.4, 0.2, -0.5] ) _a = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _a = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=lowerCAmelCase_ , weight_decay=0.0 , relative_step=lowerCAmelCase_ , scale_parameter=lowerCAmelCase_ , warmup_init=lowerCAmelCase_ , ) for _ in range(10_00 ): _a = criterion(lowerCAmelCase_ , lowerCAmelCase_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class A ( unittest.TestCase ): lowercase_ = nn.Linear(50 ,50 ) if is_torch_available() else None lowercase_ = AdamW(m.parameters() ,lr=10.0 ) if is_torch_available() else None lowercase_ = 10 def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int]=None ) -> Optional[int]: """simple docstring""" self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) ) for a, b in zip(lowerCAmelCase_ , lowerCAmelCase_ ): self.assertAlmostEqual(lowerCAmelCase_ , lowerCAmelCase_ , delta=lowerCAmelCase_ , msg=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" _a = {'''num_warmup_steps''': 2, '''num_training_steps''': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) _a = { get_constant_schedule: ({}, [1_0.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'''num_warmup_steps''': 4}, [0.0, 2.5, 5.0, 7.5, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 1_0.0, 8.7_5, 7.5, 6.2_5, 5.0, 3.7_5, 2.5, 1.2_5], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 1_0.0, 9.6_1, 8.5_3, 6.9_1, 5.0, 3.0_8, 1.4_6, 0.3_8], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, '''num_cycles''': 2}, [0.0, 5.0, 1_0.0, 8.5_3, 5.0, 1.4_6, 1_0.0, 8.5_3, 5.0, 1.4_6], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, '''power''': 2.0, '''lr_end''': 1e-7}, [0.0, 5.0, 1_0.0, 7.6_5_6, 5.6_2_5, 3.9_0_6, 2.5, 1.4_0_6, 0.6_2_5, 0.1_5_6], ), get_inverse_sqrt_schedule: ( {'''num_warmup_steps''': 2}, [0.0, 5.0, 1_0.0, 8.1_6_5, 7.0_7_1, 6.3_2_5, 5.7_7_4, 5.3_4_5, 5.0, 4.7_1_4], ), } for scheduler_func, data in scheds.items(): _a , _a = data _a = scheduler_func(self.optimizer , **lowerCAmelCase_ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) _a = unwrap_schedule(lowerCAmelCase_ , self.num_steps ) self.assertListAlmostEqual( lowerCAmelCase_ , lowerCAmelCase_ , tol=1e-2 , msg=F'failed for {scheduler_func} in normal scheduler' , ) _a = scheduler_func(self.optimizer , **lowerCAmelCase_ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(lowerCAmelCase_ ) # wrap to test picklability of the schedule _a = unwrap_and_save_reload_schedule(lowerCAmelCase_ , self.num_steps ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ , msg=F'failed for {scheduler_func} in save and reload' ) class A : def __init__( self : Optional[Any] , lowerCAmelCase_ : Tuple ) -> Optional[int]: """simple docstring""" _a = fn def __call__( self : List[str] , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Dict ) -> Optional[Any]: """simple docstring""" return self.fn(*lowerCAmelCase_ , **lowerCAmelCase_ ) @classmethod def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : List[str] ) -> Tuple: """simple docstring""" _a = list(map(self , scheduler.lr_lambdas ) )
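Outside the test harness, the schedulers exercised above follow the usual optimizer/scheduler loop; a minimal sketch with linear warmup (the model, learning rate and step counts are placeholders).

import torch
from transformers import get_linear_schedule_with_warmup

model = torch.nn.Linear(50, 50)                      # placeholder model
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=2, num_training_steps=10
)

for step in range(10):
    # ... compute the loss and call loss.backward() here ...
    optimizer.step()
    scheduler.step()
    optimizer.zero_grad()
    print(step, scheduler.get_last_lr()[0])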
'''simple docstring''' class A__ : def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]: '''simple docstring''' _a : List[str] =None _a : Optional[Any] =None _a : str =graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _a : Optional[int] =len(SCREAMING_SNAKE_CASE ) _a : Union[str, Any] =None def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any: '''simple docstring''' if sources is int: _a : Tuple =[sources] if sinks is int: _a : Optional[int] =[sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return _a : Union[str, Any] =sources[0] _a : Tuple =sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: _a : Tuple =0 for i in sources: max_input_flow += sum(self.graph[i] ) _a : List[Any] =len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: _a : Any =max_input_flow _a : List[str] =0 _a : List[str] =len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: _a : str =max_input_flow _a : Optional[Any] =size - 1 def __UpperCAmelCase ( self :Optional[int] ) -> Tuple: '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception("""You need to set maximum flow algorithm before.""" ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int: '''simple docstring''' _a : Tuple =algorithm(self ) class A__ : def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict: '''simple docstring''' _a : List[str] =flow_network _a : List[Any] =flow_network.verticesCount _a : str =flow_network.sourceIndex _a : str =flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that _a : List[Any] =flow_network.graph _a : Optional[int] =False def __UpperCAmelCase ( self :List[Any] ) -> List[str]: '''simple docstring''' if not self.executed: self._algorithm() _a : Any =True def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]: '''simple docstring''' pass class A__ ( UpperCAmelCase__ ): def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result _a : List[Any] =-1 def __UpperCAmelCase ( self :Dict ) -> Tuple: '''simple docstring''' if not self.executed: raise Exception("""You should execute algorithm before using its result!""" ) return self.maximum_flow class A__ ( UpperCAmelCase__ ): def __init__( self :str , SCREAMING_SNAKE_CASE :Tuple ) -> str: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE ) _a : int =[[0] * self.verticies_count for i in range(self.verticies_count )] _a : Union[str, Any] =[0] * self.verticies_count _a : Optional[Any] =[0] * self.verticies_count def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]: '''simple docstring''' _a : int =self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += 
bandwidth # Relabel-to-front selection rule _a : Tuple =[ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list _a : List[Any] =0 while i < len(SCREAMING_SNAKE_CASE ): _a : Any =vertices_list[i] _a : str =self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) _a : List[str] =0 else: i += 1 _a : Optional[int] =sum(self.preflow[self.source_index] ) def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]: '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]: '''simple docstring''' _a : List[str] =min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any ) -> List[Any]: '''simple docstring''' _a : int =None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): _a : Optional[Any] =self.heights[to_index] if min_height is not None: _a : Any =min_height + 1 if __name__ == "__main__": A__: str = [0] A__: Optional[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] A__: Tuple = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network A__: Union[str, Any] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate A__: List[str] = flow_network.find_maximum_flow() print(F"maximum flow is {maximum_flow}")
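As a cross-check on the push-relabel implementation, the same example graph can be handed to networkx (assuming networkx is installed); both should agree on the maximum flow.

import networkx as nx

# Same capacities as the example graph at the bottom of the snippet above.
capacities = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

graph = nx.DiGraph()
for u, row in enumerate(capacities):
    for v, cap in enumerate(row):
        if cap > 0:
            graph.add_edge(u, v, capacity=cap)

flow_value, _ = nx.maximum_flow(graph, 0, 3)
print(flow_value)  # expected: 6 (the single path 0 -> 1 -> 2 -> 3 is bottlenecked at 6)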
"""simple docstring""" from math import pow, sqrt def lowerCAmelCase__ ( *_UpperCamelCase : List[str] ) -> Tuple: """simple docstring""" snake_case = len(lowerCAmelCase_ ) > 0 and all(value > 0.0 for value in values ) return result def lowerCAmelCase__ ( _UpperCamelCase : Any , _UpperCamelCase : List[str] ) -> Union[str, Any]: """simple docstring""" return ( round(sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(lowerCAmelCase_ , lowerCAmelCase_ ) else ValueError('Input Error: Molar mass values must greater than 0.' ) ) def lowerCAmelCase__ ( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Any ) -> str: """simple docstring""" return ( round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) else ValueError( 'Input Error: Molar mass and effusion rate values must greater than 0.' ) ) def lowerCAmelCase__ ( _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : Dict ) -> int: """simple docstring""" return ( round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) else ValueError( 'Input Error: Molar mass and effusion rate values must greater than 0.' ) ) def lowerCAmelCase__ ( _UpperCamelCase : Any , _UpperCamelCase : Any , _UpperCamelCase : Dict ) -> List[str]: """simple docstring""" return ( round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 ) if validate(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) else ValueError( 'Input Error: Molar mass and effusion rate values must greater than 0.' ) ) def lowerCAmelCase__ ( _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[str] ) -> Optional[int]: """simple docstring""" return ( round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 ) if validate(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) else ValueError( 'Input Error: Molar mass and effusion rate values must greater than 0.' ) )
"""simple docstring""" from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def lowerCAmelCase__ ( _UpperCamelCase : Dict[str, torch.Tensor] ) -> Dict[str, torch.Tensor]: """simple docstring""" snake_case = [] snake_case = [] snake_case = [] for rt in rc.restypes: snake_case = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] ) snake_case = {name: i for i, name in enumerate(_UpperCamelCase )} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] ) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] ) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 1_4 ) restype_atomaa_to_atomaa_list.append([0] * 3_7 ) restype_atomaa_mask_list.append([0.0] * 1_4 ) snake_case = torch.tensor( _UpperCamelCase , dtype=torch.intaa , device=protein['aatype'].device , ) snake_case = torch.tensor( _UpperCamelCase , dtype=torch.intaa , device=protein['aatype'].device , ) snake_case = torch.tensor( _UpperCamelCase , dtype=torch.floataa , device=protein['aatype'].device , ) snake_case = protein['aatype'].to(torch.long ) # create the mapping for (residx, atom14) --> atom37, i.e. an array # with shape (num_res, 14) containing the atom37 indices for this protein snake_case = restype_atomaa_to_atomaa[protein_aatype] snake_case = restype_atomaa_mask[protein_aatype] snake_case = residx_atomaa_mask snake_case = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back snake_case = restype_atomaa_to_atomaa[protein_aatype] snake_case = residx_atomaa_to_atomaa.long() # create the corresponding mask snake_case = torch.zeros([2_1, 3_7] , dtype=torch.floataa , device=protein['aatype'].device ) for restype, restype_letter in enumerate(rc.restypes ): snake_case = rc.restype_atoa[restype_letter] snake_case = rc.residue_atoms[restype_name] for atom_name in atom_names: snake_case = rc.atom_order[atom_name] snake_case = 1 snake_case = restype_atomaa_mask[protein_aatype] snake_case = residx_atomaa_mask return protein def lowerCAmelCase__ ( _UpperCamelCase : Dict[str, torch.Tensor] ) -> Dict[str, np.ndarray]: """simple docstring""" snake_case = tree_map(lambda _UpperCamelCase : torch.tensor(_UpperCamelCase , device=batch['aatype'].device ) , _UpperCamelCase , np.ndarray ) snake_case = tensor_tree_map(lambda _UpperCamelCase : np.array(_UpperCamelCase ) , make_atomaa_masks(_UpperCamelCase ) ) return out
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) def __lowerCAmelCase ( A_ : List[str] ) -> Optional[Any]: __UpperCAmelCase = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: __UpperCAmelCase = [1_44, 1_92, 2_40] __UpperCAmelCase = [16, 32, 64, 96, 1_28, 1_60, 6_40] elif "mobilevit_xs" in mobilevit_name: __UpperCAmelCase = [96, 1_20, 1_44] __UpperCAmelCase = [16, 32, 48, 64, 80, 96, 3_84] elif "mobilevit_xxs" in mobilevit_name: __UpperCAmelCase = [64, 80, 96] __UpperCAmelCase = [16, 16, 24, 48, 64, 80, 3_20] __UpperCAmelCase = 0.05 __UpperCAmelCase = 2.0 if mobilevit_name.startswith("deeplabv3_" ): __UpperCAmelCase = 5_12 __UpperCAmelCase = 16 __UpperCAmelCase = 21 __UpperCAmelCase = "pascal-voc-id2label.json" else: __UpperCAmelCase = 10_00 __UpperCAmelCase = "imagenet-1k-id2label.json" __UpperCAmelCase = "huggingface/label-files" __UpperCAmelCase = json.load(open(hf_hub_download(_A , _A , repo_type="dataset" ) , "r" ) ) __UpperCAmelCase = {int(_A ): v for k, v in idalabel.items()} __UpperCAmelCase = idalabel __UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def __lowerCAmelCase ( A_ : Optional[int] , A_ : Dict=False ) -> Dict: for i in range(1 , 6 ): if F'''layer_{i}.''' in name: __UpperCAmelCase = name.replace(F'''layer_{i}.''' , F'''encoder.layer.{i - 1}.''' ) if "conv_1." in name: __UpperCAmelCase = name.replace("conv_1." , "conv_stem." ) if ".block." in name: __UpperCAmelCase = name.replace(".block." , "." ) if "exp_1x1" in name: __UpperCAmelCase = name.replace("exp_1x1" , "expand_1x1" ) if "red_1x1" in name: __UpperCAmelCase = name.replace("red_1x1" , "reduce_1x1" ) if ".local_rep.conv_3x3." in name: __UpperCAmelCase = name.replace(".local_rep.conv_3x3." , ".conv_kxk." ) if ".local_rep.conv_1x1." in name: __UpperCAmelCase = name.replace(".local_rep.conv_1x1." , ".conv_1x1." ) if ".norm." in name: __UpperCAmelCase = name.replace(".norm." , ".normalization." ) if ".conv." in name: __UpperCAmelCase = name.replace(".conv." , ".convolution." ) if ".conv_proj." in name: __UpperCAmelCase = name.replace(".conv_proj." , ".conv_projection." ) for i in range(0 , 2 ): for j in range(0 , 4 ): if F'''.{i}.{j}.''' in name: __UpperCAmelCase = name.replace(F'''.{i}.{j}.''' , F'''.{i}.layer.{j}.''' ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F'''.{i}.{j}.''' in name: __UpperCAmelCase = name.replace(F'''.{i}.{j}.''' , F'''.{i}.''' ) if "expand_1x1" in name: __UpperCAmelCase = name.replace("expand_1x1" , "downsampling_layer.expand_1x1" ) if "conv_3x3" in name: __UpperCAmelCase = name.replace("conv_3x3" , "downsampling_layer.conv_3x3" ) if "reduce_1x1" in name: __UpperCAmelCase = name.replace("reduce_1x1" , "downsampling_layer.reduce_1x1" ) for i in range(2 , 5 ): if F'''.global_rep.{i}.weight''' in name: __UpperCAmelCase = name.replace(F'''.global_rep.{i}.weight''' , ".layernorm.weight" ) if F'''.global_rep.{i}.bias''' in name: __UpperCAmelCase = name.replace(F'''.global_rep.{i}.bias''' , ".layernorm.bias" ) if ".global_rep." in name: __UpperCAmelCase = name.replace(".global_rep." , ".transformer." ) if ".pre_norm_mha.0." in name: __UpperCAmelCase = name.replace(".pre_norm_mha.0." , ".layernorm_before." 
) if ".pre_norm_mha.1.out_proj." in name: __UpperCAmelCase = name.replace(".pre_norm_mha.1.out_proj." , ".attention.output.dense." ) if ".pre_norm_ffn.0." in name: __UpperCAmelCase = name.replace(".pre_norm_ffn.0." , ".layernorm_after." ) if ".pre_norm_ffn.1." in name: __UpperCAmelCase = name.replace(".pre_norm_ffn.1." , ".intermediate.dense." ) if ".pre_norm_ffn.4." in name: __UpperCAmelCase = name.replace(".pre_norm_ffn.4." , ".output.dense." ) if ".transformer." in name: __UpperCAmelCase = name.replace(".transformer." , ".transformer.layer." ) if ".aspp_layer." in name: __UpperCAmelCase = name.replace(".aspp_layer." , "." ) if ".aspp_pool." in name: __UpperCAmelCase = name.replace(".aspp_pool." , "." ) if "seg_head." in name: __UpperCAmelCase = name.replace("seg_head." , "segmentation_head." ) if "segmentation_head.classifier.classifier." in name: __UpperCAmelCase = name.replace("segmentation_head.classifier.classifier." , "segmentation_head.classifier." ) if "classifier.fc." in name: __UpperCAmelCase = name.replace("classifier.fc." , "classifier." ) elif (not base_model) and ("segmentation_head." not in name): __UpperCAmelCase = "mobilevit." + name return name def __lowerCAmelCase ( A_ : Any , A_ : Union[str, Any] , A_ : Optional[Any]=False ) -> Tuple: if base_model: __UpperCAmelCase = "" else: __UpperCAmelCase = "mobilevit." for key in orig_state_dict.copy().keys(): __UpperCAmelCase = orig_state_dict.pop(_A ) if key[:8] == "encoder.": __UpperCAmelCase = key[8:] if "qkv" in key: __UpperCAmelCase = key.split("." ) __UpperCAmelCase = int(key_split[0][6:] ) - 1 __UpperCAmelCase = int(key_split[3] ) __UpperCAmelCase = model.get_submodule(F'''{model_prefix}encoder.layer.{layer_num}''' ) __UpperCAmelCase = layer.transformer.layer[transformer_num].attention.attention.all_head_size __UpperCAmelCase = ( F'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.''' ) if "weight" in key: __UpperCAmelCase = val[:dim, :] __UpperCAmelCase = val[dim : dim * 2, :] __UpperCAmelCase = val[-dim:, :] else: __UpperCAmelCase = val[:dim] __UpperCAmelCase = val[dim : dim * 2] __UpperCAmelCase = val[-dim:] else: __UpperCAmelCase = val return orig_state_dict def __lowerCAmelCase ( ) -> Union[str, Any]: __UpperCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg" __UpperCAmelCase = Image.open(requests.get(_A , stream=_A ).raw ) return im @torch.no_grad() def __lowerCAmelCase ( A_ : str , A_ : int , A_ : int , A_ : str=False ) -> Union[str, Any]: __UpperCAmelCase = get_mobilevit_config(_A ) # load original state_dict __UpperCAmelCase = torch.load(_A , map_location="cpu" ) # load 🤗 model if mobilevit_name.startswith("deeplabv3_" ): __UpperCAmelCase = MobileViTForSemanticSegmentation(_A ).eval() else: __UpperCAmelCase = MobileViTForImageClassification(_A ).eval() __UpperCAmelCase = convert_state_dict(_A , _A ) model.load_state_dict(_A ) # Check outputs on an image, prepared by MobileViTImageProcessor __UpperCAmelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) __UpperCAmelCase = image_processor(images=prepare_img() , return_tensors="pt" ) __UpperCAmelCase = model(**_A ) __UpperCAmelCase = outputs.logits if mobilevit_name.startswith("deeplabv3_" ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": __UpperCAmelCase = torch.tensor( [ [[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]], [[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], 
[-7.19_61, -7.24_82, -7.15_69]], [[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": __UpperCAmelCase = torch.tensor( [ [[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]], [[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]], [[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": __UpperCAmelCase = torch.tensor( [ [[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]], [[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]], [[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]], ] ) else: raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3, :3, :3] , _A , atol=1e-4 ) else: assert logits.shape == (1, 10_00) if mobilevit_name == "mobilevit_s": __UpperCAmelCase = torch.tensor([-0.98_66, 0.23_92, -1.12_41] ) elif mobilevit_name == "mobilevit_xs": __UpperCAmelCase = torch.tensor([-2.47_61, -0.93_99, -1.95_87] ) elif mobilevit_name == "mobilevit_xxs": __UpperCAmelCase = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ) else: raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3] , _A , atol=1e-4 ) Path(_A ).mkdir(exist_ok=_A ) print(F'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_A ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_A ) if push_to_hub: __UpperCAmelCase = { "mobilevit_s": "mobilevit-small", "mobilevit_xs": "mobilevit-x-small", "mobilevit_xxs": "mobilevit-xx-small", "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small", "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small", "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small", } print("Pushing to the hub..." ) __UpperCAmelCase = model_mapping[mobilevit_name] image_processor.push_to_hub(_A , organization="apple" ) model.push_to_hub(_A , organization="apple" ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--mobilevit_name""", default="""mobilevit_s""", type=str, help=( """Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',""" """ 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'.""" ), ) parser.add_argument( """--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) a_ = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
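Once converted and pushed, the checkpoints are consumed through the standard transformers API; a brief sketch against the apple/mobilevit-small checkpoint named in the hub mapping above (the test image URL is the one used by prepare_img).

import requests
import torch
from PIL import Image
from transformers import MobileViTForImageClassification, MobileViTImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-small")

with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])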
import enum import os from hashlib import shaaaa from typing import Optional from .. import config from .logging import get_logger SCREAMING_SNAKE_CASE : List[Any] = get_logger(__name__) class A_ ( enum.Enum ): _SCREAMING_SNAKE_CASE = """all_checks""" _SCREAMING_SNAKE_CASE = """basic_checks""" _SCREAMING_SNAKE_CASE = """no_checks""" class A_ ( a_ ): pass class A_ ( a_ ): pass class A_ ( a_ ): pass class A_ ( a_ ): pass def __A ( _A , _A , _A=None ): """simple docstring""" if expected_checksums is None: logger.info("Unable to verify checksums." ) return if len(set(_A ) - set(_A ) ) > 0: raise ExpectedMoreDownloadedFiles(str(set(_A ) - set(_A ) ) ) if len(set(_A ) - set(_A ) ) > 0: raise UnexpectedDownloadedFile(str(set(_A ) - set(_A ) ) ) __a = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]] __a = " for " + verification_name if verification_name is not None else "" if len(_A ) > 0: raise NonMatchingChecksumError( f"""Checksums didn't match{for_verification_name}:\n""" f"""{bad_urls}\n""" "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" ) logger.info("All the checksums matched successfully" + for_verification_name ) class A_ ( a_ ): pass class A_ ( a_ ): pass class A_ ( a_ ): pass class A_ ( a_ ): pass def __A ( _A , _A ): """simple docstring""" if expected_splits is None: logger.info("Unable to verify splits sizes." ) return if len(set(_A ) - set(_A ) ) > 0: raise ExpectedMoreSplits(str(set(_A ) - set(_A ) ) ) if len(set(_A ) - set(_A ) ) > 0: raise UnexpectedSplits(str(set(_A ) - set(_A ) ) ) __a = [ {"expected": expected_splits[name], "recorded": recorded_splits[name]} for name in expected_splits if expected_splits[name].num_examples != recorded_splits[name].num_examples ] if len(_A ) > 0: raise NonMatchingSplitsSizesError(str(_A ) ) logger.info("All the splits matched successfully." ) def __A ( _A , _A = True ): """simple docstring""" if record_checksum: __a = shaaaa() with open(_A , "rb" ) as f: for chunk in iter(lambda: f.read(1 << 20 ) , B"" ): m.update(_A ) __a = m.hexdigest() else: __a = None return {"num_bytes": os.path.getsize(_A ), "checksum": checksum} def __A ( _A ): """simple docstring""" if dataset_size and config.IN_MEMORY_MAX_SIZE: return dataset_size < config.IN_MEMORY_MAX_SIZE else: return False
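The last helper above reduces to hashing a file in 1 MiB chunks; a standalone sketch of the same idea (function name chosen here for illustration).

import os
from hashlib import sha256

def size_and_checksum(path: str) -> dict:
    # Stream the file in 1 MiB chunks so large downloads never have to fit in memory.
    m = sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            m.update(chunk)
    return {"num_bytes": os.path.getsize(path), "checksum": m.hexdigest()}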
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Vector from end_point1 to end_point2."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Cross product AB x AC."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """True if every component rounds to zero at the given accuracy."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three 3D points are collinear iff AB x AC is the zero vector."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
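Two quick checks using the helpers above.

# These three points lie on the line x = y = z, so AB x AC is the zero vector.
print(are_collinear((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (3.0, 3.0, 3.0)))  # True
# These span a plane but not a line, so the cross product is non-zero.
print(are_collinear((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)))  # False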
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): lowercase__ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] lowercase__ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } lowercase__ = f'''{src_lang}-{tgt_lang}''' lowercase__ = f''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). The abbreviation FSMT stands for FairSeqMachineTranslation All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-{src_lang}-{tgt_lang}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). 
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR\'s WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) ''' os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) lowercase__ = os.path.join(SCREAMING_SNAKE_CASE_ , "README.md" ) print(f'''Generating {path}''' ) with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f: f.write(SCREAMING_SNAKE_CASE_ ) # make sure we are under the root of the project lowercase_ = Path(__file__).resolve().parent.parent.parent lowercase_ = repo_dir / """model_cards""" for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowercase_ , lowercase_ , lowercase_ = model_name.split("""-""") lowercase_ = model_cards_dir / """facebook""" / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
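The generated card embeds a usage snippet; filled in for the en-ru pair it reads as follows (taken straight from the template above).

from transformers import FSMTForConditionalGeneration, FSMTTokenizer

mname = "facebook/wmt19-en-ru"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input_text = "Machine learning is great, isn't it?"
input_ids = tokenizer.encode(input_text, return_tensors="pt")
outputs = model.generate(input_ids)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
# Expected (per the card): "Машинное обучение - это здорово, не так ли?"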
'''simple docstring''' import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class lowerCAmelCase_ : def __snake_case ( self : List[Any] ): '''simple docstring''' torch.manual_seed(0 ) snake_case : List[Any] =TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) snake_case : Dict =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) snake_case : int =UNetaDConditionModel( sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ], mid_block_type='''UNetMidBlock2DSimpleCrossAttn''', up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type='''text''', addition_embed_type_num_heads=2, cross_attention_norm='''group_norm''', resnet_time_scale_shift='''scale_shift''', act_fn='''gelu''', ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) snake_case : Any =DDPMScheduler( num_train_timesteps=1_000, beta_schedule='''squaredcos_cap_v2''', beta_start=0.0001, beta_end=0.02, thresholding=_snake_case, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type='''epsilon''', variance_type='''learned_range''', ) torch.manual_seed(0 ) snake_case : List[str] =IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __snake_case ( self : str ): '''simple docstring''' torch.manual_seed(0 ) snake_case : Optional[Any] =TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) snake_case : int =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) snake_case : Optional[int] =UNetaDConditionModel( sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ], mid_block_type='''UNetMidBlock2DSimpleCrossAttn''', up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type='''text''', addition_embed_type_num_heads=2, cross_attention_norm='''group_norm''', resnet_time_scale_shift='''scale_shift''', act_fn='''gelu''', class_embed_type='''timestep''', mid_block_scale_factor=1.414, time_embedding_act_fn='''gelu''', time_embedding_dim=32, ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) snake_case : Dict =DDPMScheduler( num_train_timesteps=1_000, beta_schedule='''squaredcos_cap_v2''', beta_start=0.0001, beta_end=0.02, thresholding=_snake_case, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type='''epsilon''', variance_type='''learned_range''', ) torch.manual_seed(0 ) snake_case : Tuple =DDPMScheduler( num_train_timesteps=1_000, beta_schedule='''squaredcos_cap_v2''', beta_start=0.0001, beta_end=0.02, ) 
torch.manual_seed(0 ) snake_case : Dict =IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def __snake_case ( self : int ): '''simple docstring''' snake_case : List[Any] =self.get_dummy_components() snake_case : Dict =self.pipeline_class(**_snake_case ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) snake_case : Optional[int] =self.get_dummy_inputs(_snake_case ) snake_case : Any =inputs['''prompt'''] snake_case : str =inputs['''generator'''] snake_case : Union[str, Any] =inputs['''num_inference_steps'''] snake_case : Dict =inputs['''output_type'''] if "image" in inputs: snake_case : Union[str, Any] =inputs['''image'''] else: snake_case : List[str] =None if "mask_image" in inputs: snake_case : Optional[int] =inputs['''mask_image'''] else: snake_case : Union[str, Any] =None if "original_image" in inputs: snake_case : Any =inputs['''original_image'''] else: snake_case : List[Any] =None snake_case , snake_case : Dict =pipe.encode_prompt(_snake_case ) # inputs with prompt converted to embeddings snake_case : Union[str, Any] ={ '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: snake_case : int =image if mask_image is not None: snake_case : List[str] =mask_image if original_image is not None: snake_case : Any =original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(_snake_case, _snake_case, _snake_case ) snake_case : Any =pipe(**_snake_case )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_snake_case ) snake_case : int =self.pipeline_class.from_pretrained(_snake_case ) pipe_loaded.to(_snake_case ) pipe_loaded.set_progress_bar_config(disable=_snake_case ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(_snake_case, _snake_case ) is None, f'''`{optional_component}` did not stay set to None after loading.''', ) snake_case : List[Any] =self.get_dummy_inputs(_snake_case ) snake_case : str =inputs['''generator'''] snake_case : List[str] =inputs['''num_inference_steps'''] snake_case : Tuple =inputs['''output_type'''] # inputs with prompt converted to embeddings snake_case : Optional[int] ={ '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: snake_case : Optional[Any] =image if mask_image is not None: snake_case : Tuple =mask_image if original_image is not None: snake_case : Any =original_image snake_case : Optional[int] =pipe_loaded(**_snake_case )[0] snake_case : str =np.abs(to_np(_snake_case ) - to_np(_snake_case ) ).max() self.assertLess(_snake_case, 1E-4 ) def __snake_case ( self : Union[str, Any] ): '''simple docstring''' snake_case : List[str] =self.get_dummy_components() snake_case : Optional[Any] =self.pipeline_class(**_snake_case ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) snake_case : str =self.get_dummy_inputs(_snake_case ) snake_case : Optional[int] =pipe(**_snake_case )[0] with 
tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_snake_case ) snake_case : Union[str, Any] =self.pipeline_class.from_pretrained(_snake_case ) pipe_loaded.to(_snake_case ) pipe_loaded.set_progress_bar_config(disable=_snake_case ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests snake_case : Tuple =self.get_dummy_inputs(_snake_case ) snake_case : int =pipe_loaded(**_snake_case )[0] snake_case : Optional[Any] =np.abs(to_np(_snake_case ) - to_np(_snake_case ) ).max() self.assertLess(_snake_case, 1E-4 )
import math

import qiskit


def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    """Build and simulate a quantum full adder; inputs are 0, 1, or 2 (2 puts the qubit in superposition)."""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")

    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries (superposition)
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qubits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
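For reference, the truth table the circuit is meant to reproduce can be checked classically without qiskit.

# Classical reference for the full adder implemented by the circuit above:
# sum = a XOR b XOR carry_in, carry_out = majority(a, b, carry_in).
for a in (0, 1):
    for b in (0, 1):
        for cin in (0, 1):
            s = a ^ b ^ cin
            cout = (a & b) | (cin & (a ^ b))
            print(f"{a} + {b} + {cin} -> sum={s}, carry={cout}")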
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) _lowerCamelCase : Tuple = { '''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''', '''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''', '''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''', '''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''', '''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''', '''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''', '''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''', '''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''', '''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''', } class lowerCamelCase (__lowerCamelCase ): """simple docstring""" UpperCAmelCase_ = "xmod" def __init__( self : Union[str, Any], _UpperCAmelCase : str=3_0_5_2_2, _UpperCAmelCase : List[str]=7_6_8, _UpperCAmelCase : str=1_2, _UpperCAmelCase : Tuple=1_2, _UpperCAmelCase : Dict=3_0_7_2, _UpperCAmelCase : List[Any]="gelu", _UpperCAmelCase : List[Any]=0.1, _UpperCAmelCase : Optional[Any]=0.1, _UpperCAmelCase : int=5_1_2, _UpperCAmelCase : Tuple=2, _UpperCAmelCase : Tuple=0.02, _UpperCAmelCase : Union[str, Any]=1E-12, _UpperCAmelCase : Union[str, Any]=1, _UpperCAmelCase : Any=0, _UpperCAmelCase : List[str]=2, _UpperCAmelCase : Optional[Any]="absolute", _UpperCAmelCase : int=True, _UpperCAmelCase : Optional[Any]=None, _UpperCAmelCase : Optional[Any]=False, _UpperCAmelCase : Any=2, _UpperCAmelCase : Optional[int]=False, _UpperCAmelCase : List[str]=True, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : int=("en_XX",), _UpperCAmelCase : Optional[Any]=None, **_UpperCAmelCase : Union[str, Any], ) -> List[Any]: """simple docstring""" super().__init__(pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Any = vocab_size SCREAMING_SNAKE_CASE__ : Dict = hidden_size SCREAMING_SNAKE_CASE__ : int = num_hidden_layers SCREAMING_SNAKE_CASE__ : Any = num_attention_heads SCREAMING_SNAKE_CASE__ : Any = hidden_act SCREAMING_SNAKE_CASE__ : Dict = intermediate_size SCREAMING_SNAKE_CASE__ : int = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : str = max_position_embeddings SCREAMING_SNAKE_CASE__ : Tuple = type_vocab_size SCREAMING_SNAKE_CASE__ : Tuple = initializer_range SCREAMING_SNAKE_CASE__ : Optional[int] = layer_norm_eps SCREAMING_SNAKE_CASE__ : str = position_embedding_type SCREAMING_SNAKE_CASE__ : Dict = use_cache SCREAMING_SNAKE_CASE__ : Tuple = classifier_dropout SCREAMING_SNAKE_CASE__ : int = pre_norm SCREAMING_SNAKE_CASE__ : List[str] = adapter_reduction_factor SCREAMING_SNAKE_CASE__ : Dict = adapter_layer_norm SCREAMING_SNAKE_CASE__ : Tuple = adapter_reuse_layer_norm SCREAMING_SNAKE_CASE__ : Dict = ln_before_adapter SCREAMING_SNAKE_CASE__ : List[Any] = list(_UpperCAmelCase ) 
SCREAMING_SNAKE_CASE__ : Any = default_language class lowerCamelCase (__lowerCamelCase ): """simple docstring""" @property def A_ ( self : int ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": SCREAMING_SNAKE_CASE__ : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"} else: SCREAMING_SNAKE_CASE__ : Any = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
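A brief, hedged sketch of using this configuration through the public transformers API; facebook/xmod-base is the checkpoint named in the URL mapping above, and the attribute names follow the constructor.

from transformers import XmodConfig

# Defaults mirror the constructor above: a single English adapter ("en_XX").
config = XmodConfig()
print(config.num_hidden_layers, config.adapter_reduction_factor, config.languages)

# Or pull the published configuration for the base multilingual checkpoint.
remote_config = XmodConfig.from_pretrained("facebook/xmod-base")
print(len(remote_config.languages))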
from __future__ import annotations from collections.abc import Sequence from typing import Literal def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> str | Literal[False]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = list(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = list(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = 0 for i in range(len(SCREAMING_SNAKE_CASE__ ) ): if lista[i] != lista[i]: count += 1 SCREAMING_SNAKE_CASE__ : Any = "_" if count > 1: return False else: return "".join(SCREAMING_SNAKE_CASE__ ) def _a ( SCREAMING_SNAKE_CASE__ : list[str] ) -> list[str]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] while True: SCREAMING_SNAKE_CASE__ : Optional[Any] = ["$"] * len(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = [] for i in range(len(SCREAMING_SNAKE_CASE__ ) ): for j in range(i + 1 , len(SCREAMING_SNAKE_CASE__ ) ): SCREAMING_SNAKE_CASE__ : str = compare_string(binary[i] , binary[j] ) if k is False: SCREAMING_SNAKE_CASE__ : int = "*" SCREAMING_SNAKE_CASE__ : Union[str, Any] = "*" temp.append("X" ) for i in range(len(SCREAMING_SNAKE_CASE__ ) ): if checka[i] == "$": pi.append(binary[i] ) if len(SCREAMING_SNAKE_CASE__ ) == 0: return pi SCREAMING_SNAKE_CASE__ : List[str] = list(set(SCREAMING_SNAKE_CASE__ ) ) def _a ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Sequence[float] ) -> list[str]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = [] for minterm in minterms: SCREAMING_SNAKE_CASE__ : Optional[int] = "" for _ in range(SCREAMING_SNAKE_CASE__ ): SCREAMING_SNAKE_CASE__ : Any = str(minterm % 2 ) + string minterm //= 2 temp.append(SCREAMING_SNAKE_CASE__ ) return temp def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int ) -> bool: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = list(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = 0 for i in range(len(SCREAMING_SNAKE_CASE__ ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def _a ( SCREAMING_SNAKE_CASE__ : list[list[int]] , SCREAMING_SNAKE_CASE__ : list[str] ) -> list[str]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = [] SCREAMING_SNAKE_CASE__ : Optional[Any] = [0] * len(SCREAMING_SNAKE_CASE__ ) for i in range(len(chart[0] ) ): SCREAMING_SNAKE_CASE__ : Any = 0 SCREAMING_SNAKE_CASE__ : Optional[Any] = -1 for j in range(len(SCREAMING_SNAKE_CASE__ ) ): if chart[j][i] == 1: count += 1 SCREAMING_SNAKE_CASE__ : List[Any] = j if count == 1: SCREAMING_SNAKE_CASE__ : List[str] = 1 for i in range(len(SCREAMING_SNAKE_CASE__ ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(SCREAMING_SNAKE_CASE__ ) ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0 temp.append(prime_implicants[i] ) while True: SCREAMING_SNAKE_CASE__ : List[str] = 0 SCREAMING_SNAKE_CASE__ : Optional[Any] = -1 SCREAMING_SNAKE_CASE__ : List[str] = 0 for i in range(len(SCREAMING_SNAKE_CASE__ ) ): SCREAMING_SNAKE_CASE__ : int = chart[i].count(1 ) if count_n > max_n: SCREAMING_SNAKE_CASE__ : Tuple = count_n SCREAMING_SNAKE_CASE__ : str = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(SCREAMING_SNAKE_CASE__ ) ): SCREAMING_SNAKE_CASE__ : Optional[Any] = 0 def _a ( SCREAMING_SNAKE_CASE__ : list[str] , SCREAMING_SNAKE_CASE__ : list[str] ) -> list[list[int]]: 
'''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = [[0 for x in range(len(SCREAMING_SNAKE_CASE__ ) )] for x in range(len(SCREAMING_SNAKE_CASE__ ) )] for i in range(len(SCREAMING_SNAKE_CASE__ ) ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = prime_implicants[i].count("_" ) for j in range(len(SCREAMING_SNAKE_CASE__ ) ): if is_for_table(prime_implicants[i] , binary[j] , SCREAMING_SNAKE_CASE__ ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1 return chart def _a ( ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = int(input("Enter the no. of variables\n" ) ) SCREAMING_SNAKE_CASE__ : Optional[int] = [ float(SCREAMING_SNAKE_CASE__ ) for x in input( "Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split() ] SCREAMING_SNAKE_CASE__ : Tuple = decimal_to_binary(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Tuple = check(SCREAMING_SNAKE_CASE__ ) print("Prime Implicants are:" ) print(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[str] = prime_implicant_chart(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : str = selection(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) print("Essential Prime Implicants are:" ) print(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": import doctest doctest.testmod() main()
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # override this in concrete algorithms
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for _ in range(self.vertices_count)]
        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's a neighbour and the current vertex is higher, push
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
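
The `__main__` block above uses a single source and sink, so the fake super-source/super-sink branch of `_normalize_graph` is never exercised. Below is a small sketch of that multi-source, multi-sink path; the graph and its capacities are arbitrary illustrative values, not taken from the original file.

multi_graph = [
    [0, 0, 4, 0],  # source A feeds vertex 2 with capacity 4
    [0, 0, 0, 5],  # source B feeds vertex 3 with capacity 5
    [0, 0, 0, 3],
    [0, 0, 0, 0],
]
network = FlowNetwork(multi_graph, sources=[0, 1], sinks=[2, 3])
network.set_maximum_flow_algorithm(PushRelabelExecutor)
# a fake super-source and super-sink are added internally;
# for this toy graph the flow should be 9 (4 into vertex 2 plus 5 into vertex 3)
print(network.find_maximum_flow())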
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(
            torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3)
        )

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            self.image_processor_tester.patch_size["height"]
            * self.image_processor_tester.patch_size["width"]
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            self.image_processor_tester.patch_size["height"]
            * self.image_processor_tester.patch_size["width"]
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input: in VQA mode a header text is required
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            self.image_processor_tester.patch_size["height"]
            * self.image_processor_tester.patch_size["width"]
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            self.image_processor_tester.patch_size["height"]
            * self.image_processor_tester.patch_size["width"]
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input: the alpha channel is dropped by do_convert_rgb
        expected_hidden_dim = (
            self.image_processor_tester.patch_size["height"]
            * self.image_processor_tester.patch_size["width"]
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
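
For context, a minimal standalone sketch of how the image processor exercised by these tests is typically driven outside of unittest. It assumes transformers with Pix2Struct support, Pillow, NumPy, and torch are installed; the random image, the default-constructed processor, and the max_patches value are illustrative choices, not taken from the test file.

import numpy as np
from PIL import Image
from transformers import Pix2StructImageProcessor

# a random 64x64 RGB image stands in for a real document screenshot
image = Image.fromarray(np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8))
processor = Pix2StructImageProcessor()  # defaults: 16x16 patches, per-image normalization
encoded = processor(image, return_tensors="pt", max_patches=1024)
print(encoded.flattened_patches.shape)  # should be (1, 1024, 16 * 16 * 3 + 2)
print(encoded.attention_mask.shape)     # should be (1, 1024)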
import os
from typing import Dict, List, Tuple, TypeVar, Union

T = TypeVar("T")

# type aliases for values that may be a single item, a sequence, or a mapping
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
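
A small illustration of how such aliases are meant to appear in signatures. The alias names follow the reconstruction above, and `flatten_paths` is a hypothetical helper written for this sketch only.

def flatten_paths(paths: NestedDataStructureLike[PathLike]) -> List[PathLike]:
    # accept a single path, a list/tuple of paths, or a dict of named paths
    if isinstance(paths, (list, tuple)):
        return [os.fspath(p) for p in paths]
    if isinstance(paths, dict):
        return [os.fspath(p) for p in paths.values()]
    return [os.fspath(paths)]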
def perfect_cube(n: int) -> bool:
    """Check whether n is a perfect cube via a floating-point cube root.

    Note: the float comparison can misclassify large values once rounding
    error creeps in.
    """
    val = n ** (1 / 3)
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
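
Because the float-based check above compares a rounded cube to an exact integer, a sketch of an exact variant using an integer binary search is shown below; the name `perfect_cube_exact` is my own, not from the original file.

def perfect_cube_exact(n: int) -> bool:
    # binary-search the integer cube root, so the check stays exact for any size of n
    n = abs(n)
    lo, hi = 0, max(n, 1)
    while lo <= hi:
        mid = (lo + hi) // 2
        cube = mid * mid * mid
        if cube == n:
            return True
        if cube < n:
            lo = mid + 1
        else:
            hi = mid - 1
    return False


if __name__ == "__main__":
    print(perfect_cube_exact(27))  # True
    print(perfect_cube_exact(4))   # False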