\\n\\n\";\n  std::string initial_request;\n  tensorstore::internal::Thread serve_thread({\"serve_thread\"}, [&] {\n    auto client_fd = AcceptNonBlocking(*socket);\n    ABSL_CHECK(client_fd.has_value());\n    initial_request = ReceiveAvailable(*client_fd);\n    AssertSend(*client_fd, kResponse);\n    CloseSocket(*client_fd);\n  });\n  auto response = transport->IssueRequest(\n      HttpRequestBuilder(\"POST\", absl::StrCat(\"http://\", hostport, \"/\"))\n          .AddHeader(\"X-foo: bar\")\n          .AddQueryParameter(\"name\", \"dragon\")\n          .AddQueryParameter(\"age\", \"1234\")\n          .EnableAcceptEncoding()\n          .BuildRequest(),\n      IssueRequestOptions(absl::Cord(\"Hello\")));\n  ABSL_LOG(INFO) << response.status();\n  ABSL_LOG(INFO) << \"Wait on server\";\n  serve_thread.Join();\n  CloseSocket(*socket);\n  EXPECT_THAT(initial_request, HasSubstr(\"POST /?name=dragon&age=1234\"));\n  EXPECT_THAT(initial_request,\n              HasSubstr(absl::StrCat(\"Host: \", hostport, \"\\r\\n\")));\n  EXPECT_THAT(initial_request, HasSubstr(\"Accept: */*\\r\\n\"));\n  EXPECT_THAT(initial_request, HasSubstr(\"X-foo: bar\\r\\n\"));\n  EXPECT_THAT(initial_request, HasSubstr(\"Content-Length: 5\"));\n  EXPECT_THAT(\n      initial_request,\n      HasSubstr(\"Content-Type: application/x-www-form-urlencoded\\r\\n\"));\n  EXPECT_THAT(initial_request, HasSubstr(\"Hello\"));\n  }\n}\n} "},"Code Url":{"kind":"string","value":"https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/http/curl_transport.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/http/curl_transport_test.cc"},"Commit Hash":{"kind":"string","value":"4f887a6430414cd6088e1743555015b10f116d50"}}},{"rowIdx":1177,"cells":{"ID":{"kind":"string","value":"1ee979de-8d4e-44bc-bbbc-6a20fdeda327"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"case_format"},"File Path in Repository":{"kind":"string","value":"tensorflow/c/experimental/ops/gen/common/case_format.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/c/experimental/ops/gen/common/case_format_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/c/experimental/ops/gen/common/case_format.h\"\n#include \"absl/strings/ascii.h\"\n#include \"tensorflow/core/platform/types.h\"\nnamespace tensorflow {\nnamespace generator {\nnamespace {\nenum CaseFormatType {\n  LOWER_CAMEL,\n  UPPER_CAMEL,\n  LOWER_SNAKE,\n  UPPER_SNAKE,\n};\nstring FormatStringCase(const string &str, CaseFormatType to,\n                        const char delimiter = '_') {\n  const bool from_snake = (str == absl::AsciiStrToUpper(str)) ||\n                          (str == absl::AsciiStrToLower(str));\n  const bool toUpper = (to == UPPER_CAMEL || to == UPPER_SNAKE);\n  const bool toSnake = (to == LOWER_SNAKE || to == UPPER_SNAKE);\n  string result;\n  bool inputStart = true;\n  bool wordStart = true;\n  for (const char c : str) {\n    if (c == delimiter) {\n      if (wordStart) {\n        result.push_back(delimiter);\n      }\n      wordStart = true;\n      continue;\n    }\n    if (!from_snake && isupper(c)) {\n      wordStart = true;\n    }\n    if (wordStart && toSnake && !inputStart) {\n      result.push_back(delimiter);\n    }\n    const bool shouldCapIfSnake = toUpper;\n    const bool shouldCapIfCamel = wordStart && (toUpper || !inputStart);\n    if ((toSnake && shouldCapIfSnake) || (!toSnake && 
shouldCapIfCamel)) {\n result += toupper(c);\n } else {\n result += tolower(c);\n }\n wordStart = false;\n inputStart = false;\n }\n if (wordStart) {\n result.push_back(delimiter);\n }\n return result;\n}\n} \nstring toLowerCamel(const string &s, const char delimiter) {\n return FormatStringCase(s, LOWER_CAMEL, delimiter);\n}\nstring toLowerSnake(const string &s, const char delimiter) {\n return FormatStringCase(s, LOWER_SNAKE, delimiter);\n}\nstring toUpperCamel(const string &s, const char delimiter) {\n return FormatStringCase(s, UPPER_CAMEL, delimiter);\n}\nstring toUpperSnake(const string &s, const char delimiter) {\n return FormatStringCase(s, UPPER_SNAKE, delimiter);\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/c/experimental/ops/gen/common/case_format.h\"\n#include \"tensorflow/core/platform/test.h\"\n#include \"tensorflow/core/platform/types.h\"\nnamespace tensorflow {\nnamespace generator {\nnamespace {\nstruct Variations {\n string lower_camel;\n string lower_snake;\n string upper_camel;\n string upper_snake;\n};\nvoid TestSingleVariation(const string &str, Variations expected,\n char delimiter = '_') {\n EXPECT_EQ(expected.lower_camel, toLowerCamel(str, delimiter));\n EXPECT_EQ(expected.lower_snake, toLowerSnake(str, delimiter));\n EXPECT_EQ(expected.upper_camel, toUpperCamel(str, delimiter));\n EXPECT_EQ(expected.upper_snake, toUpperSnake(str, delimiter));\n}\nvoid TestAllVariations(Variations variations, char delimiter = '_') {\n TestSingleVariation(variations.lower_camel, variations, delimiter);\n TestSingleVariation(variations.lower_snake, variations, delimiter);\n TestSingleVariation(variations.upper_camel, variations, delimiter);\n TestSingleVariation(variations.upper_snake, variations, delimiter);\n}\nTEST(CppOpGenCaseFormat, test_single_word) {\n TestAllVariations(Variations{\n \"three\",\n \"three\",\n \"Three\",\n \"THREE\",\n });\n}\nTEST(CppOpGenCaseFormat, test_complex_string) {\n TestAllVariations(Variations{\n \"threeNTest33Words\",\n \"three_n_test33_words\",\n \"ThreeNTest33Words\",\n \"THREE_N_TEST33_WORDS\",\n });\n}\nTEST(CppOpGenCaseFormat, test_hyphen_delimiter) {\n TestAllVariations(\n Variations{\n \"threeNTest33Words\",\n \"three-n-test33-words\",\n \"ThreeNTest33Words\",\n \"THREE-N-TEST33-WORDS\",\n },\n '-');\n}\nTEST(CppOpGenCaseFormat, test_trailing_underscore) {\n TestAllVariations(Variations{\n \"threeNTest33Words_\",\n \"three_n_test33_words_\",\n \"ThreeNTest33Words_\",\n \"THREE_N_TEST33_WORDS_\",\n });\n}\nTEST(CppOpGenCaseFormat, test_double_trailing_underscores) {\n TestAllVariations(Variations{\n \"xxY__\",\n \"xx_y__\",\n \"XxY__\",\n \"XX_Y__\",\n });\n}\nTEST(CppOpGenCaseFormat, test_leading_underscore) {\n TestAllVariations(Variations{\n \"_threeNTest33Words\",\n \"_three_n_test33_words\",\n \"_ThreeNTest33Words\",\n \"_THREE_N_TEST33_WORDS\",\n });\n}\nTEST(CppOpGenCaseFormat, test_double_leading_underscores) {\n TestAllVariations(Variations{\n \"__threeNTest33Words\",\n \"__three_n_test33_words\",\n \"__ThreeNTest33Words\",\n \"__THREE_N_TEST33_WORDS\",\n });\n}\nTEST(CppOpGenCaseFormat, test_leading_and_trailing_underscores) {\n TestAllVariations(Variations{\n \"__threeNTest33Words____\",\n \"__three_n_test33_words____\",\n \"__ThreeNTest33Words____\",\n \"__THREE_N_TEST33_WORDS____\",\n });\n}\n} \n} \n} "},"Code 
Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/gen/common/case_format.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/gen/common/case_format_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1178,"cells":{"ID":{"kind":"string","value":"b306449e-1028-4a65-a9cd-5129cd3fe38b"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"abseil/abseil-cpp"},"File Name":{"kind":"string","value":"cordz_info"},"File Path in Repository":{"kind":"string","value":"absl/strings/internal/cordz_info.cc"},"File Path for Unit Test":{"kind":"string","value":"absl/strings/internal/cordz_info_test.cc"},"Code":{"kind":"string","value":"#include \"absl/strings/internal/cordz_info.h\"\n#include \n#include \"absl/base/config.h\"\n#include \"absl/base/internal/spinlock.h\"\n#include \"absl/container/inlined_vector.h\"\n#include \"absl/debugging/stacktrace.h\"\n#include \"absl/strings/internal/cord_internal.h\"\n#include \"absl/strings/internal/cord_rep_btree.h\"\n#include \"absl/strings/internal/cord_rep_crc.h\"\n#include \"absl/strings/internal/cordz_handle.h\"\n#include \"absl/strings/internal/cordz_statistics.h\"\n#include \"absl/strings/internal/cordz_update_tracker.h\"\n#include \"absl/synchronization/mutex.h\"\n#include \"absl/time/clock.h\"\n#include \"absl/types/span.h\"\nnamespace absl {\nABSL_NAMESPACE_BEGIN\nnamespace cord_internal {\n#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL\nconstexpr size_t CordzInfo::kMaxStackDepth;\n#endif\nABSL_CONST_INIT CordzInfo::List CordzInfo::global_list_{absl::kConstInit};\nnamespace {\nclass CordRepAnalyzer {\n public:\n explicit CordRepAnalyzer(CordzStatistics& statistics)\n : statistics_(statistics) {}\n void AnalyzeCordRep(const CordRep* rep) {\n ABSL_ASSERT(rep != nullptr);\n size_t refcount = rep->refcount.Get();\n RepRef repref{rep, (refcount > 1) ? refcount - 1 : 1};\n if (repref.tag() == CRC) {\n statistics_.node_count++;\n statistics_.node_counts.crc++;\n memory_usage_.Add(sizeof(CordRepCrc), repref.refcount);\n repref = repref.Child(repref.rep->crc()->child);\n }\n repref = CountLinearReps(repref, memory_usage_);\n switch (repref.tag()) {\n case CordRepKind::BTREE:\n AnalyzeBtree(repref);\n break;\n default:\n ABSL_ASSERT(repref.tag() == CordRepKind::UNUSED_0);\n break;\n }\n statistics_.estimated_memory_usage += memory_usage_.total;\n statistics_.estimated_fair_share_memory_usage +=\n static_cast(memory_usage_.fair_share);\n }\n private:\n struct RepRef {\n const CordRep* rep;\n size_t refcount;\n RepRef Child(const CordRep* child) const {\n if (child == nullptr) return RepRef{nullptr, 0};\n return RepRef{child, refcount * child->refcount.Get()};\n }\n constexpr CordRepKind tag() const {\n ABSL_ASSERT(rep == nullptr || rep->tag != CordRepKind::UNUSED_0);\n return rep ? 
static_cast<CordRepKind>(rep->tag) : CordRepKind::UNUSED_0;\n    }\n  };\n  struct MemoryUsage {\n    size_t total = 0;\n    double fair_share = 0.0;\n    void Add(size_t size, size_t refcount) {\n      total += size;\n      fair_share += static_cast<double>(size) / refcount;\n    }\n  };\n  void CountFlat(size_t size) {\n    statistics_.node_count++;\n    statistics_.node_counts.flat++;\n    if (size <= 64) {\n      statistics_.node_counts.flat_64++;\n    } else if (size <= 128) {\n      statistics_.node_counts.flat_128++;\n    } else if (size <= 256) {\n      statistics_.node_counts.flat_256++;\n    } else if (size <= 512) {\n      statistics_.node_counts.flat_512++;\n    } else if (size <= 1024) {\n      statistics_.node_counts.flat_1k++;\n    }\n  }\n  RepRef CountLinearReps(RepRef rep, MemoryUsage& memory_usage) {\n    while (rep.tag() == SUBSTRING) {\n      statistics_.node_count++;\n      statistics_.node_counts.substring++;\n      memory_usage.Add(sizeof(CordRepSubstring), rep.refcount);\n      rep = rep.Child(rep.rep->substring()->child);\n    }\n    if (rep.tag() >= FLAT) {\n      size_t size = rep.rep->flat()->AllocatedSize();\n      CountFlat(size);\n      memory_usage.Add(size, rep.refcount);\n      return RepRef{nullptr, 0};\n    }\n    if (rep.tag() == EXTERNAL) {\n      statistics_.node_count++;\n      statistics_.node_counts.external++;\n      size_t size = rep.rep->length + sizeof(CordRepExternalImpl);\n      memory_usage.Add(size, rep.refcount);\n      return RepRef{nullptr, 0};\n    }\n    return rep;\n  }\n  void AnalyzeBtree(RepRef rep) {\n    statistics_.node_count++;\n    statistics_.node_counts.btree++;\n    memory_usage_.Add(sizeof(CordRepBtree), rep.refcount);\n    const CordRepBtree* tree = rep.rep->btree();\n    if (tree->height() > 0) {\n      for (CordRep* edge : tree->Edges()) {\n        AnalyzeBtree(rep.Child(edge));\n      }\n    } else {\n      for (CordRep* edge : tree->Edges()) {\n        CountLinearReps(rep.Child(edge), memory_usage_);\n      }\n    }\n  }\n  CordzStatistics& statistics_;\n  MemoryUsage memory_usage_;\n};\n} \nCordzInfo* CordzInfo::Head(const CordzSnapshot& snapshot) {\n  ABSL_ASSERT(snapshot.is_snapshot());\n  CordzInfo* head = global_list_.head.load(std::memory_order_acquire);\n  ABSL_ASSERT(snapshot.DiagnosticsHandleIsSafeToInspect(head));\n  return head;\n}\nCordzInfo* CordzInfo::Next(const CordzSnapshot& snapshot) const {\n  ABSL_ASSERT(snapshot.is_snapshot());\n  CordzInfo* next = ci_next_.load(std::memory_order_acquire);\n  ABSL_ASSERT(snapshot.DiagnosticsHandleIsSafeToInspect(this));\n  ABSL_ASSERT(snapshot.DiagnosticsHandleIsSafeToInspect(next));\n  return next;\n}\nvoid CordzInfo::TrackCord(InlineData& cord, MethodIdentifier method,\n                          int64_t sampling_stride) {\n  assert(cord.is_tree());\n  assert(!cord.is_profiled());\n  CordzInfo* cordz_info =\n      new CordzInfo(cord.as_tree(), nullptr, method, sampling_stride);\n  cord.set_cordz_info(cordz_info);\n  cordz_info->Track();\n}\nvoid CordzInfo::TrackCord(InlineData& cord, const InlineData& src,\n                          MethodIdentifier method) {\n  assert(cord.is_tree());\n  assert(src.is_tree());\n  CordzInfo* cordz_info = cord.cordz_info();\n  if (cordz_info != nullptr) cordz_info->Untrack();\n  cordz_info = new CordzInfo(cord.as_tree(), src.cordz_info(), method,\n                             src.cordz_info()->sampling_stride());\n  cord.set_cordz_info(cordz_info);\n  cordz_info->Track();\n}\nvoid CordzInfo::MaybeTrackCordImpl(InlineData& cord, const InlineData& src,\n                                   MethodIdentifier method) {\n  if (src.is_profiled()) {\n    TrackCord(cord, src, method);\n  } else if (cord.is_profiled()) {\n    cord.cordz_info()->Untrack();\n    cord.clear_cordz_info();\n  }\n}\nCordzInfo::MethodIdentifier CordzInfo::GetParentMethod(const CordzInfo* src) {\n  if (src == nullptr) return MethodIdentifier::kUnknown;\n  
return src->parent_method_ != MethodIdentifier::kUnknown ? src->parent_method_\n                                                            : src->method_;\n}\nsize_t CordzInfo::FillParentStack(const CordzInfo* src, void** stack) {\n  assert(stack);\n  if (src == nullptr) return 0;\n  if (src->parent_stack_depth_) {\n    memcpy(stack, src->parent_stack_, src->parent_stack_depth_ * sizeof(void*));\n    return src->parent_stack_depth_;\n  }\n  memcpy(stack, src->stack_, src->stack_depth_ * sizeof(void*));\n  return src->stack_depth_;\n}\nCordzInfo::CordzInfo(CordRep* rep, const CordzInfo* src,\n                     MethodIdentifier method, int64_t sampling_stride)\n    : rep_(rep),\n      stack_depth_(\n          static_cast<int>(absl::GetStackTrace(stack_,\n                                               kMaxStackDepth,\n                                               1))),\n      parent_stack_depth_(FillParentStack(src, parent_stack_)),\n      method_(method),\n      parent_method_(GetParentMethod(src)),\n      create_time_(absl::Now()),\n      sampling_stride_(sampling_stride) {\n  update_tracker_.LossyAdd(method);\n  if (src) {\n    update_tracker_.LossyAdd(src->update_tracker_);\n  }\n}\nCordzInfo::~CordzInfo() {\n  if (ABSL_PREDICT_FALSE(rep_)) {\n    CordRep::Unref(rep_);\n  }\n}\nvoid CordzInfo::Track() {\n  SpinLockHolder l(&list_->mutex);\n  CordzInfo* const head = list_->head.load(std::memory_order_acquire);\n  if (head != nullptr) {\n    head->ci_prev_.store(this, std::memory_order_release);\n  }\n  ci_next_.store(head, std::memory_order_release);\n  list_->head.store(this, std::memory_order_release);\n}\nvoid CordzInfo::Untrack() {\n  ODRCheck();\n  {\n    SpinLockHolder l(&list_->mutex);\n    CordzInfo* const head = list_->head.load(std::memory_order_acquire);\n    CordzInfo* const next = ci_next_.load(std::memory_order_acquire);\n    CordzInfo* const prev = ci_prev_.load(std::memory_order_acquire);\n    if (next) {\n      ABSL_ASSERT(next->ci_prev_.load(std::memory_order_acquire) == this);\n      next->ci_prev_.store(prev, std::memory_order_release);\n    }\n    if (prev) {\n      ABSL_ASSERT(head != this);\n      ABSL_ASSERT(prev->ci_next_.load(std::memory_order_acquire) == this);\n      prev->ci_next_.store(next, std::memory_order_release);\n    } else {\n      ABSL_ASSERT(head == this);\n      list_->head.store(next, std::memory_order_release);\n    }\n  }\n  if (SafeToDelete()) {\n    UnsafeSetCordRep(nullptr);\n    delete this;\n    return;\n  }\n  {\n    absl::MutexLock lock(&mutex_);\n    if (rep_) CordRep::Ref(rep_);\n  }\n  CordzHandle::Delete(this);\n}\nvoid CordzInfo::Lock(MethodIdentifier method)\n    ABSL_EXCLUSIVE_LOCK_FUNCTION(mutex_) {\n  mutex_.Lock();\n  update_tracker_.LossyAdd(method);\n  assert(rep_);\n}\nvoid CordzInfo::Unlock() ABSL_UNLOCK_FUNCTION(mutex_) {\n  bool tracked = rep_ != nullptr;\n  mutex_.Unlock();\n  if (!tracked) {\n    Untrack();\n  }\n}\nabsl::Span<void* const> CordzInfo::GetStack() const {\n  return absl::MakeConstSpan(stack_, stack_depth_);\n}\nabsl::Span<void* const> CordzInfo::GetParentStack() const {\n  return absl::MakeConstSpan(parent_stack_, parent_stack_depth_);\n}\nCordzStatistics CordzInfo::GetCordzStatistics() const {\n  CordzStatistics stats;\n  stats.method = method_;\n  stats.parent_method = parent_method_;\n  stats.update_tracker = update_tracker_;\n  if (CordRep* rep = RefCordRep()) {\n    stats.size = rep->length;\n    CordRepAnalyzer analyzer(stats);\n    analyzer.AnalyzeCordRep(rep);\n    CordRep::Unref(rep);\n  }\n  return stats;\n}\n} \nABSL_NAMESPACE_END\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"absl/strings/internal/cordz_info.h\"\n#include \n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"absl/base/config.h\"\n#include \"absl/debugging/stacktrace.h\"\n#include \"absl/debugging/symbolize.h\"\n#include \"absl/strings/cordz_test_helpers.h\"\n#include 
\"absl/strings/internal/cord_rep_flat.h\"\n#include \"absl/strings/internal/cordz_handle.h\"\n#include \"absl/strings/internal/cordz_statistics.h\"\n#include \"absl/strings/internal/cordz_update_tracker.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/types/span.h\"\nnamespace absl {\nABSL_NAMESPACE_BEGIN\nnamespace cord_internal {\nnamespace {\nusing ::testing::ElementsAre;\nusing ::testing::Eq;\nusing ::testing::HasSubstr;\nusing ::testing::Ne;\nusing ::testing::SizeIs;\nauto constexpr kUnknownMethod = CordzUpdateTracker::kUnknown;\nauto constexpr kTrackCordMethod = CordzUpdateTracker::kConstructorString;\nauto constexpr kChildMethod = CordzUpdateTracker::kConstructorCord;\nauto constexpr kUpdateMethod = CordzUpdateTracker::kAppendString;\nstd::vector DeleteQueue() {\n return CordzHandle::DiagnosticsGetDeleteQueue();\n}\nstd::string FormatStack(absl::Span raw_stack) {\n static constexpr size_t buf_size = 1 << 14;\n std::unique_ptr buf(new char[buf_size]);\n std::string output;\n for (void* stackp : raw_stack) {\n if (absl::Symbolize(stackp, buf.get(), buf_size)) {\n absl::StrAppend(&output, \" \", buf.get(), \"\\n\");\n }\n }\n return output;\n}\nTEST(CordzInfoTest, TrackCord) {\n TestCordData data;\n CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);\n CordzInfo* info = data.data.cordz_info();\n ASSERT_THAT(info, Ne(nullptr));\n EXPECT_FALSE(info->is_snapshot());\n EXPECT_THAT(CordzInfo::Head(CordzSnapshot()), Eq(info));\n EXPECT_THAT(info->GetCordRepForTesting(), Eq(data.rep.rep));\n info->Untrack();\n}\nTEST(CordzInfoTest, MaybeTrackChildCordWithoutSampling) {\n CordzSamplingIntervalHelper sample_none(99999);\n TestCordData parent, child;\n CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);\n EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));\n}\nTEST(CordzInfoTest, MaybeTrackChildCordWithSampling) {\n CordzSamplingIntervalHelper sample_all(1);\n TestCordData parent, child;\n CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);\n EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));\n}\nTEST(CordzInfoTest, MaybeTrackChildCordWithoutSamplingParentSampled) {\n CordzSamplingIntervalHelper sample_none(99999);\n TestCordData parent, child;\n CordzInfo::TrackCord(parent.data, kTrackCordMethod, 1);\n CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);\n CordzInfo* parent_info = parent.data.cordz_info();\n CordzInfo* child_info = child.data.cordz_info();\n ASSERT_THAT(child_info, Ne(nullptr));\n EXPECT_THAT(child_info->GetCordRepForTesting(), Eq(child.rep.rep));\n EXPECT_THAT(child_info->GetParentStack(), parent_info->GetStack());\n parent_info->Untrack();\n child_info->Untrack();\n}\nTEST(CordzInfoTest, MaybeTrackChildCordWithoutSamplingChildSampled) {\n CordzSamplingIntervalHelper sample_none(99999);\n TestCordData parent, child;\n CordzInfo::TrackCord(child.data, kTrackCordMethod, 1);\n CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);\n EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));\n}\nTEST(CordzInfoTest, MaybeTrackChildCordWithSamplingChildSampled) {\n CordzSamplingIntervalHelper sample_all(1);\n TestCordData parent, child;\n CordzInfo::TrackCord(child.data, kTrackCordMethod, 1);\n CordzInfo::MaybeTrackCord(child.data, parent.data, kTrackCordMethod);\n EXPECT_THAT(child.data.cordz_info(), Eq(nullptr));\n}\nTEST(CordzInfoTest, UntrackCord) {\n TestCordData data;\n CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);\n CordzInfo* info = data.data.cordz_info();\n info->Untrack();\n 
EXPECT_THAT(DeleteQueue(), SizeIs(0u));\n}\nTEST(CordzInfoTest, UntrackCordWithSnapshot) {\n TestCordData data;\n CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);\n CordzInfo* info = data.data.cordz_info();\n CordzSnapshot snapshot;\n info->Untrack();\n EXPECT_THAT(CordzInfo::Head(CordzSnapshot()), Eq(nullptr));\n EXPECT_THAT(info->GetCordRepForTesting(), Eq(data.rep.rep));\n EXPECT_THAT(DeleteQueue(), ElementsAre(info, &snapshot));\n}\nTEST(CordzInfoTest, SetCordRep) {\n TestCordData data;\n CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);\n CordzInfo* info = data.data.cordz_info();\n TestCordRep rep;\n info->Lock(CordzUpdateTracker::kAppendCord);\n info->SetCordRep(rep.rep);\n info->Unlock();\n EXPECT_THAT(info->GetCordRepForTesting(), Eq(rep.rep));\n info->Untrack();\n}\nTEST(CordzInfoTest, SetCordRepNullUntracksCordOnUnlock) {\n TestCordData data;\n CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);\n CordzInfo* info = data.data.cordz_info();\n info->Lock(CordzUpdateTracker::kAppendString);\n info->SetCordRep(nullptr);\n EXPECT_THAT(info->GetCordRepForTesting(), Eq(nullptr));\n EXPECT_THAT(CordzInfo::Head(CordzSnapshot()), Eq(info));\n info->Unlock();\n EXPECT_THAT(CordzInfo::Head(CordzSnapshot()), Eq(nullptr));\n}\nTEST(CordzInfoTest, RefCordRep) {\n TestCordData data;\n CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);\n CordzInfo* info = data.data.cordz_info();\n size_t refcount = data.rep.rep->refcount.Get();\n EXPECT_THAT(info->RefCordRep(), Eq(data.rep.rep));\n EXPECT_THAT(data.rep.rep->refcount.Get(), Eq(refcount + 1));\n CordRep::Unref(data.rep.rep);\n info->Untrack();\n}\n#if GTEST_HAS_DEATH_TEST\nTEST(CordzInfoTest, SetCordRepRequiresMutex) {\n TestCordData data;\n CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);\n CordzInfo* info = data.data.cordz_info();\n TestCordRep rep;\n EXPECT_DEBUG_DEATH(info->SetCordRep(rep.rep), \".*\");\n info->Untrack();\n}\n#endif \nTEST(CordzInfoTest, TrackUntrackHeadFirstV2) {\n CordzSnapshot snapshot;\n EXPECT_THAT(CordzInfo::Head(snapshot), Eq(nullptr));\n TestCordData data;\n CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);\n CordzInfo* info1 = data.data.cordz_info();\n ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info1));\n EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));\n TestCordData data2;\n CordzInfo::TrackCord(data2.data, kTrackCordMethod, 1);\n CordzInfo* info2 = data2.data.cordz_info();\n ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info2));\n EXPECT_THAT(info2->Next(snapshot), Eq(info1));\n EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));\n info2->Untrack();\n ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info1));\n EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));\n info1->Untrack();\n ASSERT_THAT(CordzInfo::Head(snapshot), Eq(nullptr));\n}\nTEST(CordzInfoTest, TrackUntrackTailFirstV2) {\n CordzSnapshot snapshot;\n EXPECT_THAT(CordzInfo::Head(snapshot), Eq(nullptr));\n TestCordData data;\n CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);\n CordzInfo* info1 = data.data.cordz_info();\n ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info1));\n EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));\n TestCordData data2;\n CordzInfo::TrackCord(data2.data, kTrackCordMethod, 1);\n CordzInfo* info2 = data2.data.cordz_info();\n ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info2));\n EXPECT_THAT(info2->Next(snapshot), Eq(info1));\n EXPECT_THAT(info1->Next(snapshot), Eq(nullptr));\n info1->Untrack();\n ASSERT_THAT(CordzInfo::Head(snapshot), Eq(info2));\n EXPECT_THAT(info2->Next(snapshot), Eq(nullptr));\n info2->Untrack();\n 
ASSERT_THAT(CordzInfo::Head(snapshot), Eq(nullptr));\n}\nTEST(CordzInfoTest, StackV2) {\n  TestCordData data;\n  static constexpr int kMaxStackDepth = 50;\n  CordzInfo::TrackCord(data.data, kTrackCordMethod, 1);\n  CordzInfo* info = data.data.cordz_info();\n  std::vector<void*> local_stack;\n  local_stack.resize(kMaxStackDepth);\n  local_stack.resize(static_cast<size_t>(\n      absl::GetStackTrace(local_stack.data(), kMaxStackDepth,\n                          1)));\n  std::string got_stack = FormatStack(info->GetStack());\n  std::string expected_stack = FormatStack(local_stack);\n  EXPECT_THAT(got_stack, HasSubstr(expected_stack));\n  info->Untrack();\n}\nCordzInfo* TrackChildCord(InlineData& data, const InlineData& parent) {\n  CordzInfo::TrackCord(data, parent, kChildMethod);\n  return data.cordz_info();\n}\nCordzInfo* TrackParentCord(InlineData& data) {\n  CordzInfo::TrackCord(data, kTrackCordMethod, 1);\n  return data.cordz_info();\n}\nTEST(CordzInfoTest, GetStatistics) {\n  TestCordData data;\n  CordzInfo* info = TrackParentCord(data.data);\n  CordzStatistics statistics = info->GetCordzStatistics();\n  EXPECT_THAT(statistics.size, Eq(data.rep.rep->length));\n  EXPECT_THAT(statistics.method, Eq(kTrackCordMethod));\n  EXPECT_THAT(statistics.parent_method, Eq(kUnknownMethod));\n  EXPECT_THAT(statistics.update_tracker.Value(kTrackCordMethod), Eq(1));\n  info->Untrack();\n}\nTEST(CordzInfoTest, LockCountsMethod) {\n  TestCordData data;\n  CordzInfo* info = TrackParentCord(data.data);\n  info->Lock(kUpdateMethod);\n  info->Unlock();\n  info->Lock(kUpdateMethod);\n  info->Unlock();\n  CordzStatistics statistics = info->GetCordzStatistics();\n  EXPECT_THAT(statistics.update_tracker.Value(kUpdateMethod), Eq(2));\n  info->Untrack();\n}\nTEST(CordzInfoTest, FromParent) {\n  TestCordData parent;\n  TestCordData child;\n  CordzInfo* info_parent = TrackParentCord(parent.data);\n  CordzInfo* info_child = TrackChildCord(child.data, parent.data);\n  std::string stack = FormatStack(info_parent->GetStack());\n  std::string parent_stack = FormatStack(info_child->GetParentStack());\n  EXPECT_THAT(stack, Eq(parent_stack));\n  CordzStatistics statistics = info_child->GetCordzStatistics();\n  EXPECT_THAT(statistics.size, Eq(child.rep.rep->length));\n  EXPECT_THAT(statistics.method, Eq(kChildMethod));\n  EXPECT_THAT(statistics.parent_method, Eq(kTrackCordMethod));\n  EXPECT_THAT(statistics.update_tracker.Value(kChildMethod), Eq(1));\n  info_parent->Untrack();\n  info_child->Untrack();\n}\n} \n} \nABSL_NAMESPACE_END\n} "},"Code Url":{"kind":"string","value":"https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cordz_info.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cordz_info_test.cc"},"Commit Hash":{"kind":"string","value":"03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4"}}},{"rowIdx":1179,"cells":{"ID":{"kind":"string","value":"b838e648-4281-4f28-8219-c370a373b8cf"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/quiche"},"File Name":{"kind":"string","value":"btree_scheduler"},"File Path in Repository":{"kind":"string","value":"quiche/common/btree_scheduler.h"},"File Path for Unit Test":{"kind":"string","value":"quiche/common/btree_scheduler_test.cc"},"Code":{"kind":"string","value":"#ifndef QUICHE_COMMON_BTREE_SCHEDULER_H_\n#define QUICHE_COMMON_BTREE_SCHEDULER_H_\n#include \n#include \n#include \n#include \"absl/base/attributes.h\"\n#include \"absl/container/btree_map.h\"\n#include 
\"absl/container/node_hash_map.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"quiche/common/platform/api/quiche_bug_tracker.h\"\n#include \"quiche/common/platform/api/quiche_export.h\"\n#include \"quiche/common/platform/api/quiche_logging.h\"\nnamespace quiche {\ntemplate \nclass QUICHE_NO_EXPORT BTreeScheduler {\n public:\n bool HasRegistered() const { return !streams_.empty(); }\n bool HasScheduled() const { return !schedule_.empty(); }\n size_t NumScheduled() const { return schedule_.size(); }\n size_t NumRegistered() const { return streams_.size(); }\n size_t NumScheduledInPriorityRange(std::optional min,\n std::optional max) const;\n absl::StatusOr ShouldYield(Id id) const;\n std::optional GetPriorityFor(Id id) const {\n auto it = streams_.find(id);\n if (it == streams_.end()) {\n return std::nullopt;\n }\n return it->second.priority;\n }\n absl::StatusOr PopFront();\n absl::Status Register(Id stream_id, const Priority& priority);\n absl::Status Unregister(Id stream_id);\n absl::Status UpdatePriority(Id stream_id, const Priority& new_priority);\n absl::Status Schedule(Id stream_id);\n bool IsScheduled(Id stream_id) const;\n private:\n struct StreamEntry {\n ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Priority priority;\n std::optional current_sequence_number = std::nullopt;\n bool scheduled() const { return current_sequence_number.has_value(); }\n };\n using FullStreamEntry = std::pair;\n struct ScheduleKey {\n ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Priority priority;\n int sequence_number;\n bool operator<(const ScheduleKey& other) const {\n return std::make_tuple(priority, sequence_number) >\n std::make_tuple(other.priority, other.sequence_number);\n }\n static ScheduleKey MinForPriority(Priority priority) {\n return ScheduleKey{priority, std::numeric_limits::max()};\n }\n static ScheduleKey MaxForPriority(Priority priority) {\n return ScheduleKey{priority, std::numeric_limits::min()};\n }\n };\n using FullScheduleEntry = std::pair;\n using ScheduleIterator =\n typename absl::btree_map::const_iterator;\n static Id StreamId(const FullScheduleEntry& entry) {\n return entry.second->first;\n }\n absl::StatusOr DescheduleStream(const StreamEntry& entry);\n absl::node_hash_map streams_;\n absl::btree_map schedule_;\n int current_write_sequence_number_ = 0;\n};\ntemplate \nsize_t BTreeScheduler::NumScheduledInPriorityRange(\n std::optional min, std::optional max) const {\n if (min.has_value() && max.has_value()) {\n QUICHE_DCHECK(*min <= *max);\n }\n ScheduleIterator begin =\n max.has_value() ? schedule_.lower_bound(ScheduleKey::MinForPriority(*max))\n : schedule_.begin();\n ScheduleIterator end =\n min.has_value() ? 
schedule_.upper_bound(ScheduleKey::MaxForPriority(*min))\n                      : schedule_.end();\n  return end - begin;\n}\ntemplate <typename Id, typename Priority>\nabsl::Status BTreeScheduler<Id, Priority>::Register(Id stream_id,\n                                                    const Priority& priority) {\n  auto [it, success] = streams_.insert({stream_id, StreamEntry{priority}});\n  if (!success) {\n    return absl::AlreadyExistsError(\"ID already registered\");\n  }\n  return absl::OkStatus();\n}\ntemplate <typename Id, typename Priority>\nauto BTreeScheduler<Id, Priority>::DescheduleStream(const StreamEntry& entry)\n    -> absl::StatusOr<FullScheduleEntry> {\n  QUICHE_DCHECK(entry.scheduled());\n  auto it = schedule_.find(\n      ScheduleKey{entry.priority, *entry.current_sequence_number});\n  if (it == schedule_.end()) {\n    return absl::InternalError(\n        \"Calling DescheduleStream() on an entry that is not in the schedule at \"\n        \"the expected key.\");\n  }\n  FullScheduleEntry result = *it;\n  schedule_.erase(it);\n  return result;\n}\ntemplate <typename Id, typename Priority>\nabsl::Status BTreeScheduler<Id, Priority>::Unregister(Id stream_id) {\n  auto it = streams_.find(stream_id);\n  if (it == streams_.end()) {\n    return absl::NotFoundError(\"Stream not registered\");\n  }\n  const StreamEntry& stream = it->second;\n  if (stream.scheduled()) {\n    if (!DescheduleStream(stream).ok()) {\n      QUICHE_BUG(BTreeSchedule_Unregister_NotInSchedule)\n          << \"UnregisterStream() called on a stream ID \" << stream_id\n          << \", which is marked ready, but is not in the schedule\";\n    }\n  }\n  streams_.erase(it);\n  return absl::OkStatus();\n}\ntemplate <typename Id, typename Priority>\nabsl::Status BTreeScheduler<Id, Priority>::UpdatePriority(\n    Id stream_id, const Priority& new_priority) {\n  auto it = streams_.find(stream_id);\n  if (it == streams_.end()) {\n    return absl::NotFoundError(\"ID not registered\");\n  }\n  StreamEntry& stream = it->second;\n  std::optional<int> sequence_number;\n  if (stream.scheduled()) {\n    absl::StatusOr<FullScheduleEntry> old_entry = DescheduleStream(stream);\n    if (old_entry.ok()) {\n      sequence_number = old_entry->first.sequence_number;\n      QUICHE_DCHECK_EQ(old_entry->second, &*it);\n    } else {\n      QUICHE_BUG(BTreeScheduler_Update_Not_In_Schedule)\n          << \"UpdatePriority() called on a stream ID \" << stream_id\n          << \", which is marked ready, but is not in the schedule\";\n    }\n  }\n  stream.priority = new_priority;\n  if (sequence_number.has_value()) {\n    schedule_.insert({ScheduleKey{stream.priority, *sequence_number}, &*it});\n  }\n  return absl::OkStatus();\n}\ntemplate <typename Id, typename Priority>\nabsl::StatusOr<bool> BTreeScheduler<Id, Priority>::ShouldYield(\n    Id stream_id) const {\n  const auto stream_it = streams_.find(stream_id);\n  if (stream_it == streams_.end()) {\n    return absl::NotFoundError(\"ID not registered\");\n  }\n  const StreamEntry& stream = stream_it->second;\n  if (schedule_.empty()) {\n    return false;\n  }\n  const FullScheduleEntry& next = *schedule_.begin();\n  if (StreamId(next) == stream_id) {\n    return false;\n  }\n  return next.first.priority >= stream.priority;\n}\ntemplate <typename Id, typename Priority>\nabsl::StatusOr<Id> BTreeScheduler<Id, Priority>::PopFront() {\n  if (schedule_.empty()) {\n    return absl::NotFoundError(\"No streams scheduled\");\n  }\n  auto schedule_it = schedule_.begin();\n  QUICHE_DCHECK(schedule_it->second->second.scheduled());\n  schedule_it->second->second.current_sequence_number = std::nullopt;\n  Id result = StreamId(*schedule_it);\n  schedule_.erase(schedule_it);\n  return result;\n}\ntemplate <typename Id, typename Priority>\nabsl::Status BTreeScheduler<Id, Priority>::Schedule(Id stream_id) {\n  const auto stream_it = streams_.find(stream_id);\n  if (stream_it == streams_.end()) {\n    return absl::NotFoundError(\"ID not registered\");\n  }\n  if (stream_it->second.scheduled()) {\n    return absl::OkStatus();\n  }\n  auto [schedule_it, success] =\n      schedule_.insert({ScheduleKey{stream_it->second.priority,\n                                    
--current_write_sequence_number_},\n                        &*stream_it});\n  QUICHE_BUG_IF(WebTransportWriteBlockedList_AddStream_conflict, !success)\n      << \"Conflicting key in scheduler for stream \" << stream_id;\n  stream_it->second.current_sequence_number =\n      schedule_it->first.sequence_number;\n  return absl::OkStatus();\n}\ntemplate <typename Id, typename Priority>\nbool BTreeScheduler<Id, Priority>::IsScheduled(Id stream_id) const {\n  const auto stream_it = streams_.find(stream_id);\n  if (stream_it == streams_.end()) {\n    return false;\n  }\n  return stream_it->second.scheduled();\n}\n} \n#endif "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"quiche/common/btree_scheduler.h\"\n#include \n#include \n#include \n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/types/span.h\"\n#include \"quiche/common/platform/api/quiche_test.h\"\n#include \"quiche/common/test_tools/quiche_test_utils.h\"\nnamespace quiche::test {\nnamespace {\nusing ::testing::ElementsAre;\nusing ::testing::Optional;\ntemplate <typename Id, typename Priority>\nvoid ScheduleIds(BTreeScheduler<Id, Priority>& scheduler,\n                 absl::Span<const Id> ids) {\n  for (Id id : ids) {\n    QUICHE_EXPECT_OK(scheduler.Schedule(id));\n  }\n}\ntemplate <typename Id, typename Priority>\nstd::vector<Id> PopAll(BTreeScheduler<Id, Priority>& scheduler) {\n  std::vector<Id> result;\n  result.reserve(scheduler.NumScheduled());\n  for (;;) {\n    absl::StatusOr<Id> id = scheduler.PopFront();\n    if (id.ok()) {\n      result.push_back(*id);\n    } else {\n      EXPECT_THAT(id, StatusIs(absl::StatusCode::kNotFound));\n      break;\n    }\n  }\n  return result;\n}\nTEST(BTreeSchedulerTest, SimplePop) {\n  BTreeScheduler<int, int> scheduler;\n  QUICHE_EXPECT_OK(scheduler.Register(1, 100));\n  QUICHE_EXPECT_OK(scheduler.Register(2, 101));\n  QUICHE_EXPECT_OK(scheduler.Register(3, 102));\n  EXPECT_THAT(scheduler.GetPriorityFor(1), Optional(100));\n  EXPECT_THAT(scheduler.GetPriorityFor(3), Optional(102));\n  EXPECT_EQ(scheduler.GetPriorityFor(5), std::nullopt);\n  EXPECT_EQ(scheduler.NumScheduled(), 0u);\n  EXPECT_FALSE(scheduler.HasScheduled());\n  QUICHE_EXPECT_OK(scheduler.Schedule(1));\n  QUICHE_EXPECT_OK(scheduler.Schedule(2));\n  QUICHE_EXPECT_OK(scheduler.Schedule(3));\n  EXPECT_EQ(scheduler.NumScheduled(), 3u);\n  EXPECT_TRUE(scheduler.HasScheduled());\n  EXPECT_THAT(scheduler.PopFront(), IsOkAndHolds(3));\n  EXPECT_THAT(scheduler.PopFront(), IsOkAndHolds(2));\n  EXPECT_THAT(scheduler.PopFront(), IsOkAndHolds(1));\n  QUICHE_EXPECT_OK(scheduler.Schedule(2));\n  QUICHE_EXPECT_OK(scheduler.Schedule(1));\n  QUICHE_EXPECT_OK(scheduler.Schedule(3));\n  EXPECT_THAT(scheduler.PopFront(), IsOkAndHolds(3));\n  EXPECT_THAT(scheduler.PopFront(), IsOkAndHolds(2));\n  EXPECT_THAT(scheduler.PopFront(), IsOkAndHolds(1));\n  QUICHE_EXPECT_OK(scheduler.Schedule(3));\n  QUICHE_EXPECT_OK(scheduler.Schedule(1));\n  EXPECT_THAT(scheduler.PopFront(), IsOkAndHolds(3));\n  EXPECT_THAT(scheduler.PopFront(), IsOkAndHolds(1));\n}\nTEST(BTreeSchedulerTest, FIFO) {\n  BTreeScheduler<int, int> scheduler;\n  QUICHE_EXPECT_OK(scheduler.Register(1, 100));\n  QUICHE_EXPECT_OK(scheduler.Register(2, 100));\n  QUICHE_EXPECT_OK(scheduler.Register(3, 100));\n  ScheduleIds(scheduler, {2, 1, 3});\n  EXPECT_THAT(PopAll(scheduler), ElementsAre(2, 1, 3));\n  QUICHE_EXPECT_OK(scheduler.Register(4, 101));\n  QUICHE_EXPECT_OK(scheduler.Register(5, 99));\n  ScheduleIds(scheduler, {5, 1, 2, 3, 4});\n  EXPECT_THAT(PopAll(scheduler), ElementsAre(4, 1, 2, 3, 5));\n  ScheduleIds(scheduler, {1, 5, 2, 4, 3});\n  EXPECT_THAT(PopAll(scheduler), ElementsAre(4, 1, 2, 3, 5));\n  ScheduleIds(scheduler, {3, 5, 2, 4, 1});\n  EXPECT_THAT(PopAll(scheduler), ElementsAre(4, 3, 2, 1, 5));\n  ScheduleIds(scheduler, {3, 2, 1, 
2, 3});\n  EXPECT_THAT(PopAll(scheduler), ElementsAre(3, 2, 1));\n}\nTEST(BTreeSchedulerTest, NumEntriesInRange) {\n  BTreeScheduler<int, int> scheduler;\n  QUICHE_EXPECT_OK(scheduler.Register(1, 0));\n  QUICHE_EXPECT_OK(scheduler.Register(2, 0));\n  QUICHE_EXPECT_OK(scheduler.Register(3, 0));\n  QUICHE_EXPECT_OK(scheduler.Register(4, -2));\n  QUICHE_EXPECT_OK(scheduler.Register(5, -5));\n  QUICHE_EXPECT_OK(scheduler.Register(6, 10));\n  QUICHE_EXPECT_OK(scheduler.Register(7, 16));\n  QUICHE_EXPECT_OK(scheduler.Register(8, 32));\n  QUICHE_EXPECT_OK(scheduler.Register(9, 64));\n  EXPECT_EQ(scheduler.NumScheduled(), 0u);\n  EXPECT_EQ(scheduler.NumScheduledInPriorityRange(std::nullopt, std::nullopt),\n            0u);\n  EXPECT_EQ(scheduler.NumScheduledInPriorityRange(-1, 1), 0u);\n  for (int stream = 1; stream <= 9; ++stream) {\n    QUICHE_ASSERT_OK(scheduler.Schedule(stream));\n  }\n  EXPECT_EQ(scheduler.NumScheduled(), 9u);\n  EXPECT_EQ(scheduler.NumScheduledInPriorityRange(std::nullopt, std::nullopt),\n            9u);\n  EXPECT_EQ(scheduler.NumScheduledInPriorityRange(0, 0), 3u);\n  EXPECT_EQ(scheduler.NumScheduledInPriorityRange(std::nullopt, -1), 2u);\n  EXPECT_EQ(scheduler.NumScheduledInPriorityRange(1, std::nullopt), 4u);\n}\nTEST(BTreeSchedulerTest, Registration) {\n  BTreeScheduler<int, int> scheduler;\n  QUICHE_EXPECT_OK(scheduler.Register(1, 0));\n  QUICHE_EXPECT_OK(scheduler.Register(2, 0));\n  QUICHE_EXPECT_OK(scheduler.Schedule(1));\n  QUICHE_EXPECT_OK(scheduler.Schedule(2));\n  EXPECT_EQ(scheduler.NumScheduled(), 2u);\n  EXPECT_TRUE(scheduler.IsScheduled(2));\n  EXPECT_THAT(scheduler.Register(2, 0),\n              StatusIs(absl::StatusCode::kAlreadyExists));\n  QUICHE_EXPECT_OK(scheduler.Unregister(2));\n  EXPECT_EQ(scheduler.NumScheduled(), 1u);\n  EXPECT_FALSE(scheduler.IsScheduled(2));\n  EXPECT_THAT(scheduler.UpdatePriority(2, 1234),\n              StatusIs(absl::StatusCode::kNotFound));\n  EXPECT_THAT(scheduler.Unregister(2), StatusIs(absl::StatusCode::kNotFound));\n  EXPECT_THAT(scheduler.Schedule(2), StatusIs(absl::StatusCode::kNotFound));\n  QUICHE_EXPECT_OK(scheduler.Register(2, 0));\n  EXPECT_EQ(scheduler.NumScheduled(), 1u);\n  EXPECT_TRUE(scheduler.IsScheduled(1));\n  EXPECT_FALSE(scheduler.IsScheduled(2));\n}\nTEST(BTreeSchedulerTest, UpdatePriorityUp) {\n  BTreeScheduler<int, int> scheduler;\n  QUICHE_EXPECT_OK(scheduler.Register(1, 0));\n  QUICHE_EXPECT_OK(scheduler.Register(2, 0));\n  QUICHE_EXPECT_OK(scheduler.Register(3, 0));\n  ScheduleIds(scheduler, {1, 2, 3});\n  QUICHE_EXPECT_OK(scheduler.UpdatePriority(2, 1000));\n  EXPECT_THAT(PopAll(scheduler), ElementsAre(2, 1, 3));\n}\nTEST(BTreeSchedulerTest, UpdatePriorityDown) {\n  BTreeScheduler<int, int> scheduler;\n  QUICHE_EXPECT_OK(scheduler.Register(1, 0));\n  QUICHE_EXPECT_OK(scheduler.Register(2, 0));\n  QUICHE_EXPECT_OK(scheduler.Register(3, 0));\n  ScheduleIds(scheduler, {1, 2, 3});\n  QUICHE_EXPECT_OK(scheduler.UpdatePriority(2, -1000));\n  EXPECT_THAT(PopAll(scheduler), ElementsAre(1, 3, 2));\n}\nTEST(BTreeSchedulerTest, UpdatePriorityEqual) {\n  BTreeScheduler<int, int> scheduler;\n  QUICHE_EXPECT_OK(scheduler.Register(1, 0));\n  QUICHE_EXPECT_OK(scheduler.Register(2, 0));\n  QUICHE_EXPECT_OK(scheduler.Register(3, 0));\n  ScheduleIds(scheduler, {1, 2, 3});\n  QUICHE_EXPECT_OK(scheduler.UpdatePriority(2, 0));\n  EXPECT_THAT(PopAll(scheduler), ElementsAre(1, 2, 3));\n}\nTEST(BTreeSchedulerTest, UpdatePriorityIntoSameBucket) {\n  BTreeScheduler<int, int> scheduler;\n  QUICHE_EXPECT_OK(scheduler.Register(1, 0));\n  QUICHE_EXPECT_OK(scheduler.Register(2, -100));\n  QUICHE_EXPECT_OK(scheduler.Register(3, 0));\n  ScheduleIds(scheduler, {1, 2, 3});\n  
QUICHE_EXPECT_OK(scheduler.UpdatePriority(2, 0));\n  EXPECT_THAT(PopAll(scheduler), ElementsAre(1, 2, 3));\n}\nTEST(BTreeSchedulerTest, ShouldYield) {\n  BTreeScheduler<int, int> scheduler;\n  QUICHE_EXPECT_OK(scheduler.Register(10, 100));\n  QUICHE_EXPECT_OK(scheduler.Register(20, 101));\n  QUICHE_EXPECT_OK(scheduler.Register(21, 101));\n  QUICHE_EXPECT_OK(scheduler.Register(30, 102));\n  EXPECT_THAT(scheduler.ShouldYield(10), IsOkAndHolds(false));\n  EXPECT_THAT(scheduler.ShouldYield(20), IsOkAndHolds(false));\n  EXPECT_THAT(scheduler.ShouldYield(21), IsOkAndHolds(false));\n  EXPECT_THAT(scheduler.ShouldYield(30), IsOkAndHolds(false));\n  EXPECT_THAT(scheduler.ShouldYield(40), StatusIs(absl::StatusCode::kNotFound));\n  QUICHE_EXPECT_OK(scheduler.Schedule(20));\n  EXPECT_THAT(scheduler.ShouldYield(10), IsOkAndHolds(true));\n  EXPECT_THAT(scheduler.ShouldYield(20), IsOkAndHolds(false));\n  EXPECT_THAT(scheduler.ShouldYield(21), IsOkAndHolds(true));\n  EXPECT_THAT(scheduler.ShouldYield(30), IsOkAndHolds(false));\n}\nstruct CustomPriority {\n  int a;\n  int b;\n  bool operator<(const CustomPriority& other) const {\n    return std::make_tuple(a, b) < std::make_tuple(other.a, other.b);\n  }\n};\nTEST(BTreeSchedulerTest, CustomPriority) {\n  BTreeScheduler<int, CustomPriority> scheduler;\n  QUICHE_EXPECT_OK(scheduler.Register(10, CustomPriority{0, 1}));\n  QUICHE_EXPECT_OK(scheduler.Register(11, CustomPriority{0, 0}));\n  QUICHE_EXPECT_OK(scheduler.Register(12, CustomPriority{0, 0}));\n  QUICHE_EXPECT_OK(scheduler.Register(13, CustomPriority{10, 0}));\n  QUICHE_EXPECT_OK(scheduler.Register(14, CustomPriority{-10, 0}));\n  ScheduleIds(scheduler, {10, 11, 12, 13, 14});\n  EXPECT_THAT(PopAll(scheduler), ElementsAre(13, 10, 11, 12, 14));\n}\nstruct CustomId {\n  int a;\n  std::string b;\n  bool operator==(const CustomId& other) const {\n    return a == other.a && b == other.b;\n  }\n  template <typename H>\n  friend H AbslHashValue(H h, const CustomId& c) {\n    return H::combine(std::move(h), c.a, c.b);\n  }\n};\nstd::ostream& operator<<(std::ostream& os, const CustomId& id) {\n  os << id.a << \":\" << id.b;\n  return os;\n}\nTEST(BTreeSchedulerTest, CustomIds) {\n  BTreeScheduler<CustomId, int> scheduler;\n  QUICHE_EXPECT_OK(scheduler.Register(CustomId{1, \"foo\"}, 10));\n  QUICHE_EXPECT_OK(scheduler.Register(CustomId{1, \"bar\"}, 12));\n  QUICHE_EXPECT_OK(scheduler.Register(CustomId{2, \"foo\"}, 11));\n  EXPECT_THAT(scheduler.Register(CustomId{1, \"foo\"}, 10),\n              StatusIs(absl::StatusCode::kAlreadyExists));\n  ScheduleIds(scheduler,\n              {CustomId{1, \"foo\"}, CustomId{1, \"bar\"}, CustomId{2, \"foo\"}});\n  EXPECT_THAT(scheduler.ShouldYield(CustomId{1, \"foo\"}), IsOkAndHolds(true));\n  EXPECT_THAT(scheduler.ShouldYield(CustomId{1, \"bar\"}), IsOkAndHolds(false));\n  EXPECT_THAT(\n      PopAll(scheduler),\n      ElementsAre(CustomId{1, \"bar\"}, CustomId{2, \"foo\"}, CustomId{1, \"foo\"}));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/btree_scheduler.h"},"Test Code Url":{"kind":"string","value":"https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/btree_scheduler_test.cc"},"Commit Hash":{"kind":"string","value":"6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6"}}},{"rowIdx":1180,"cells":{"ID":{"kind":"string","value":"7bd23866-5039-481b-92b9-5f3198d60d37"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"reshapex4"},"File Path in 
Repository":{"kind":"string","value":"tensorflow/lite/delegates/gpu/common/tasks/reshapex4.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/lite/delegates/gpu/cl/kernels/reshapex4_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/lite/delegates/gpu/common/tasks/reshapex4.h\"\n#include \n#include \"tensorflow/lite/delegates/gpu/common/task/work_group_picking.h\"\nnamespace tflite {\nnamespace gpu {\nnamespace {\nstd::string GetReshapeCode(const OperationDef& op_def) {\n std::string c;\n c += \"MAIN_FUNCTION($0) {\\n\";\n if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {\n c += \" int linear_id = GLOBAL_ID_0;\\n\";\n c += \" int X = linear_id / args.dst_tensor.Batch();\\n\";\n c += \" int B = linear_id % args.dst_tensor.Batch();\\n\";\n c += \" args.dst_tensor.SetBatchRef(B);\\n\";\n } else {\n c += \" int X = GLOBAL_ID_0;\\n\";\n }\n c += \" int Y = GLOBAL_ID_1;\\n\";\n c += \" int Z = GLOBAL_ID_2;\\n\";\n c += \" if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() || \"\n \"Z >= args.dst_tensor.Slices()) { \\n\";\n c += \" return; \\n\";\n c += \" } \\n\";\n if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {\n c += \" int dst_bhwc4 = B;\\n\";\n } else {\n c += \" int dst_bhwc4 = 0;\\n\";\n }\n c += \" dst_bhwc4 = ((dst_bhwc4 * args.dst_tensor.Height() + Y) * \"\n \"args.dst_tensor.Width() + X) * args.dst_tensor.Slices() + Z;\\n\";\n c += \" int src_z = dst_bhwc4 % args.src_tensor.Slices();\\n\";\n c += \" dst_bhwc4 = dst_bhwc4 / args.src_tensor.Slices();\\n\";\n c += \" int src_x = dst_bhwc4 % args.src_tensor.Width();\\n\";\n c += \" dst_bhwc4 = dst_bhwc4 / args.src_tensor.Width();\\n\";\n c += \" int src_y = dst_bhwc4 % args.src_tensor.Height();\\n\";\n if (op_def.src_tensors[0].HasAxis(Axis::BATCH)) {\n c += \" int src_b = dst_bhwc4 / args.src_tensor.Height();\\n\";\n c += \" args.src_tensor.SetBatchRef(src_b);\\n\";\n }\n c += \" args.src_tensor::type result = args.src_tensor.Read(src_x, src_y, \"\n \"src_z);\\n\";\n c += \" args.dst_tensor.Write(result, X, Y, Z);\\n\";\n c += \"}\\n\";\n return c;\n}\n} \nGPUOperation CreateReshapex4(const OperationDef& definition) {\n GPUOperation op(definition);\n op.AddSrcTensor(\"src_tensor\", definition.src_tensors[0]);\n op.AddDstTensor(\"dst_tensor\", definition.dst_tensors[0]);\n op.code_ = GetReshapeCode(definition);\n op.tensor_to_grid_ = TensorToGrid::kWBToX_HDToY_SToZ;\n return op;\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \n#include \n#include \n#include \"tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h\"\n#include \"tensorflow/lite/delegates/gpu/common/operations.h\"\n#include \"tensorflow/lite/delegates/gpu/common/status.h\"\n#include \"tensorflow/lite/delegates/gpu/common/tasks/reshape_test_util.h\"\nnamespace tflite {\nnamespace gpu {\nnamespace cl {\nnamespace {\nTEST_F(OpenCLOperationTest, Reshapex4) {\n auto status = Reshapex4Test(&exec_env_);\n ASSERT_TRUE(status.ok()) << status.message();\n}\n} \n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/reshapex4.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/reshapex4_test.cc"},"Commit 
Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1181,"cells":{"ID":{"kind":"string","value":"06a605f6-b2f0-43a5-adfd-11795f4448f4"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"scatter"},"File Path in Repository":{"kind":"string","value":"tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/scatter.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/fusions/legacy/scatter_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/scatter.h\"\n#include \n#include \n#include \"llvm/ADT/ArrayRef.h\"\n#include \"llvm/ADT/STLExtras.h\"\n#include \"llvm/ADT/Sequence.h\"\n#include \"llvm/ADT/SmallVector.h\"\n#include \"mlir/IR/BuiltinAttributes.h\" \n#include \"mlir/IR/BuiltinTypeInterfaces.h\" \n#include \"mlir/IR/ImplicitLocOpBuilder.h\" \n#include \"mlir/IR/Operation.h\" \n#include \"mlir/IR/ValueRange.h\" \n#include \"mlir/Support/LLVM.h\" \n#include \"mlir/Support/LogicalResult.h\" \n#include \"mlir/Transforms/DialectConversion.h\" \n#include \"tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/util.h\"\n#include \"tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h\"\n#include \"xla/mlir_hlo/mhlo/IR/hlo_ops.h\"\nnamespace mlir {\nnamespace odml {\nLogicalResult CanonicalizeScatterUpdates(\n Operation* scatter_op, llvm::ArrayRef update_window_dims,\n const Value& indices, const ShapedType& indices_type, Value& updates,\n ShapedType& updates_type, ConversionPatternRewriter& rewriter) {\n auto canonical_update_window_dims = llvm::to_vector(\n llvm::seq(indices_type.getRank() - 1, updates_type.getRank()));\n if (canonical_update_window_dims == update_window_dims) return success();\n if (!IsIotaAttr(update_window_dims, update_window_dims.size()))\n return rewriter.notifyMatchFailure(\n scatter_op, \"update_window_dims are not leading or trailing indices\");\n SmallVector permutation_array(updates_type.getRank());\n int64_t dim = 0;\n const auto permutation_array_size = permutation_array.size();\n for (int64_t i = update_window_dims.size(); i < permutation_array_size; ++i) {\n permutation_array[i] = dim;\n ++dim;\n }\n for (int64_t i = 0; i < update_window_dims.size(); ++i) {\n permutation_array[i] = dim;\n ++dim;\n }\n auto permutation_and_shape = GetPermutationAndTransposedShape(\n permutation_array, updates_type, rewriter);\n auto transposed_updates = rewriter.create(\n scatter_op->getLoc(), permutation_and_shape.shape, updates,\n permutation_and_shape.permutation);\n updates = transposed_updates;\n updates_type = permutation_and_shape.shape;\n return success();\n}\ntemplate \nLogicalResult ConvertScatterOp::matchAndRewrite(\n mhlo::ScatterOp scatter_op, OpAdaptor adaptor,\n ConversionPatternRewriter& rewriter) const {\n OperandRange operands = scatter_op.getInputs();\n Value indices = scatter_op.getScatterIndices();\n OperandRange updates = scatter_op.getUpdates();\n if (operands.size() != 1 || updates.size() != 1) return failure();\n ShapedType operand_type = mlir::cast(operands[0].getType());\n ShapedType indices_type = mlir::cast(indices.getType());\n ShapedType updates_type = mlir::cast(updates[0].getType());\n Value new_updates = updates[0];\n if (!operand_type.hasStaticShape() || !indices_type.hasStaticShape() ||\n !updates_type.hasStaticShape()) {\n return failure();\n }\n if 
  if (failed(MatchBinaryReduceFunction<BinaryOp>(
          scatter_op.getUpdateComputation()))) {
    return failure();
  }
  auto scatter_dimension_numbers = scatter_op.getScatterDimensionNumbers();
  int64_t index_vector_dim = scatter_dimension_numbers.getIndexVectorDim();
  if (failed(NormalizeIndexVector(scatter_op, indices, indices_type,
                                  index_vector_dim, rewriter))) {
    return failure();
  }
  auto update_window_dims = scatter_dimension_numbers.getUpdateWindowDims();
  if (failed(CanonicalizeScatterUpdates(scatter_op, update_window_dims,
                                        indices, indices_type, new_updates,
                                        updates_type, rewriter))) {
    return failure();
  }
  auto inserted_window_dims =
      scatter_dimension_numbers.getInsertedWindowDims();
  auto scatter_dims_to_operand_dims =
      scatter_dimension_numbers.getScatterDimsToOperandDims();
  if (IsIotaAttr(inserted_window_dims, indices_type.getShape().back()) &&
      IsIotaAttr(scatter_dims_to_operand_dims,
                 indices_type.getShape().back())) {
    rewriter.replaceOpWithNewOp<TfOp>(scatter_op,
                                      scatter_op.getResult(0).getType(),
                                      operands[0], indices, new_updates);
    return success();
  }
  if (scatter_dims_to_operand_dims != inserted_window_dims) {
    return rewriter.notifyMatchFailure(
        scatter_op, "unsupported scatter_dims_to_operand_dims");
  }
  SmallVector<int64_t> permutation_array;
  for (int64_t i = 0; i < scatter_dims_to_operand_dims.size(); ++i) {
    permutation_array.push_back(scatter_dims_to_operand_dims[i]);
  }
  for (int64_t i = 0; i < operand_type.getRank(); ++i) {
    if (!llvm::is_contained(scatter_dims_to_operand_dims, i)) {
      permutation_array.push_back(i);
    }
  }
  auto permutation_and_shape = GetPermutationAndTransposedShape(
      permutation_array, operand_type, rewriter);
  Location loc = scatter_op.getLoc();
  auto transposed_operand = rewriter.create<mhlo::TransposeOp>(
      loc, permutation_and_shape.shape, operands[0],
      permutation_and_shape.permutation);
  Value new_indices = indices;
  int64_t index_depth =
      permutation_and_shape.shape.getRank() - inserted_window_dims.size();
  int64_t num_updates = indices_type.getDimSize(0);
  if (std::is_same<TfOp, TF::TensorScatterUpdateOp>::value &&
      indices_type.getRank() == 1 && updates_type.getRank() == 1 &&
      index_depth == 1 && num_updates == 1) {
    ImplicitLocOpBuilder builder(loc, rewriter);
    auto indices_shape = BuildIntArrayConstOp(
        builder, rewriter,
        llvm::SmallVector<int64_t>({num_updates, index_depth}),
        rewriter.getI32Type());
    new_indices = rewriter.create<TF::ReshapeOp>(
        loc,
        RankedTensorType::get({num_updates, index_depth},
                              indices_type.getElementType()),
        indices, indices_shape);
    auto updates_shape = BuildIntArrayConstOp(
        builder, rewriter,
        llvm::SmallVector<int64_t>({num_updates, updates_type.getDimSize(0)}),
        rewriter.getI32Type());
    new_updates = rewriter.create<TF::ReshapeOp>(
        loc,
        RankedTensorType::get({1, updates_type.getDimSize(0)},
                              updates_type.getElementType()),
        new_updates, updates_shape);
  }
  auto tf_scatter_op =
      rewriter.create<TfOp>(loc, permutation_and_shape.shape,
                            transposed_operand, new_indices, new_updates);
  auto inverse_permutation =
      GetInversePermutation(permutation_array, rewriter);
  rewriter.replaceOpWithNewOp<mhlo::TransposeOp>(
      scatter_op, scatter_op.getResult(0).getType(), tf_scatter_op,
      inverse_permutation);
  return success();
}
}
}
```
Unit Test - (Ground Truth):
```cpp
#include "xla/service/gpu/fusions/legacy/scatter.h"

#include <optional>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/IR/MLIRContext.h"
#include "xla/service/gpu/fusions/fusions.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
\"xla/service/gpu/hlo_fusion_analysis.h\"\n#include \"xla/service/gpu/model/indexing_map_serialization.h\"\n#include \"xla/service/gpu/model/indexing_test_utils.h\"\n#include \"xla/stream_executor/device_description.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nclass ScatterFusionTest : public HloTestBase {\n DebugOptions GetDebugOptionsForTest() override {\n auto opts = HloTestBase::GetDebugOptionsForTest();\n opts.set_xla_gpu_mlir_emitter_level(0);\n return opts;\n }\n protected:\n mlir::MLIRContext mlir_context_;\n};\nTEST_F(ScatterFusionTest, ScatterFusion) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule module\n add (lhs: f32[], rhs: f32[]) -> f32[] {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT sum = f32[] add(lhs, rhs)\n }\n fused_computation {\n %input = f32[2,9] parameter(0)\n %indices = s32[3] parameter(1)\n %updates = f32[3,9] parameter(2)\n ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),\n to_apply=add,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1\n }\n ENTRY entry {\n %input = f32[2,9] parameter(0)\n %indices = s32[3] parameter(1)\n %updates = f32[3,9] parameter(2)\n ROOT %fusion = f32[2,9] fusion(%input, %indices, %updates), kind=kLoop, calls=fused_computation\n })\")\n .value();\n stream_executor::DeviceDescription device_info =\n TestGpuDeviceInfo::RTXA6000DeviceInfo();\n auto* root = module->entry_computation()->root_instruction();\n auto analysis_fused = HloFusionAnalysis::Create(*root, device_info);\n auto emitter =\n GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis_fused});\n auto scatter_fusion = dynamic_cast(emitter.get());\n ASSERT_NE(scatter_fusion, nullptr);\n EXPECT_EQ(scatter_fusion->launch_dimensions().launch_bound(),\n 3 * 9 );\n}\nTEST_F(ScatterFusionTest, ThreadIdIndexing) {\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R\"(\n HloModule module\n computation {\n %p0 = f32[] parameter(0)\n %p1 = f32[] parameter(1)\n %p2 = f32[] parameter(2)\n %p3 = f32[] parameter(3)\n ROOT %tuple = (f32[], f32[]) tuple(f32[] %p2, f32[] %p3)\n }\n scatter {\n %operand0 = f32[300,200] parameter(0)\n %operand1 = f32[300,200] parameter(1)\n %indices = s32[42,1] parameter(2)\n %update.1 = f32[42,10,20] parameter(3)\n %update.2 = f32[42,10,20]parameter(4)\n ROOT %scatter = (f32[300,200], f32[300,200]) scatter(\n f32[300,200] %operand0,\n f32[300,200] %operand1,\n s32[42,1] %indices,\n f32[42,10,20] %update.1,\n f32[42,10,20] %update.2\n ),\n update_window_dims={1,2},\n inserted_window_dims={},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1,\n to_apply=computation\n }\n ENTRY entry {\n %operand0 = f32[300,200] parameter(0)\n %operand1 = f32[300,200] parameter(1)\n %indices = s32[42,1] parameter(2)\n %update.1 = f32[42,10,20] parameter(3)\n %update.2 = f32[42,10,20]parameter(4)\n ROOT %fusion = (f32[300,200], f32[300,200]) fusion(\n %operand0, %operand1, %indices, %update.1, %update.2),\n kind=kLoop, calls=scatter\n }\n )\"));\n stream_executor::DeviceDescription device_info =\n TestGpuDeviceInfo::RTXA6000DeviceInfo();\n auto* root = module->entry_computation()->root_instruction();\n auto analysis_fused = HloFusionAnalysis::Create(*root, device_info);\n auto emitter =\n GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis_fused});\n auto fusion = dynamic_cast(emitter.get());\n ASSERT_NE(fusion, nullptr);\n constexpr auto kUpdatesIndexing = R\"(\n 
  constexpr auto kUpdatesIndexing = R"(
    (th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (
      (bl_x * 128 + th_x) floordiv 200,
      ((bl_x * 128 + th_x) floordiv 20) mod 10,
      (bl_x * 128 + th_x) mod 20
    ),
    domain:
    th_x in [0, 127],
    th_y in [0, 0],
    th_z in [0, 0],
    bl_x in [0, 65],
    bl_y in [0, 0],
    bl_z in [0, 0],
    chunk_id in [0, 0],
    unroll_id in [0, 0],
    bl_x * 128 + th_x in [0, 8399]
  )";
  mlir::SmallVector<std::string> dim_names = {"th_x", "th_y", "th_z",
                                              "bl_x", "bl_y", "bl_z"};
  mlir::SmallVector<std::string> range_names = {"chunk_id", "unroll_id"};
  EXPECT_THAT(
      ToString(*fusion->ComputeThreadIdToInputIndexing(0, 3, &mlir_context_),
               dim_names, range_names, {}),
      MatchIndexingString(kUpdatesIndexing));
  EXPECT_THAT(
      ToString(*fusion->ComputeThreadIdToInputIndexing(0, 4, &mlir_context_),
               dim_names, range_names, {}),
      MatchIndexingString(kUpdatesIndexing));
  EXPECT_THAT(
      ToString(*fusion->ComputeThreadIdToInputIndexing(1, 3, &mlir_context_),
               dim_names, range_names, {}),
      MatchIndexingString(kUpdatesIndexing));
  EXPECT_THAT(
      ToString(*fusion->ComputeThreadIdToInputIndexing(1, 4, &mlir_context_),
               dim_names, range_names, {}),
      MatchIndexingString(kUpdatesIndexing));
  range_names.push_back("index_id");
  constexpr auto kIndicesIndexing = R"(
    (th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id, index_id] ->
      ((bl_x * 128 + th_x) floordiv 200, 0),
    domain:
    th_x in [0, 127],
    th_y in [0, 0],
    th_z in [0, 0],
    bl_x in [0, 65],
    bl_y in [0, 0],
    bl_z in [0, 0],
    chunk_id in [0, 0],
    unroll_id in [0, 0],
    index_id in [0, 0],
    bl_x * 128 + th_x in [0, 8399]
  )";
  EXPECT_THAT(
      ToString(*fusion->ComputeThreadIdToInputIndexing(0, 2, &mlir_context_),
               dim_names, range_names, {}),
      MatchIndexingString(kIndicesIndexing));
  EXPECT_THAT(
      ToString(*fusion->ComputeThreadIdToInputIndexing(1, 2, &mlir_context_),
               dim_names, range_names, {}),
      MatchIndexingString(kIndicesIndexing));
}
}
}
}
```
Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/scatter.cc
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/legacy/scatter_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
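A hedged sketch for the record above: how a pattern with the restored `ConvertScatterOp<BinaryOp, TfOp>` shape is typically instantiated and handed to an MLIR conversion driver. The alias and the registration helper below are assumptions for illustration; the concrete pattern names live in the record's `scatter.h`.

```cpp
// Hypothetical registration sketch (not from the record). Assumes the
// add-reduction variant pairs mhlo::AddOp with TF::TensorScatterAddOp, which
// is what MatchBinaryReduceFunction<BinaryOp> in the code above checks for.
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/PatternMatch.h"

namespace mlir {
namespace odml {

using ConvertScatterAddOpSketch =
    ConvertScatterOp<mhlo::AddOp, TF::TensorScatterAddOp>;  // assumption

// "PopulateScatterPatterns" is a hypothetical name; a legalization pass would
// call something like this while assembling its RewritePatternSet.
void PopulateScatterPatterns(MLIRContext* context,
                             RewritePatternSet& patterns) {
  patterns.add<ConvertScatterAddOpSketch>(context);
}

}  // namespace odml
}  // namespace mlir
```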
rowIdx: 1182
ID: c1f8f33e-1147-4fa1-b5d6-6457a9b6e08e
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: flatbuffer_conversions
File Path in Repository: tensorflow/lite/core/api/flatbuffer_conversions.cc
File Path for Unit Test: tensorflow/lite/core/api/flatbuffer_conversions_test.cc
Code:
```cpp
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"

#include <cstddef>
#include <cstdint>
#include <memory>

#include "flatbuffers/vector.h"
#include "tensorflow/compiler/mlir/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {
namespace {

class SafeBuiltinDataAllocator {
 public:
  class BuiltinDataDeleter {
   public:
    explicit BuiltinDataDeleter(BuiltinDataAllocator* allocator)
        : allocator_(allocator) {}
    void operator()(void* data) { allocator_->Deallocate(data); }

   private:
    BuiltinDataAllocator* allocator_;
  };

  template <typename T>
  using BuiltinDataPtr = std::unique_ptr<T, BuiltinDataDeleter>;

  explicit SafeBuiltinDataAllocator(BuiltinDataAllocator* allocator)
      : allocator_(allocator) {}

  template <typename T>
  BuiltinDataPtr<T> Allocate() {
    return BuiltinDataPtr<T>(allocator_->AllocatePOD<T>(),
                             BuiltinDataDeleter(allocator_));
  }

 private:
  BuiltinDataAllocator* allocator_;
};

void CheckParsePointerParams(const Operator* op, ErrorReporter* error_reporter,
                             BuiltinDataAllocator* allocator,
                             void** builtin_data) {
  TFLITE_DCHECK(op != nullptr);
  TFLITE_DCHECK(error_reporter != nullptr);
  TFLITE_DCHECK(allocator != nullptr);
  TFLITE_DCHECK(builtin_data != nullptr);
}

template <typename DataType = int32_t>
static TfLiteStatus FlatBufferIntVectorToArray(
    int max_size_of_buffer, const flatbuffers::Vector<DataType>* flat_vector,
    DataType* buffer, ErrorReporter* error_reporter, const char* op_name) {
  if (!flat_vector) {
    TF_LITE_REPORT_ERROR(error_reporter,
                         "Input array not provided for operation '%s'.\n",
                         op_name);
    return kTfLiteError;
  } else {
    size_t num_dimensions = flat_vector->size();
    if (num_dimensions > max_size_of_buffer / sizeof(DataType)) {
      TF_LITE_REPORT_ERROR(
          error_reporter,
          "Found too many dimensions in the input array of operation '%s'.\n",
          op_name);
      return kTfLiteError;
    } else {
      for (size_t i = 0; i < num_dimensions; ++i) {
        buffer[i] = flat_vector->Get(i);
      }
    }
  }
  return kTfLiteOk;
}

TfLiteFusedActivation ConvertActivation(ActivationFunctionType activation) {
  switch (activation) {
    case ActivationFunctionType_NONE:
      return kTfLiteActNone;
    case ActivationFunctionType_RELU:
      return kTfLiteActRelu;
    case ActivationFunctionType_RELU_N1_TO_1:
      return kTfLiteActReluN1To1;
    case ActivationFunctionType_RELU6:
      return kTfLiteActRelu6;
    case ActivationFunctionType_TANH:
      return kTfLiteActTanh;
    case ActivationFunctionType_SIGN_BIT:
      return kTfLiteActSignBit;
  }
  return kTfLiteActNone;
}

TfLitePadding ConvertPadding(Padding padding) {
  switch (padding) {
    case Padding_SAME:
      return kTfLitePaddingSame;
    case Padding_VALID:
      return kTfLitePaddingValid;
  }
  return kTfLitePaddingUnknown;
}

TfLiteMirrorPaddingMode ConvertMirrorPadding(MirrorPadMode padding) {
  switch (padding) {
    case MirrorPadMode_REFLECT:
      return kTfLiteMirrorPaddingReflect;
    case MirrorPadMode_SYMMETRIC:
      return kTfLiteMirrorPaddingSymmetric;
  }
  return kTfLiteMirrorPaddingUnknown;
}

TfLiteRngAlgorithm ConvertRngAlgorithm(RngAlgorithm algorithm) {
  switch (algorithm) {
    case RngAlgorithm_THREEFRY:
      return kTfLiteRngAlgorithmThreefry;
    case RngAlgorithm_PHILOX:
      return kTfLiteRngAlgorithmPhilox;
    case RngAlgorithm_DEFAULT:
      return kTfLiteRngAlgorithmDefault;
  }
  return kTfLiteRngAlgorithmUnknown;
}

#ifndef TF_LITE_STATIC_MEMORY
TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
                               ErrorReporter* error_reporter,
                               BuiltinDataAllocator* allocator,
                               void** builtin_data) {
  auto parseLSHProjectionType = [](LSHProjectionType type) {
    switch (type) {
      case LSHProjectionType_SPARSE:
        return kTfLiteLshProjectionSparse;
      case LSHProjectionType_DENSE:
        return kTfLiteLshProjectionDense;
      default:
        return kTfLiteLshProjectionUnknown;
    }
  };
  auto parseCombinerType = [](CombinerType type) {
    switch (type) {
      case CombinerType_MEAN:
        return kTfLiteCombinerTypeMean;
      case CombinerType_SQRTN:
        return kTfLiteCombinerTypeSqrtn;
      case CombinerType_SUM:
      default:
        return kTfLiteCombinerTypeSum;
    }
  };
  SafeBuiltinDataAllocator safe_allocator(allocator);
  *builtin_data = nullptr;
  switch (op_type) {
    case BuiltinOperator_ABS: {
      return ParseAbs(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_ADD: {
      return ParseAdd(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_ADD_N: {
      return ParseAddN(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_ARG_MAX: {
      return ParseArgMax(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_ARG_MIN: {
      return ParseArgMin(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_ASSIGN_VARIABLE: {
      return ParseAssignVariable(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_AVERAGE_POOL_2D: {
      return ParsePool(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_BATCH_MATMUL: {
      return ParseBatchMatMul(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_BATCH_TO_SPACE_ND: {
      return ParseBatchToSpaceNd(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_BROADCAST_ARGS: {
      return ParseBroadcastArgs(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_BROADCAST_TO: {
      return ParseBroadcastTo(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_CALL_ONCE: {
      return ParseCallOnce(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_CEIL: {
      return ParseCeil(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_CONCATENATION: {
      return ParseConcatenation(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_CONV_2D: {
      return ParseConv2D(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_CUMSUM: {
      return ParseCumsum(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_DEPTH_TO_SPACE: {
      return ParseDepthToSpace(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_DEPTHWISE_CONV_2D: {
      return ParseDepthwiseConv2D(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_DEQUANTIZE: {
      return ParseDequantize(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_DIV: {
      return ParseDiv(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_ELU: {
      return ParseElu(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_EMBEDDING_LOOKUP: {
      return ParseEmbeddingLookup(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_EXP: {
      return ParseExp(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_EXPAND_DIMS: {
      return ParseExpandDims(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_FILL: {
      return ParseFill(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_FLOOR: {
      return ParseFloor(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_FLOOR_DIV: {
      return ParseFloorDiv(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_FLOOR_MOD: {
      return ParseFloorMod(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_FULLY_CONNECTED: {
      return ParseFullyConnected(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_GATHER_ND: {
      return ParseGatherNd(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_GREATER: {
      return ParseGreater(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_GREATER_EQUAL: {
      return ParseGreaterEqual(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_HARD_SWISH: {
      return ParseHardSwish(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_L2_NORMALIZATION: {
      return ParseL2Normalization(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_L2_POOL_2D: {
      return ParsePool(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_LEAKY_RELU: {
      return ParseLeakyRelu(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_LESS: {
      return ParseLess(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_LESS_EQUAL: {
      return ParseLessEqual(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_LOG: {
      return ParseLog(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_LOGICAL_AND: {
      return ParseLogicalAnd(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_LOGICAL_NOT: {
      return ParseLogicalNot(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_LOGICAL_OR: {
      return ParseLogicalOr(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_LOGISTIC: {
      return ParseLogistic(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_LOG_SOFTMAX: {
      return ParseLogSoftmax(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_LSTM: {
      return ParseLSTM(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_MAXIMUM: {
      return ParseMaximum(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_MAX_POOL_2D: {
      return ParsePool(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_MIRROR_PAD: {
      return ParseMirrorPad(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_MEAN: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_MINIMUM: {
      return ParseMinimum(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_MUL: {
      return ParseMul(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_NEG: {
      return ParseNeg(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_NOT_EQUAL: {
      return ParseNotEqual(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_PACK: {
      return ParsePack(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_PAD: {
      return ParsePad(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_PADV2: {
      return ParsePadV2(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_POW: {
      return ParsePow(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_PRELU: {
      return ParsePrelu(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_QUANTIZE: {
      return ParseQuantize(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_READ_VARIABLE: {
      return ParseReadVariable(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_REDUCE_ANY: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_REDUCE_ALL: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_REDUCE_MAX: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_REDUCE_MIN: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_REDUCE_PROD: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_RELU: {
      return ParseRelu(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_RELU6: {
      return ParseRelu6(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_RESHAPE: {
      return ParseReshape(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_RESIZE_BILINEAR: {
      return ParseResizeBilinear(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: {
      return ParseResizeNearestNeighbor(op, error_reporter, allocator,
                                        builtin_data);
    }
    case BuiltinOperator_ROUND: {
      return ParseRound(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_RSQRT: {
      return ParseRsqrt(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_SELECT_V2: {
      return ParseSelectV2(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_SHAPE: {
      return ParseShape(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_SIN: {
      return ParseSin(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_SOFTMAX: {
      return ParseSoftmax(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_SPACE_TO_BATCH_ND: {
      return ParseSpaceToBatchNd(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_SPACE_TO_DEPTH: {
      return ParseSpaceToDepth(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_SPLIT: {
      return ParseSplit(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_SPLIT_V: {
      return ParseSplitV(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_SQRT: {
      return ParseSqrt(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_SQUARE: {
      return ParseSquare(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_SQUARED_DIFFERENCE: {
      return ParseSquaredDifference(op, error_reporter, allocator,
                                    builtin_data);
    }
    case BuiltinOperator_SQUEEZE: {
      return ParseSqueeze(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_STRIDED_SLICE: {
      return ParseStridedSlice(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_SUB: {
      return ParseSub(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_SUM: {
      return ParseReducer(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_SVDF: {
      return ParseSvdf(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_TANH: {
      return ParseTanh(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_TRANSPOSE_CONV: {
      return ParseTransposeConv(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_UNPACK: {
      return ParseUnpack(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_VAR_HANDLE: {
      return ParseVarHandle(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_ZEROS_LIKE: {
      return ParseZerosLike(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_BITWISE_XOR: {
      return ParseBitwiseXor(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_RIGHT_SHIFT: {
      return ParseRightShift(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_CAST: {
      return ParseCast(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_LSH_PROJECTION: {
      auto params = safe_allocator.Allocate<TfLiteLSHProjectionParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* lshParams =
              op->builtin_options_as_LSHProjectionOptions()) {
        params->type = parseLSHProjectionType(lshParams->type());
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: {
      auto params = safe_allocator.Allocate<TfLiteSequenceRNNParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* sequence_rnn_params =
              op->builtin_options_as_SequenceRNNOptions()) {
        params->activation =
            ConvertActivation(sequence_rnn_params->fused_activation_function());
        params->time_major = sequence_rnn_params->time_major();
        params->asymmetric_quantize_inputs =
            sequence_rnn_params->asymmetric_quantize_inputs();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN: {
      auto params =
          safe_allocator.Allocate<TfLiteBidirectionalSequenceRNNParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* bidi_sequence_rnn_params =
              op->builtin_options_as_BidirectionalSequenceRNNOptions()) {
        params->activation = ConvertActivation(
            bidi_sequence_rnn_params->fused_activation_function());
        params->time_major = bidi_sequence_rnn_params->time_major();
        params->merge_outputs = bidi_sequence_rnn_params->merge_outputs();
        params->asymmetric_quantize_inputs =
            bidi_sequence_rnn_params->asymmetric_quantize_inputs();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_RNN: {
      auto params = safe_allocator.Allocate<TfLiteRNNParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* rnn_params = op->builtin_options_as_RNNOptions()) {
        params->activation =
            ConvertActivation(rnn_params->fused_activation_function());
        params->asymmetric_quantize_inputs =
            rnn_params->asymmetric_quantize_inputs();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_EMBEDDING_LOOKUP_SPARSE: {
      auto params =
          safe_allocator.Allocate<TfLiteEmbeddingLookupSparseParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* embedding_params =
              op->builtin_options_as_EmbeddingLookupSparseOptions()) {
        params->combiner = parseCombinerType(embedding_params->combiner());
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_HASHTABLE_LOOKUP:
      return kTfLiteOk;
    case BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION: {
      auto params = safe_allocator.Allocate<TfLiteLocalResponseNormParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* schema_params =
              op->builtin_options_as_LocalResponseNormalizationOptions()) {
        params->radius = schema_params->radius();
        params->bias = schema_params->bias();
        params->alpha = schema_params->alpha();
        params->beta = schema_params->beta();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: {
      return ParseUnidirectionalSequenceLSTM(op, error_reporter, allocator,
                                             builtin_data);
    }
    case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: {
      auto params =
          safe_allocator.Allocate<TfLiteBidirectionalSequenceLSTMParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* bidi_lstm_params =
              op->builtin_options_as_BidirectionalSequenceLSTMOptions()) {
        params->activation =
            ConvertActivation(bidi_lstm_params->fused_activation_function());
        params->cell_clip = bidi_lstm_params->cell_clip();
        params->proj_clip = bidi_lstm_params->proj_clip();
        params->merge_outputs = bidi_lstm_params->merge_outputs();
        params->time_major = bidi_lstm_params->time_major();
        params->asymmetric_quantize_inputs =
            bidi_lstm_params->asymmetric_quantize_inputs();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_SKIP_GRAM: {
      auto params = safe_allocator.Allocate<TfLiteSkipGramParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* skip_gram_params =
              op->builtin_options_as_SkipGramOptions()) {
        params->ngram_size = skip_gram_params->ngram_size();
        params->max_skip_size = skip_gram_params->max_skip_size();
        params->include_all_ngrams = skip_gram_params->include_all_ngrams();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_GATHER: {
      return ParseGather(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_SPARSE_TO_DENSE: {
      auto params = safe_allocator.Allocate<TfLiteSparseToDenseParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* sparse_to_dense_params =
              op->builtin_options_as_SparseToDenseOptions()) {
        params->validate_indices = sparse_to_dense_params->validate_indices();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_DELEGATE: {
      TF_LITE_REPORT_ERROR(error_reporter,
                           "DELEGATE op shouldn't exist in model.");
      return kTfLiteError;
    }
    case BuiltinOperator_FAKE_QUANT: {
      auto params = safe_allocator.Allocate<TfLiteFakeQuantParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* schema_params =
              op->builtin_options_as_FakeQuantOptions()) {
        params->min = schema_params->min();
        params->max = schema_params->max();
        params->num_bits = schema_params->num_bits();
        params->narrow_range = schema_params->narrow_range();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_ONE_HOT: {
      auto params = safe_allocator.Allocate<TfLiteOneHotParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* schema_params = op->builtin_options_as_OneHotOptions()) {
        params->axis = schema_params->axis();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_UNIQUE: {
      auto params = safe_allocator.Allocate<TfLiteUniqueParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      const auto* unique_params = op->builtin_options_as_UniqueOptions();
      if (unique_params != nullptr) {
        params->index_out_type =
            unique_params->idx_out_type() == tflite::TensorType_INT64
                ? TfLiteType::kTfLiteInt64
                : TfLiteType::kTfLiteInt32;
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_REVERSE_SEQUENCE: {
      auto params = safe_allocator.Allocate<TfLiteReverseSequenceParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* reverse_seq_params =
              op->builtin_options_as_ReverseSequenceOptions()) {
        params->seq_dim = reverse_seq_params->seq_dim();
        params->batch_dim = reverse_seq_params->batch_dim();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_IF: {
      auto params = safe_allocator.Allocate<TfLiteIfParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* if_params = op->builtin_options_as_IfOptions()) {
        params->then_subgraph_index = if_params->then_subgraph_index();
        params->else_subgraph_index = if_params->else_subgraph_index();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_WHILE: {
      auto params = safe_allocator.Allocate<TfLiteWhileParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* while_params = op->builtin_options_as_WhileOptions()) {
        params->cond_subgraph_index = while_params->cond_subgraph_index();
        params->body_subgraph_index = while_params->body_subgraph_index();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_CONV_3D:
    case BuiltinOperator_CONV_3D_TRANSPOSE: {
      auto params = safe_allocator.Allocate<TfLiteConv3DParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* conv3d_params = op->builtin_options_as_Conv3DOptions()) {
        params->padding = ConvertPadding(conv3d_params->padding());
        params->activation =
            ConvertActivation(conv3d_params->fused_activation_function());
        params->stride_depth = conv3d_params->stride_d();
        params->stride_height = conv3d_params->stride_h();
        params->stride_width = conv3d_params->stride_w();
        params->dilation_depth_factor = conv3d_params->dilation_d_factor();
        params->dilation_height_factor = conv3d_params->dilation_h_factor();
        params->dilation_width_factor = conv3d_params->dilation_w_factor();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_HASHTABLE: {
      auto params = safe_allocator.Allocate<TfLiteHashtableParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* hashtable_params =
              op->builtin_options_as_HashtableOptions()) {
        params->table_id = hashtable_params->table_id();
        TF_LITE_ENSURE_STATUS(ConvertTensorType(
            hashtable_params->key_dtype(), &params->key_dtype, error_reporter));
        TF_LITE_ENSURE_STATUS(ConvertTensorType(hashtable_params->value_dtype(),
                                                &params->value_dtype,
                                                error_reporter));
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_MULTINOMIAL: {
      auto params = safe_allocator.Allocate<TfLiteRandomParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* multinomial_params =
              op->builtin_options_as_RandomOptions()) {
        params->seed = multinomial_params->seed();
        params->seed2 = multinomial_params->seed2();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_RANDOM_STANDARD_NORMAL: {
      auto params = safe_allocator.Allocate<TfLiteRandomParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* random_std_normal_params =
              op->builtin_options_as_RandomOptions()) {
        params->seed = random_std_normal_params->seed();
        params->seed2 = random_std_normal_params->seed2();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_BUCKETIZE: {
      auto params = safe_allocator.Allocate<TfLiteBucketizeParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* bucketize_params =
              op->builtin_options_as_BucketizeOptions()) {
        const flatbuffers::Vector<float>* boundaries =
            bucketize_params->boundaries();
        if (boundaries == nullptr) {
          TF_LITE_REPORT_ERROR(
              error_reporter,
              "boundaries array not provided for operation 'bucketize'.\n");
          return kTfLiteError;
        }
        params->num_boundaries = boundaries->size();
        if (boundaries->data() == nullptr) {
          TF_LITE_REPORT_ERROR(error_reporter,
                               "boundaries.data() returned nullptr for "
                               "operation 'bucketize'.\n");
          return kTfLiteError;
        }
        params->boundaries = boundaries->data();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_RANDOM_UNIFORM: {
      auto params = safe_allocator.Allocate<TfLiteRandomParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* random_uniform_params =
              op->builtin_options_as_RandomOptions()) {
        params->seed = random_uniform_params->seed();
        params->seed2 = random_uniform_params->seed2();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_GELU: {
      auto params = safe_allocator.Allocate<TfLiteGeluParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* gelu_params = op->builtin_options_as_GeluOptions()) {
        params->approximate = gelu_params->approximate();
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_STABLEHLO_SCATTER: {
      return ParseStablehloScatter(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_STABLEHLO_RNG_BIT_GENERATOR: {
      return ParseStablehloRngBitGenerator(op, error_reporter, allocator,
                                           builtin_data);
    }
    case BuiltinOperator_STABLEHLO_GATHER: {
      return ParseStablehloGather(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_STABLEHLO_REDUCE_WINDOW: {
      return ParseStablehloReduceWindow(op, error_reporter, allocator,
                                        builtin_data);
    }
    case BuiltinOperator_REDUCE_WINDOW: {
      auto params = safe_allocator.Allocate<TfLiteReduceWindowParams>();
      TF_LITE_ENSURE(error_reporter, params != nullptr);
      if (const auto* reduce_params =
              op->builtin_options_2_as_ReduceWindowOptions()) {
        switch (reduce_params->reduce_function()) {
          case ReduceWindowFunction_ADD:
            params->reduce_function = TfLiteReduceWindowFunctionAdd;
            break;
          case ReduceWindowFunction_MUL:
            params->reduce_function = TfLiteReduceWindowFunctionMul;
            break;
          case ReduceWindowFunction_MINIMUM:
            params->reduce_function = TfLiteReduceWindowFunctionMin;
            break;
          case ReduceWindowFunction_MAXIMUM:
            params->reduce_function = TfLiteReduceWindowFunctionMax;
            break;
          case ReduceWindowFunction_ALL:
            params->reduce_function = TfLiteReduceWindowFunctionAll;
            break;
          case ReduceWindowFunction_ANY:
            params->reduce_function = TfLiteReduceWindowFunctionAny;
            break;
          case ReduceWindowFunction_UNSUPPORTED:
          default:
            return kTfLiteError;
        }
      }
      *builtin_data = params.release();
      return kTfLiteOk;
    }
    case BuiltinOperator_STABLEHLO_PAD: {
      return ParseStablehloPad(op, error_reporter, allocator, builtin_data);
    }
    case BuiltinOperator_STABLEHLO_COMPOSITE: {
      return ParseStablehloComposite(op, error_reporter, allocator,
                                     builtin_data);
    }
    case BuiltinOperator_STABLEHLO_SHIFT_LEFT: {
      return ParseStablehloShiftLeft(op, error_reporter, allocator,
                                     builtin_data);
    }
    case BuiltinOperator_STABLEHLO_SLICE:
    case BuiltinOperator_STABLEHLO_BROADCAST_IN_DIM:
    case BuiltinOperator_STABLEHLO_CONVOLUTION:
    case BuiltinOperator_STABLEHLO_LOGISTIC:
    case BuiltinOperator_STABLEHLO_ADD:
    case BuiltinOperator_STABLEHLO_DIVIDE:
    case BuiltinOperator_STABLEHLO_MULTIPLY:
    case BuiltinOperator_STABLEHLO_MAXIMUM:
    case BuiltinOperator_STABLEHLO_RESHAPE:
    case BuiltinOperator_STABLEHLO_CLAMP:
    case BuiltinOperator_STABLEHLO_CONCATENATE:
    case BuiltinOperator_STABLEHLO_CUSTOM_CALL:
    case BuiltinOperator_STABLEHLO_REDUCE:
    case BuiltinOperator_STABLEHLO_ABS:
    case BuiltinOperator_STABLEHLO_AND:
    case BuiltinOperator_STABLEHLO_COSINE:
    case BuiltinOperator_STABLEHLO_EXPONENTIAL:
    case BuiltinOperator_STABLEHLO_FLOOR:
    case BuiltinOperator_STABLEHLO_LOG:
    case BuiltinOperator_STABLEHLO_MINIMUM:
    case BuiltinOperator_STABLEHLO_NEGATE:
    case BuiltinOperator_STABLEHLO_OR:
    case BuiltinOperator_STABLEHLO_POWER:
    case BuiltinOperator_STABLEHLO_REMAINDER:
    case BuiltinOperator_STABLEHLO_RSQRT:
    case BuiltinOperator_STABLEHLO_SELECT:
    case BuiltinOperator_STABLEHLO_SUBTRACT:
    case BuiltinOperator_STABLEHLO_TANH:
    case BuiltinOperator_STABLEHLO_DYNAMIC_SLICE:
    case BuiltinOperator_STABLEHLO_DYNAMIC_UPDATE_SLICE:
    case BuiltinOperator_STABLEHLO_IOTA:
    case BuiltinOperator_STABLEHLO_COMPARE:
    case BuiltinOperator_STABLEHLO_CONVERT:
    case BuiltinOperator_STABLEHLO_DOT_GENERAL:
    case BuiltinOperator_STABLEHLO_SORT:
    case BuiltinOperator_STABLEHLO_WHILE:
    case BuiltinOperator_STABLEHLO_TRANSPOSE:
    case BuiltinOperator_STABLEHLO_CBRT:
    case BuiltinOperator_CALL:
    case BuiltinOperator_COMPLEX_ABS:
    case BuiltinOperator_CONCAT_EMBEDDINGS:
    case BuiltinOperator_COS:
    case BuiltinOperator_CUSTOM:
    case BuiltinOperator_DENSIFY:
    case BuiltinOperator_DYNAMIC_UPDATE_SLICE:
    case BuiltinOperator_EQUAL:
    case BuiltinOperator_HASHTABLE_FIND:
    case BuiltinOperator_HASHTABLE_IMPORT:
    case BuiltinOperator_HASHTABLE_SIZE:
    case BuiltinOperator_IMAG:
    case BuiltinOperator_MATRIX_DIAG:
    case BuiltinOperator_MATRIX_SET_DIAG:
    case BuiltinOperator_NON_MAX_SUPPRESSION_V4:
    case BuiltinOperator_NON_MAX_SUPPRESSION_V5:
    case BuiltinOperator_RELU_N1_TO_1:
    case BuiltinOperator_RELU_0_TO_1:
    case BuiltinOperator_SCATTER_ND:
    case BuiltinOperator_SELECT:
    case BuiltinOperator_SLICE:
    case BuiltinOperator_TILE:
    case BuiltinOperator_TOPK_V2:
    case BuiltinOperator_TRANSPOSE:
    case BuiltinOperator_RANGE:
    case BuiltinOperator_RANK:
    case BuiltinOperator_REAL:
    case BuiltinOperator_RFFT2D:
    case BuiltinOperator_SEGMENT_SUM:
    case BuiltinOperator_REVERSE_V2:
    case BuiltinOperator_UNSORTED_SEGMENT_MAX:
    case BuiltinOperator_UNSORTED_SEGMENT_MIN:
    case BuiltinOperator_UNSORTED_SEGMENT_PROD:
    case BuiltinOperator_UNSORTED_SEGMENT_SUM:
    case BuiltinOperator_ATAN2:
    case BuiltinOperator_SIGN:
    case BuiltinOperator_BITCAST:
    case BuiltinOperator_WHERE:
    case BuiltinOperator_DILATE:
      return kTfLiteOk;
    case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES:
      return kTfLiteError;
  }
  return kTfLiteError;
}
#endif
}

TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
                               ErrorReporter* error_reporter) {
  switch (tensor_type) {
    case TensorType_FLOAT16:
      *type = kTfLiteFloat16;
      return kTfLiteOk;
    case TensorType_BFLOAT16:
      *type = kTfLiteBFloat16;
      return kTfLiteOk;
    case TensorType_FLOAT32:
      *type = kTfLiteFloat32;
      return kTfLiteOk;
    case TensorType_FLOAT64:
      *type = kTfLiteFloat64;
      return kTfLiteOk;
    case TensorType_INT16:
      *type = kTfLiteInt16;
      return kTfLiteOk;
    case TensorType_UINT16:
      *type = kTfLiteUInt16;
      return kTfLiteOk;
    case TensorType_INT32:
      *type = kTfLiteInt32;
      return kTfLiteOk;
    case TensorType_UINT32:
      *type = kTfLiteUInt32;
      return kTfLiteOk;
    case TensorType_UINT8:
      *type = kTfLiteUInt8;
      return kTfLiteOk;
    case TensorType_INT8:
      *type = kTfLiteInt8;
      return kTfLiteOk;
    case TensorType_INT64:
      *type = kTfLiteInt64;
      return kTfLiteOk;
    case TensorType_UINT64:
      *type = kTfLiteUInt64;
      return kTfLiteOk;
    case TensorType_STRING:
      *type = kTfLiteString;
      return kTfLiteOk;
    case TensorType_BOOL:
      *type = kTfLiteBool;
      return kTfLiteOk;
    case TensorType_COMPLEX64:
      *type = kTfLiteComplex64;
      return kTfLiteOk;
    case TensorType_COMPLEX128:
      *type = kTfLiteComplex128;
      return kTfLiteOk;
    case TensorType_RESOURCE:
      *type = kTfLiteResource;
      return kTfLiteOk;
    case TensorType_VARIANT:
      *type = kTfLiteVariant;
      return kTfLiteOk;
    case TensorType_INT4:
      *type = kTfLiteInt4;
      return kTfLiteOk;
    default:
      *type = kTfLiteNoType;
      TF_LITE_REPORT_ERROR(error_reporter,
                           "Unsupported data type %d in tensor\n", tensor_type);
      return kTfLiteError;
  }
}

TfLiteStatus ParseAbs(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
                      BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteAddParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteAddParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  const AddOptions* schema_params = op->builtin_options_as_AddOptions();
  if (schema_params != nullptr) {
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
    params->pot_scale_int16 = schema_params->pot_scale_int16();
  } else {
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseAddN(const Operator* op, ErrorReporter* error_reporter,
                       BuiltinDataAllocator* allocator, void** builtin_data) {
  return kTfLiteOk;
}

TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteArgMaxParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteArgMaxParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  const ArgMaxOptions* schema_params = op->builtin_options_as_ArgMaxOptions();
  if (schema_params != nullptr) {
    TF_LITE_ENSURE_STATUS(ConvertTensorType(
        schema_params->output_type(), &params->output_type, error_reporter));
  } else {
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteArgMinParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteArgMinParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  const ArgMinOptions* schema_params = op->builtin_options_as_ArgMinOptions();
  if (schema_params != nullptr) {
    TF_LITE_ENSURE_STATUS(ConvertTensorType(
        schema_params->output_type(), &params->output_type, error_reporter));
  } else {
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseAssignVariable(const Operator*, ErrorReporter*,
                                 BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}
TfLiteStatus ParseBatchMatMul(const Operator* op, ErrorReporter* error_reporter,
                              BuiltinDataAllocator* allocator,
                              void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteBatchMatMulParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* bmm_params = op->builtin_options_as_BatchMatMulOptions()) {
    params->adj_x = bmm_params->adj_x();
    params->adj_y = bmm_params->adj_y();
    params->asymmetric_quantize_inputs =
        bmm_params->asymmetric_quantize_inputs();
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseBatchToSpaceNd(const Operator*, ErrorReporter*,
                                 BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseBroadcastArgs(const Operator*, ErrorReporter*,
                                BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseBroadcastTo(const Operator*, ErrorReporter*,
                              BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseCallOnce(const Operator* op, ErrorReporter* error_reporter,
                           BuiltinDataAllocator* allocator,
                           void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteCallOnceParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteCallOnceParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  const CallOnceOptions* schema_params =
      op->builtin_options_as_CallOnceOptions();
  if (schema_params != nullptr) {
    params->init_subgraph_index = schema_params->init_subgraph_index();
  } else {
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseCast(const Operator* op, ErrorReporter* error_reporter,
                       BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteCastParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* schema_params = op->builtin_options_as_CastOptions()) {
    TF_LITE_ENSURE_STATUS(ConvertTensorType(
        schema_params->in_data_type(), &params->in_data_type, error_reporter));
    TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_data_type(),
                                            &params->out_data_type,
                                            error_reporter));
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseCeil(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseConcatenation(const Operator* op,
                                ErrorReporter* error_reporter,
                                BuiltinDataAllocator* allocator,
                                void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteConcatenationParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteConcatenationParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  const ConcatenationOptions* schema_params =
      op->builtin_options_as_ConcatenationOptions();
  if (schema_params != nullptr) {
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
    params->axis = schema_params->axis();
  } else {
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}
TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteConvParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteConvParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  const Conv2DOptions* schema_params = op->builtin_options_as_Conv2DOptions();
  if (schema_params != nullptr) {
    params->padding = ConvertPadding(schema_params->padding());
    params->stride_width = schema_params->stride_w();
    params->stride_height = schema_params->stride_h();
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
    params->dilation_width_factor = schema_params->dilation_w_factor();
    params->dilation_height_factor = schema_params->dilation_h_factor();
    TF_LITE_ENSURE_STATUS(
        ConvertTensorType(schema_params->quantized_bias_type(),
                          &params->quantized_bias_type, error_reporter));
  } else {
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseCumsum(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteCumsumParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* cumsum_params = op->builtin_options_as_CumsumOptions()) {
    params->exclusive = cumsum_params->exclusive();
    params->reverse = cumsum_params->reverse();
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseCos(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseDepthToSpace(const Operator* op,
                               ErrorReporter* error_reporter,
                               BuiltinDataAllocator* allocator,
                               void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteDepthToSpaceParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteDepthToSpaceParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  const auto* schema_params = op->builtin_options_as_DepthToSpaceOptions();
  if (schema_params != nullptr) {
    params->block_size = schema_params->block_size();
  } else {
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseDepthwiseConv2D(const Operator* op,
                                  ErrorReporter* error_reporter,
                                  BuiltinDataAllocator* allocator,
                                  void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteDepthwiseConvParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteDepthwiseConvParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  const DepthwiseConv2DOptions* schema_params =
      op->builtin_options_as_DepthwiseConv2DOptions();
  if (schema_params != nullptr) {
    params->padding = ConvertPadding(schema_params->padding());
    params->stride_width = schema_params->stride_w();
    params->stride_height = schema_params->stride_h();
    params->depth_multiplier = schema_params->depth_multiplier();
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
    params->dilation_width_factor = schema_params->dilation_w_factor();
    params->dilation_height_factor = schema_params->dilation_h_factor();
  } else {
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseDequantize(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}
TfLiteStatus ParseDiv(const Operator* op, ErrorReporter* error_reporter,
                      BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteDivParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* schema_params = op->builtin_options_as_DivOptions()) {
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseElu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseEmbeddingLookup(const Operator*, ErrorReporter*,
                                  BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseEqual(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseExp(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseExpandDims(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseFill(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseFloor(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseFloorDiv(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseFloorMod(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseFullyConnected(const Operator* op,
                                 ErrorReporter* error_reporter,
                                 BuiltinDataAllocator* allocator,
                                 void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteFullyConnectedParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteFullyConnectedParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  const FullyConnectedOptions* schema_params =
      op->builtin_options_as_FullyConnectedOptions();
  if (schema_params != nullptr) {
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
    params->keep_num_dims = schema_params->keep_num_dims();
    params->asymmetric_quantize_inputs =
        schema_params->asymmetric_quantize_inputs();
    TF_LITE_ENSURE_STATUS(
        ConvertTensorType(schema_params->quantized_bias_type(),
                          &params->quantized_bias_type, error_reporter));
    switch (schema_params->weights_format()) {
      case FullyConnectedOptionsWeightsFormat_DEFAULT:
        params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
        break;
      case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
        params->weights_format =
            kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
        break;
      default:
        TF_LITE_REPORT_ERROR(error_reporter,
                             "Unhandled fully-connected weights format.");
        return kTfLiteError;
    }
  } else {
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseGather(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteGatherParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  params->axis = 0;
  params->batch_dims = 0;
  if (const auto* gather_params = op->builtin_options_as_GatherOptions()) {
    params->axis = gather_params->axis();
    params->batch_dims = gather_params->batch_dims();
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseGatherNd(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}
TfLiteStatus ParseGreater(const Operator*, ErrorReporter*,
                          BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseGreaterEqual(const Operator*, ErrorReporter*,
                               BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseHardSwish(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseIf(const Operator* op, ErrorReporter* error_reporter,
                     BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteIfParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteIfParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  const IfOptions* schema_params = op->builtin_options_as_IfOptions();
  if (schema_params != nullptr) {
    params->then_subgraph_index = schema_params->then_subgraph_index();
    params->else_subgraph_index = schema_params->else_subgraph_index();
  } else {
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseL2Normalization(const Operator* op,
                                  ErrorReporter* error_reporter,
                                  BuiltinDataAllocator* allocator,
                                  void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteL2NormParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteL2NormParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  const L2NormOptions* schema_params = op->builtin_options_as_L2NormOptions();
  if (schema_params != nullptr) {
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
  } else {
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseLeakyRelu(const Operator* op, ErrorReporter* error_reporter,
                            BuiltinDataAllocator* allocator,
                            void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteLeakyReluParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* leaky_relu_params =
          op->builtin_options_as_LeakyReluOptions()) {
    params->alpha = leaky_relu_params->alpha();
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}

TfLiteStatus ParseLess(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseLessEqual(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseLog(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseLogicalAnd(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseLogicalNot(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseLogicalOr(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseLogistic(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}

TfLiteStatus ParseLogSoftmax(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}
lstm_params = op->builtin_options_as_LSTMOptions()) {\n params->activation =\n ConvertActivation(lstm_params->fused_activation_function());\n params->cell_clip = lstm_params->cell_clip();\n params->proj_clip = lstm_params->proj_clip();\n switch (lstm_params->kernel_type()) {\n case LSTMKernelType_FULL:\n params->kernel_type = kTfLiteLSTMFullKernel;\n break;\n case LSTMKernelType_BASIC:\n params->kernel_type = kTfLiteLSTMBasicKernel;\n break;\n default:\n TF_LITE_REPORT_ERROR(error_reporter, \"Unhandled LSTM kernel type: %d\",\n lstm_params->kernel_type());\n return kTfLiteError;\n }\n params->asymmetric_quantize_inputs =\n lstm_params->asymmetric_quantize_inputs();\n } else {\n TF_LITE_REPORT_ERROR(error_reporter, \"No valid LSTM builtin options exist\");\n return kTfLiteError;\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseMaximum(const Operator*, ErrorReporter*,\n BuiltinDataAllocator*, void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseMinimum(const Operator*, ErrorReporter*,\n BuiltinDataAllocator*, void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseMirrorPad(const Operator* op, ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator,\n void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr\n params = safe_allocator.Allocate();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const MirrorPadOptions* schema_params =\n op->builtin_options_as_MirrorPadOptions();\n if (schema_params != nullptr) {\n params->mode = ConvertMirrorPadding(schema_params->mode());\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator, void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr\n params = safe_allocator.Allocate();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const MulOptions* schema_params = op->builtin_options_as_MulOptions();\n if (schema_params != nullptr) {\n params->activation =\n ConvertActivation(schema_params->fused_activation_function());\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseNeg(const Operator*, ErrorReporter*, BuiltinDataAllocator*,\n void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseNotEqual(const Operator*, ErrorReporter*,\n BuiltinDataAllocator*, void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator, void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr\n params = safe_allocator.Allocate();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const PackOptions* schema_params = op->builtin_options_as_PackOptions();\n if (schema_params != nullptr) {\n params->values_count = schema_params->values_count();\n params->axis = schema_params->axis();\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParsePad(const Operator*, ErrorReporter*, BuiltinDataAllocator*,\n void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParsePadV2(const Operator*, ErrorReporter*, BuiltinDataAllocator*,\n void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,\n 
BuiltinDataAllocator* allocator, void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr<TfLitePoolParams, SafeBuiltinDataAllocator::BuiltinDataDeleter>\n params = safe_allocator.Allocate<TfLitePoolParams>();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const Pool2DOptions* schema_params = op->builtin_options_as_Pool2DOptions();\n if (schema_params != nullptr) {\n params->padding = ConvertPadding(schema_params->padding());\n params->stride_width = schema_params->stride_w();\n params->stride_height = schema_params->stride_h();\n params->filter_width = schema_params->filter_width();\n params->filter_height = schema_params->filter_height();\n params->activation =\n ConvertActivation(schema_params->fused_activation_function());\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParsePow(const Operator*, ErrorReporter*, BuiltinDataAllocator*,\n void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParsePrelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,\n void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseQuantize(const Operator*, ErrorReporter*,\n BuiltinDataAllocator*, void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseReadVariable(const Operator*, ErrorReporter*,\n BuiltinDataAllocator*, void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator,\n void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr<TfLiteReducerParams,\n SafeBuiltinDataAllocator::BuiltinDataDeleter>\n params = safe_allocator.Allocate<TfLiteReducerParams>();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const ReducerOptions* schema_params = op->builtin_options_as_ReducerOptions();\n if (schema_params != nullptr) {\n params->keep_dims = schema_params->keep_dims();\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseRelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,\n void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseRelu6(const Operator*, ErrorReporter*, BuiltinDataAllocator*,\n void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator,\n void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr<TfLiteReshapeParams,\n SafeBuiltinDataAllocator::BuiltinDataDeleter>\n params = safe_allocator.Allocate<TfLiteReshapeParams>();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const ReshapeOptions* schema_params = op->builtin_options_as_ReshapeOptions();\n if (schema_params != nullptr) {\n const flatbuffers::Vector<int32_t>* new_shape = schema_params->new_shape();\n if (new_shape != nullptr) {\n TF_LITE_ENSURE_STATUS(\n FlatBufferIntVectorToArray(sizeof(params->shape), new_shape,\n params->shape, error_reporter, \"reshape\"));\n params->num_dimensions = new_shape->size();\n } else {\n }\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseResizeBilinear(const Operator* op,\n ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator,\n void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr<TfLiteResizeBilinearParams,\n SafeBuiltinDataAllocator::BuiltinDataDeleter>\n params = safe_allocator.Allocate<TfLiteResizeBilinearParams>();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const ResizeBilinearOptions* schema_params =\n op->builtin_options_as_ResizeBilinearOptions();\n if (schema_params != nullptr) {\n 
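// When ResizeBilinearOptions are present, both alignment flags are copied from the schema; the else branch below falls back to false for each.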
params->align_corners = schema_params->align_corners();\n params->half_pixel_centers = schema_params->half_pixel_centers();\n } else {\n params->align_corners = false;\n params->half_pixel_centers = false;\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseResizeNearestNeighbor(const Operator* op,\n ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator,\n void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr<TfLiteResizeNearestNeighborParams,\n SafeBuiltinDataAllocator::BuiltinDataDeleter>\n params = safe_allocator.Allocate<TfLiteResizeNearestNeighborParams>();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const ResizeNearestNeighborOptions* schema_params =\n op->builtin_options_as_ResizeNearestNeighborOptions();\n if (schema_params != nullptr) {\n params->align_corners = schema_params->align_corners();\n params->half_pixel_centers = schema_params->half_pixel_centers();\n } else {\n params->align_corners = false;\n params->half_pixel_centers = false;\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseStablehloReduceWindow(const Operator* op,\n ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator,\n void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n auto params = safe_allocator.Allocate<TfLiteStablehloReduceWindowParams>();\n const StablehloReduceWindowOptions* schema_params =\n op->builtin_options_2_as_StablehloReduceWindowOptions();\n if (schema_params) {\n if (!schema_params->window_dimensions() ||\n schema_params->window_dimensions()->size() == 0) {\n TF_LITE_REPORT_ERROR(error_reporter,\n \"'window_dimensions' attribute is not optional for \"\n \"'stablehlo.reduce_window' and cannot be empty.\");\n return kTfLiteError;\n }\n const size_t rank = schema_params->window_dimensions()->size();\n auto LoadAttr = [&error_reporter](\n int64_t* params_array, size_t params_array_size_bytes,\n const flatbuffers::Vector<int64_t>* flatbuffer_vector,\n const char* attr_name, const size_t expected_size,\n const int64_t fill_value) -> TfLiteStatus {\n if (flatbuffer_vector && flatbuffer_vector->size()) {\n if (expected_size != 0 && flatbuffer_vector->size() != expected_size) {\n TF_LITE_REPORT_ERROR(\n error_reporter,\n \"'%s' attribute of 'stablehlo.reduce_window' does not have the \"\n \"expected size (%llu != %llu).\",\n attr_name, flatbuffer_vector->size(), expected_size);\n return kTfLiteError;\n }\n TfLiteStatus status = FlatBufferIntVectorToArray(\n params_array_size_bytes, flatbuffer_vector, params_array,\n error_reporter, \"stablehlo.reduce_window\");\n if (status != kTfLiteOk) {\n TF_LITE_REPORT_ERROR(error_reporter, \"Check the '%s' attribute.\",\n attr_name);\n return status;\n }\n } else {\n std::fill_n(params_array, params_array_size_bytes / sizeof(int64_t),\n fill_value);\n }\n return kTfLiteOk;\n };\n TF_LITE_ENSURE_STATUS(\n LoadAttr(params->window_dimensions, sizeof(params->window_dimensions),\n schema_params->window_dimensions(), \"window_dimensions\",\n rank, 1));\n TF_LITE_ENSURE_STATUS(\n LoadAttr(params->window_strides, sizeof(params->window_strides),\n schema_params->window_strides(), \"window_strides\",\n rank, 1));\n TF_LITE_ENSURE_STATUS(\n LoadAttr(params->base_dilations, sizeof(params->base_dilations),\n schema_params->base_dilations(), \"base_dilations\",\n rank, 1));\n TF_LITE_ENSURE_STATUS(\n LoadAttr(params->window_dilations, sizeof(params->window_dilations),\n schema_params->window_dilations(), \"window_dilations\",\n rank, 
1));\n TF_LITE_ENSURE_STATUS(LoadAttr(params->padding, sizeof(params->padding),\n schema_params->padding(), \"padding\",\n 2 * rank,\n 0));\n params->body_subgraph_index = schema_params->body_subgraph_index();\n *builtin_data = params.release();\n return kTfLiteOk;\n }\n TF_LITE_REPORT_ERROR(\n error_reporter,\n \"Could not get 'stablehlo.reduce_window' operation parameters.\");\n return kTfLiteError;\n}\nTfLiteStatus ParseStablehloScatter(const Operator* op,\n ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator,\n void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr\n params = safe_allocator.Allocate();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const StablehloScatterOptions* schema_params =\n op->builtin_options_2_as_StablehloScatterOptions();\n if (schema_params) {\n params->indices_are_sorted = schema_params->indices_are_sorted();\n if (schema_params->update_window_dims()) {\n TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(\n schema_params->update_window_dims()->size() * sizeof(int64_t),\n schema_params->update_window_dims(), params->update_window_dims,\n error_reporter, \"stablehlo_scatter\"));\n params->num_update_window_dims =\n schema_params->update_window_dims()->size();\n }\n if (schema_params->inserted_window_dims()) {\n TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(\n schema_params->inserted_window_dims()->size() * sizeof(int64_t),\n schema_params->inserted_window_dims(), params->inserted_window_dims,\n error_reporter, \"stablehlo_scatter\"));\n params->num_inserted_window_dims =\n schema_params->inserted_window_dims()->size();\n }\n if (schema_params->scatter_dims_to_operand_dims()) {\n TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(\n schema_params->scatter_dims_to_operand_dims()->size() *\n sizeof(int64_t),\n schema_params->scatter_dims_to_operand_dims(),\n params->scatter_dims_to_operand_dims, error_reporter,\n \"stablehlo_scatter\"));\n params->num_scatter_dims_to_operand_dims =\n schema_params->scatter_dims_to_operand_dims()->size();\n }\n params->index_vector_dim = schema_params->index_vector_dim();\n params->unique_indices = schema_params->unique_indices();\n params->update_computation_subgraph_index =\n schema_params->update_computation_subgraph_index();\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseStablehloRngBitGenerator(const Operator* op,\n ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator,\n void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr\n params = safe_allocator.Allocate();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const StablehloRngBitGeneratorOptions* schema_params =\n op->builtin_options_2_as_StablehloRngBitGeneratorOptions();\n if (schema_params != nullptr) {\n params->algorithm = ConvertRngAlgorithm(schema_params->algorithm());\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseStablehloGather(const Operator* op,\n ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator,\n void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr\n params = safe_allocator.Allocate();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const StablehloGatherOptions* schema_params 
=\n op->builtin_options_2_as_StablehloGatherOptions();\n if (schema_params != nullptr) {\n TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(\n schema_params->offset_dims()->size() *\n sizeof(int64_t),\n schema_params->offset_dims(),\n params->offset_dims, error_reporter,\n \"stablehlo_gather\"));\n params->num_offset_dims = schema_params->offset_dims()->size();\n TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(\n schema_params->collapsed_slice_dims()->size() * sizeof(int64_t),\n schema_params->collapsed_slice_dims(), params->collapsed_slice_dims,\n error_reporter, \"stablehlo_gather\"));\n params->num_collapsed_slice_dims =\n schema_params->collapsed_slice_dims()->size();\n TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(\n schema_params->start_index_map()->size() * sizeof(int64_t),\n schema_params->start_index_map(), params->start_index_map,\n error_reporter, \"stablehlo_gather\"));\n params->num_start_index_map = schema_params->start_index_map()->size();\n params->index_vector_dim = schema_params->index_vector_dim();\n TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(\n schema_params->slice_sizes()->size() * sizeof(int64_t),\n schema_params->slice_sizes(), params->slice_sizes, error_reporter,\n \"stablehlo_gather\"));\n params->num_slice_sizes = schema_params->slice_sizes()->size();\n params->indices_are_sorted = schema_params->indices_are_sorted();\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseStablehloPad(const Operator* op,\n ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator,\n void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n auto params = safe_allocator.Allocate();\n const StablehloPadOptions* schema_params =\n op->builtin_options_2_as_StablehloPadOptions();\n if (schema_params) {\n auto LoadAttr =\n [&error_reporter](\n int64_t* params_array, const size_t params_array_size_bytes,\n const flatbuffers::Vector* const flatbuffer_vector,\n const char* const attr_name) -> TfLiteStatus {\n TfLiteStatus status = FlatBufferIntVectorToArray(\n params_array_size_bytes, flatbuffer_vector, params_array,\n error_reporter, \"stablehlo.pad\");\n if (status != kTfLiteOk) {\n TF_LITE_REPORT_ERROR(error_reporter, \"Check the '%s' attribute.\",\n attr_name);\n }\n return status;\n };\n TF_LITE_ENSURE_STATUS(\n LoadAttr(params->edge_padding_low, sizeof(params->edge_padding_low),\n schema_params->edge_padding_low(), \"edge_padding_low\"));\n TF_LITE_ENSURE_STATUS(\n LoadAttr(params->edge_padding_high, sizeof(params->edge_padding_high),\n schema_params->edge_padding_high(), \"edge_padding_high\"));\n TF_LITE_ENSURE_STATUS(\n LoadAttr(params->interior_padding, sizeof(params->interior_padding),\n schema_params->interior_padding(), \"interior_padding\"));\n if (schema_params->edge_padding_low()->size() !=\n schema_params->edge_padding_high()->size() ||\n schema_params->edge_padding_low()->size() !=\n schema_params->interior_padding()->size()) {\n TF_LITE_REPORT_ERROR(error_reporter,\n \"'stablehlo.pad' operation parameter array sizes \"\n \"are not consistent.\");\n return kTfLiteError;\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n }\n TF_LITE_REPORT_ERROR(error_reporter,\n \"Could not get 'stablehlo.pad' operation parameters.\");\n return kTfLiteError;\n}\nTfLiteStatus ParseStablehloComposite(const Operator* op,\n ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator,\n void** builtin_data) {\n 
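// 'name' and 'attributes' below alias memory owned by the flatbuffer (c_str()/data()), so the model buffer must outlive the returned params struct.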
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n auto params = safe_allocator.Allocate();\n const StableHLOCompositeOptions* schema_params =\n op->builtin_options_2_as_StableHLOCompositeOptions();\n if (schema_params) {\n params->name = schema_params->name()->c_str();\n params->version = schema_params->version();\n params->subgraph_index = schema_params->decomposition_subgraph_index();\n params->attributes = schema_params->composite_attributes()->data();\n params->attributes_size = schema_params->composite_attributes()->size();\n *builtin_data = params.release();\n return kTfLiteOk;\n }\n TF_LITE_REPORT_ERROR(\n error_reporter,\n \"Could not get 'stablehlo.composite' operation parameters.\");\n return kTfLiteError;\n}\nTfLiteStatus ParseStablehloShiftLeft(const Operator* op,\n ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator,\n void** builtin_data) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseRound(const Operator*, ErrorReporter*, BuiltinDataAllocator*,\n void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseRsqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,\n void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseSelectV2(const Operator*, ErrorReporter*,\n BuiltinDataAllocator*, void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator, void** builtin_data) {\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr\n params = safe_allocator.Allocate();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const ShapeOptions* schema_params = op->builtin_options_as_ShapeOptions();\n if (schema_params != nullptr) {\n TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_type(),\n &params->out_type, error_reporter));\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseSin(const Operator*, ErrorReporter*, BuiltinDataAllocator*,\n void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseSlice(const Operator*, ErrorReporter*, BuiltinDataAllocator*,\n void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator,\n void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr\n params = safe_allocator.Allocate();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const SoftmaxOptions* schema_params = op->builtin_options_as_SoftmaxOptions();\n if (schema_params != nullptr) {\n params->beta = schema_params->beta();\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseSpaceToBatchNd(const Operator*, ErrorReporter*,\n BuiltinDataAllocator*, void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseSpaceToDepth(const Operator* op,\n ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator,\n void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr\n params = safe_allocator.Allocate();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const auto* schema_params = op->builtin_options_as_SpaceToDepthOptions();\n if (schema_params != nullptr) {\n params->block_size = schema_params->block_size();\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseSplit(const Operator* op, ErrorReporter* 
error_reporter,\n BuiltinDataAllocator* allocator, void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr\n params = safe_allocator.Allocate();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const SplitOptions* schema_params = op->builtin_options_as_SplitOptions();\n if (schema_params != nullptr) {\n params->num_splits = schema_params->num_splits();\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator, void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr\n params = safe_allocator.Allocate();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const SplitVOptions* schema_params = op->builtin_options_as_SplitVOptions();\n if (schema_params != nullptr) {\n params->num_splits = schema_params->num_splits();\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseUnidirectionalSequenceLSTM(const Operator* op,\n ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator,\n void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n auto params =\n safe_allocator.Allocate();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n if (const auto* seq_lstm_params =\n op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) {\n params->activation =\n ConvertActivation(seq_lstm_params->fused_activation_function());\n params->cell_clip = seq_lstm_params->cell_clip();\n params->proj_clip = seq_lstm_params->proj_clip();\n params->time_major = seq_lstm_params->time_major();\n params->asymmetric_quantize_inputs =\n seq_lstm_params->asymmetric_quantize_inputs();\n params->diagonal_recurrent_tensors =\n seq_lstm_params->diagonal_recurrent_tensors();\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseSqueeze(const Operator* op, ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator,\n void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr\n params = safe_allocator.Allocate();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const SqueezeOptions* schema_params = op->builtin_options_as_SqueezeOptions();\n if (schema_params != nullptr) {\n const auto* squeeze_dims = schema_params->squeeze_dims();\n if (squeeze_dims != nullptr) {\n TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(\n sizeof(params->squeeze_dims), squeeze_dims, params->squeeze_dims,\n error_reporter, \"squeeze\"));\n params->num_squeeze_dims = squeeze_dims->size();\n } else {\n params->num_squeeze_dims = 0;\n }\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseSqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,\n void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseSquare(const Operator*, ErrorReporter*, BuiltinDataAllocator*,\n void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseSquaredDifference(const Operator*, ErrorReporter*,\n BuiltinDataAllocator*, void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseStridedSlice(const Operator* op,\n ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator,\n void** builtin_data) {\n 
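// The five bit-mask fields and 'offset' are copied verbatim from StridedSliceOptions; a missing options table (empty else branch) leaves the newly allocated params untouched.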
CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr\n params = safe_allocator.Allocate();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const StridedSliceOptions* schema_params =\n op->builtin_options_as_StridedSliceOptions();\n if (schema_params != nullptr) {\n params->begin_mask = schema_params->begin_mask();\n params->end_mask = schema_params->end_mask();\n params->ellipsis_mask = schema_params->ellipsis_mask();\n params->new_axis_mask = schema_params->new_axis_mask();\n params->shrink_axis_mask = schema_params->shrink_axis_mask();\n params->offset = schema_params->offset();\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator, void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr\n params = safe_allocator.Allocate();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const SubOptions* schema_params = op->builtin_options_as_SubOptions();\n if (schema_params != nullptr) {\n params->activation =\n ConvertActivation(schema_params->fused_activation_function());\n params->pot_scale_int16 = schema_params->pot_scale_int16();\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator, void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr\n params = safe_allocator.Allocate();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const SVDFOptions* schema_params = op->builtin_options_as_SVDFOptions();\n if (schema_params != nullptr) {\n params->rank = schema_params->rank();\n params->activation =\n ConvertActivation(schema_params->fused_activation_function());\n params->asymmetric_quantize_inputs =\n schema_params->asymmetric_quantize_inputs();\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseTanh(const Operator*, ErrorReporter*, BuiltinDataAllocator*,\n void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseTranspose(const Operator*, ErrorReporter*,\n BuiltinDataAllocator*, void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseTransposeConv(const Operator* op,\n ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator,\n void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr\n params = safe_allocator.Allocate();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const TransposeConvOptions* transpose_conv_params =\n op->builtin_options_as_TransposeConvOptions();\n if (transpose_conv_params != nullptr) {\n params->padding = ConvertPadding(transpose_conv_params->padding());\n params->stride_width = transpose_conv_params->stride_w();\n params->stride_height = transpose_conv_params->stride_h();\n params->activation =\n ConvertActivation(transpose_conv_params->fused_activation_function());\n TF_LITE_ENSURE_STATUS(\n ConvertTensorType(transpose_conv_params->quantized_bias_type(),\n &params->quantized_bias_type, error_reporter));\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* 
error_reporter,\n BuiltinDataAllocator* allocator, void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr<TfLiteUnpackParams,\n SafeBuiltinDataAllocator::BuiltinDataDeleter>\n params = safe_allocator.Allocate<TfLiteUnpackParams>();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const UnpackOptions* schema_params = op->builtin_options_as_UnpackOptions();\n if (schema_params != nullptr) {\n params->num = schema_params->num();\n params->axis = schema_params->axis();\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseVarHandle(const Operator* op, ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator,\n void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr<TfLiteVarHandleParams,\n SafeBuiltinDataAllocator::BuiltinDataDeleter>\n params = safe_allocator.Allocate<TfLiteVarHandleParams>();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const VarHandleOptions* schema_params =\n op->builtin_options_as_VarHandleOptions();\n if (schema_params != nullptr) {\n if (schema_params->container()) {\n params->container = schema_params->container()->c_str();\n }\n if (schema_params->shared_name()) {\n params->shared_name = schema_params->shared_name()->c_str();\n }\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseWhile(const Operator* op, ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator, void** builtin_data) {\n CheckParsePointerParams(op, error_reporter, allocator, builtin_data);\n SafeBuiltinDataAllocator safe_allocator(allocator);\n std::unique_ptr<TfLiteWhileParams,\n SafeBuiltinDataAllocator::BuiltinDataDeleter>\n params = safe_allocator.Allocate<TfLiteWhileParams>();\n TF_LITE_ENSURE(error_reporter, params != nullptr);\n const WhileOptions* schema_params = op->builtin_options_as_WhileOptions();\n if (schema_params != nullptr) {\n params->cond_subgraph_index = schema_params->cond_subgraph_index();\n params->body_subgraph_index = schema_params->body_subgraph_index();\n } else {\n }\n *builtin_data = params.release();\n return kTfLiteOk;\n}\nTfLiteStatus ParseZerosLike(const Operator*, ErrorReporter*,\n BuiltinDataAllocator*, void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseBitwiseXor(const Operator*, ErrorReporter*,\n BuiltinDataAllocator*, void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseRightShift(const Operator*, ErrorReporter*,\n BuiltinDataAllocator*, void**) {\n return kTfLiteOk;\n}\nTfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,\n ErrorReporter* error_reporter,\n BuiltinDataAllocator* allocator, void** builtin_data) {\n#ifdef TF_LITE_STATIC_MEMORY\n TF_LITE_REPORT_ERROR(\n error_reporter,\n \"ParseOpData is unsupported on TfLiteMicro, please use the operator \"\n \"specific parse functions (e.g. 
ParseAdd etc.).\\n\");\n return kTfLiteError;\n#else\n return ParseOpDataTfLite(op, op_type, error_reporter, allocator,\n builtin_data);\n#endif\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/lite/core/api/flatbuffer_conversions.h\"\n#include <cstdarg>\n#include <cstdint>\n#include <cstdio>\n#include <cstring>\n#include <tuple>\n#include <vector>\n#include <gmock/gmock.h>\n#include <gtest/gtest.h>\n#include \"flatbuffers/buffer.h\" \n#include \"flatbuffers/flatbuffer_builder.h\" \n#include \"tensorflow/compiler/mlir/lite/core/api/error_reporter.h\"\n#include \"tensorflow/lite/core/c/builtin_op_data.h\"\n#include \"tensorflow/lite/core/c/c_api_types.h\"\n#include \"tensorflow/lite/schema/schema_generated.h\"\n#include \"tensorflow/lite/string_type.h\"\nusing testing::AllOf;\nusing testing::Each;\nusing testing::ElementsAre;\nusing testing::Eq;\nusing testing::HasSubstr;\nusing testing::StrEq;\nnamespace tflite {\nnamespace {\nclass MockErrorReporter : public ErrorReporter {\n public:\n MockErrorReporter() : buffer_size_(0) {}\n int Report(const char* format, va_list args) override {\n buffer_size_ += vsnprintf(buffer_ + buffer_size_,\n kBufferSize - buffer_size_, format, args);\n return buffer_size_;\n }\n const char* GetBuffer() const { return buffer_; }\n int GetBufferSize() const { return buffer_size_; }\n bool IsEmpty() const { return !buffer_size_; }\n string GetString() const { return string(buffer_, buffer_size_); }\n private:\n static constexpr int kBufferSize = 256;\n char buffer_[kBufferSize];\n int buffer_size_;\n};\nclass MockDataAllocator : public BuiltinDataAllocator {\n public:\n MockDataAllocator() : is_allocated_(false) {}\n void* Allocate(size_t size, size_t alignment_hint) override {\n EXPECT_FALSE(is_allocated_);\n const int max_size = kBufferSize;\n EXPECT_LE(size, max_size);\n is_allocated_ = true;\n return buffer_;\n }\n void Deallocate(void* data) override { is_allocated_ = false; }\n private:\n static constexpr int kBufferSize = 1024;\n char buffer_[kBufferSize];\n bool is_allocated_;\n};\n} \nclass FlatbufferConversionsTest : public ::testing::Test {\n public:\n const Operator* BuildTestOperator(BuiltinOptions op_type,\n flatbuffers::Offset<void> options) {\n flatbuffers::Offset<Operator> offset =\n CreateOperatorDirect(builder_, 0, nullptr, nullptr, op_type, options,\n nullptr, CustomOptionsFormat_FLEXBUFFERS, nullptr);\n builder_.Finish(offset);\n void* pointer = builder_.GetBufferPointer();\n return flatbuffers::GetRoot<Operator>(pointer);\n }\n const Operator* BuildTestOperator(BuiltinOptions2 op_type,\n flatbuffers::Offset<void> options) {\n flatbuffers::Offset<Operator> offset = CreateOperatorDirect(\n builder_, 0, nullptr, nullptr,\n tflite::BuiltinOptions_NONE,\n 0, nullptr,\n tflite::CustomOptionsFormat_FLEXBUFFERS,\n nullptr, nullptr,\n 0, 0,\n op_type,\n options);\n builder_.Finish(offset);\n void* pointer = builder_.GetBufferPointer();\n return flatbuffers::GetRoot<Operator>(pointer);\n }\n protected:\n MockErrorReporter mock_reporter_;\n MockDataAllocator mock_allocator_;\n flatbuffers::FlatBufferBuilder builder_;\n};\nTEST_F(FlatbufferConversionsTest, ParseSqueezeAll) {\n const Operator* op = BuildTestOperator(\n BuiltinOptions_SqueezeOptions, CreateSqueezeOptions(builder_).Union());\n void* output_data = nullptr;\n EXPECT_EQ(kTfLiteOk, ParseOpData(op, BuiltinOperator_SQUEEZE, &mock_reporter_,\n &mock_allocator_, &output_data));\n}\nTEST_F(FlatbufferConversionsTest, ParseDynamicReshape) {\n const Operator* op = BuildTestOperator(\n BuiltinOptions_ReshapeOptions, CreateReshapeOptions(builder_).Union());\n void* output_data = nullptr;\n 
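// A ReshapeOptions table without a new_shape vector must still parse cleanly: the shape is expected to arrive as a runtime input tensor instead.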
EXPECT_EQ(kTfLiteOk, ParseOpData(op, BuiltinOperator_RESHAPE, &mock_reporter_,\n &mock_allocator_, &output_data));\n}\nTEST_F(FlatbufferConversionsTest, TestParseOpDataConv) {\n const Operator* conv_op =\n BuildTestOperator(BuiltinOptions_Conv2DOptions,\n CreateConv2DOptions(builder_, Padding_SAME, 1, 2,\n ActivationFunctionType_RELU, 3, 4)\n .Union());\n void* output_data = nullptr;\n EXPECT_EQ(kTfLiteOk,\n ParseOpData(conv_op, BuiltinOperator_CONV_2D, &mock_reporter_,\n &mock_allocator_, &output_data));\n EXPECT_NE(nullptr, output_data);\n TfLiteConvParams* params = reinterpret_cast<TfLiteConvParams*>(output_data);\n EXPECT_EQ(kTfLitePaddingSame, params->padding);\n EXPECT_EQ(1, params->stride_width);\n EXPECT_EQ(2, params->stride_height);\n EXPECT_EQ(kTfLiteActRelu, params->activation);\n EXPECT_EQ(3, params->dilation_width_factor);\n EXPECT_EQ(4, params->dilation_height_factor);\n}\nTEST_F(FlatbufferConversionsTest, ParseBadFullyConnected) {\n const Operator* conv_op = BuildTestOperator(\n BuiltinOptions_FullyConnectedOptions,\n CreateFullyConnectedOptions(\n builder_, ActivationFunctionType_RELU,\n static_cast<FullyConnectedOptionsWeightsFormat>(-1), true)\n .Union());\n void* output_data = nullptr;\n EXPECT_EQ(kTfLiteError,\n ParseOpData(conv_op, BuiltinOperator_FULLY_CONNECTED,\n &mock_reporter_, &mock_allocator_, &output_data));\n}\nTEST_F(FlatbufferConversionsTest, TestParseOpDataCustom) {\n const Operator* custom_op =\n BuildTestOperator(BuiltinOptions_NONE, flatbuffers::Offset<void>());\n void* output_data = nullptr;\n EXPECT_EQ(kTfLiteOk,\n ParseOpData(custom_op, BuiltinOperator_CUSTOM, &mock_reporter_,\n &mock_allocator_, &output_data));\n EXPECT_EQ(nullptr, output_data);\n}\nTEST_F(FlatbufferConversionsTest, TestConvertTensorType) {\n TfLiteType type;\n EXPECT_EQ(kTfLiteOk,\n ConvertTensorType(TensorType_FLOAT32, &type, &mock_reporter_));\n EXPECT_EQ(kTfLiteFloat32, type);\n}\nTEST_F(FlatbufferConversionsTest, TestConvertTensorTypeFloat16) {\n TfLiteType type;\n EXPECT_EQ(kTfLiteOk,\n ConvertTensorType(TensorType_FLOAT16, &type, &mock_reporter_));\n EXPECT_EQ(kTfLiteFloat16, type);\n}\nTEST_F(FlatbufferConversionsTest, TestConvertTensorTypeBFloat16) {\n TfLiteType type;\n EXPECT_EQ(kTfLiteOk,\n ConvertTensorType(TensorType_BFLOAT16, &type, &mock_reporter_));\n EXPECT_EQ(kTfLiteBFloat16, type);\n}\nTEST_F(FlatbufferConversionsTest, TestConvertTensorTypeInt4) {\n TfLiteType type;\n EXPECT_EQ(kTfLiteOk,\n ConvertTensorType(TensorType_INT4, &type, &mock_reporter_));\n EXPECT_EQ(kTfLiteInt4, type);\n}\nclass StablehloReduceWindowFlatbufferConversionsTest\n : public FlatbufferConversionsTest {\n public:\n static constexpr int kMaxDims =\n TFLITE_STABLEHLO_REDUCE_WINDOW_PARAMS_MAX_DIMENSION_COUNT;\n static constexpr int64_t kValidValue = 5;\n auto ValidAttr() {\n return builder_.CreateVector(std::vector<int64_t>(kMaxDims, kValidValue));\n }\n auto InvalidAttr() {\n return builder_.CreateVector(\n std::vector<int64_t>(kMaxDims + 1, kValidValue));\n }\n auto ValidPaddingAttr() {\n return builder_.CreateVector(\n std::vector<int64_t>(2 * kMaxDims, kValidValue));\n }\n auto InvalidPaddingAttr() {\n return builder_.CreateVector(\n std::vector<int64_t>(2 * kMaxDims + 1, kValidValue));\n }\n auto EmptyAttr() { return builder_.CreateVector<int64_t>({}); }\n};\nTEST_F(StablehloReduceWindowFlatbufferConversionsTest, Succeeds) {\n const Operator* stablehlo_reduce_window_op = BuildTestOperator(\n BuiltinOptions2_StablehloReduceWindowOptions,\n CreateStablehloReduceWindowOptions(\n builder_,\n builder_.CreateVector<int64_t>({1, 2}),\n builder_.CreateVector<int64_t>({3, 4}),\n builder_.CreateVector<int64_t>({5, 6}),\n 
builder_.CreateVector<int64_t>({7, 8}),\n builder_.CreateVector<int64_t>({9, 10, 11, 12}),\n 13)\n .Union());\n TfLiteStablehloReduceWindowParams* output_data = nullptr;\n EXPECT_EQ(\n ParseOpData(stablehlo_reduce_window_op,\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,\n &mock_allocator_, (void**)&output_data),\n kTfLiteOk);\n EXPECT_THAT(std::make_tuple(output_data->window_dimensions, 2),\n ElementsAre(1, 2));\n EXPECT_THAT(std::make_tuple(output_data->window_strides, 2),\n ElementsAre(3, 4));\n EXPECT_THAT(std::make_tuple(output_data->base_dilations, 2),\n ElementsAre(5, 6));\n EXPECT_THAT(std::make_tuple(output_data->window_dilations, 2),\n ElementsAre(7, 8));\n EXPECT_THAT(std::make_tuple(output_data->padding, 4),\n ElementsAre(9, 10, 11, 12));\n EXPECT_THAT(output_data->body_subgraph_index, Eq(13));\n}\nTEST_F(StablehloReduceWindowFlatbufferConversionsTest,\n FailsWithNoWindowDimensions) {\n TfLiteStablehloReduceWindowParams* output_data = nullptr;\n EXPECT_EQ(ParseOpData(\n BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,\n CreateStablehloReduceWindowOptions(\n builder_,\n 0,\n ValidAttr(),\n ValidAttr(),\n ValidAttr(),\n ValidPaddingAttr(),\n 13)\n .Union()),\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,\n &mock_allocator_, (void**)&output_data),\n kTfLiteError);\n EXPECT_THAT(mock_reporter_.GetString(),\n HasSubstr(\"'window_dimensions' attribute is not optional for \"\n \"'stablehlo.reduce_window' and cannot be empty.\"));\n}\nTEST_F(StablehloReduceWindowFlatbufferConversionsTest,\n SucceedsWithNoWindowStrides) {\n TfLiteStablehloReduceWindowParams* output_data = nullptr;\n EXPECT_EQ(ParseOpData(\n BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,\n CreateStablehloReduceWindowOptions(\n builder_,\n ValidAttr(),\n 0,\n ValidAttr(),\n ValidAttr(),\n ValidPaddingAttr(),\n 13)\n .Union()),\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,\n &mock_allocator_, (void**)&output_data),\n kTfLiteOk);\n EXPECT_THAT(mock_reporter_.GetString(), StrEq(\"\"));\n EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims), Each(1));\n EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(output_data->body_subgraph_index, Eq(13));\n}\nTEST_F(StablehloReduceWindowFlatbufferConversionsTest,\n SucceedsWithNoBaseDilations) {\n TfLiteStablehloReduceWindowParams* output_data = nullptr;\n EXPECT_EQ(ParseOpData(\n BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,\n CreateStablehloReduceWindowOptions(\n builder_,\n ValidAttr(),\n ValidAttr(),\n 0,\n ValidAttr(),\n ValidPaddingAttr(),\n 13)\n .Union()),\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,\n &mock_allocator_, (void**)&output_data),\n kTfLiteOk);\n EXPECT_THAT(mock_reporter_.GetString(), StrEq(\"\"));\n EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims), Each(1));\n EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),\n Each(kValidValue));\n 
EXPECT_THAT(output_data->body_subgraph_index, Eq(13));\n}\nTEST_F(StablehloReduceWindowFlatbufferConversionsTest,\n SucceedsWithNoWindowDilations) {\n TfLiteStablehloReduceWindowParams* output_data = nullptr;\n EXPECT_EQ(ParseOpData(\n BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,\n CreateStablehloReduceWindowOptions(\n builder_,\n ValidAttr(),\n ValidAttr(),\n ValidAttr(),\n 0,\n ValidPaddingAttr(),\n 13)\n .Union()),\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,\n &mock_allocator_, (void**)&output_data),\n kTfLiteOk);\n EXPECT_THAT(mock_reporter_.GetString(), StrEq(\"\"));\n EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),\n Each(1));\n EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(output_data->body_subgraph_index, Eq(13));\n}\nTEST_F(StablehloReduceWindowFlatbufferConversionsTest, SucceedsWithNoPadding) {\n TfLiteStablehloReduceWindowParams* output_data = nullptr;\n EXPECT_EQ(ParseOpData(\n BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,\n CreateStablehloReduceWindowOptions(\n builder_,\n ValidAttr(),\n ValidAttr(),\n ValidAttr(),\n ValidAttr(),\n 0,\n 13)\n .Union()),\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,\n &mock_allocator_, (void**)&output_data),\n kTfLiteOk);\n EXPECT_THAT(mock_reporter_.GetString(), StrEq(\"\"));\n EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims), Each(0));\n EXPECT_THAT(output_data->body_subgraph_index, Eq(13));\n}\nTEST_F(StablehloReduceWindowFlatbufferConversionsTest,\n FailsWithEmptyWindowDimensions) {\n TfLiteStablehloReduceWindowParams* output_data = nullptr;\n EXPECT_EQ(ParseOpData(\n BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,\n CreateStablehloReduceWindowOptions(\n builder_,\n EmptyAttr(),\n ValidAttr(),\n ValidAttr(),\n ValidAttr(),\n ValidPaddingAttr(),\n 13)\n .Union()),\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,\n &mock_allocator_, (void**)&output_data),\n kTfLiteError);\n EXPECT_THAT(mock_reporter_.GetString(),\n HasSubstr(\"'window_dimensions' attribute is not optional for \"\n \"'stablehlo.reduce_window' and cannot be empty.\"));\n}\nTEST_F(StablehloReduceWindowFlatbufferConversionsTest,\n SucceedsWithEmptyWindowStrides) {\n TfLiteStablehloReduceWindowParams* output_data = nullptr;\n EXPECT_EQ(ParseOpData(\n BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,\n CreateStablehloReduceWindowOptions(\n builder_,\n ValidAttr(),\n EmptyAttr(),\n ValidAttr(),\n ValidAttr(),\n ValidPaddingAttr(),\n 13)\n .Union()),\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,\n &mock_allocator_, (void**)&output_data),\n kTfLiteOk);\n EXPECT_THAT(mock_reporter_.GetString(), StrEq(\"\"));\n EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),\n Each(kValidValue));\n 
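// An empty (rather than absent) window_strides vector takes the same fallback path: LoadAttr fills the array with its fill value of 1.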
EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims), Each(1));\n EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(output_data->body_subgraph_index, Eq(13));\n}\nTEST_F(StablehloReduceWindowFlatbufferConversionsTest,\n SucceedsWithEmptyBaseDilations) {\n TfLiteStablehloReduceWindowParams* output_data = nullptr;\n EXPECT_EQ(ParseOpData(\n BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,\n CreateStablehloReduceWindowOptions(\n builder_,\n ValidAttr(),\n ValidAttr(),\n EmptyAttr(),\n ValidAttr(),\n ValidPaddingAttr(),\n 13)\n .Union()),\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,\n &mock_allocator_, (void**)&output_data),\n kTfLiteOk);\n EXPECT_THAT(mock_reporter_.GetString(), StrEq(\"\"));\n EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims), Each(1));\n EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(output_data->body_subgraph_index, Eq(13));\n}\nTEST_F(StablehloReduceWindowFlatbufferConversionsTest,\n SucceedsWithEmptyWindowDilations) {\n TfLiteStablehloReduceWindowParams* output_data = nullptr;\n EXPECT_EQ(ParseOpData(\n BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,\n CreateStablehloReduceWindowOptions(\n builder_,\n ValidAttr(),\n ValidAttr(),\n ValidAttr(),\n EmptyAttr(),\n ValidPaddingAttr(),\n 13)\n .Union()),\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,\n &mock_allocator_, (void**)&output_data),\n kTfLiteOk);\n EXPECT_THAT(mock_reporter_.GetString(), StrEq(\"\"));\n EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),\n Each(1));\n EXPECT_THAT(std::make_tuple(output_data->padding, 2 * kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(output_data->body_subgraph_index, Eq(13));\n}\nTEST_F(StablehloReduceWindowFlatbufferConversionsTest,\n SucceedsWithEmptyPadding) {\n TfLiteStablehloReduceWindowParams* output_data = nullptr;\n EXPECT_EQ(ParseOpData(\n BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,\n CreateStablehloReduceWindowOptions(\n builder_,\n ValidAttr(),\n ValidAttr(),\n ValidAttr(),\n ValidAttr(),\n EmptyAttr(),\n 13)\n .Union()),\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,\n &mock_allocator_, (void**)&output_data),\n kTfLiteOk);\n EXPECT_THAT(mock_reporter_.GetString(), StrEq(\"\"));\n EXPECT_THAT(std::make_tuple(output_data->window_dimensions, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->window_strides, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->base_dilations, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->window_dilations, kMaxDims),\n Each(kValidValue));\n EXPECT_THAT(std::make_tuple(output_data->padding, 2 * 
kMaxDims), Each(0));\n EXPECT_THAT(output_data->body_subgraph_index, Eq(13));\n}\nTEST_F(StablehloReduceWindowFlatbufferConversionsTest,\n SucceedsWithParamsAtMaxDims) {\n TfLiteStablehloReduceWindowParams* output_data = nullptr;\n EXPECT_EQ(ParseOpData(\n BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,\n CreateStablehloReduceWindowOptions(\n builder_,\n ValidAttr(),\n ValidAttr(),\n ValidAttr(),\n ValidAttr(),\n ValidPaddingAttr(),\n 13)\n .Union()),\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,\n &mock_allocator_, (void**)&output_data),\n kTfLiteOk);\n EXPECT_THAT(mock_reporter_.GetString(), StrEq(\"\"));\n}\nTEST_F(StablehloReduceWindowFlatbufferConversionsTest,\n FailsWhenWindowDimensionsHasMoreThanMaxDims) {\n TfLiteStablehloReduceWindowParams* output_data = nullptr;\n EXPECT_EQ(ParseOpData(\n BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,\n CreateStablehloReduceWindowOptions(\n builder_,\n InvalidAttr(),\n ValidAttr(),\n ValidAttr(),\n ValidAttr(),\n ValidPaddingAttr(),\n 13)\n .Union()),\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,\n &mock_allocator_, (void**)&output_data),\n kTfLiteError);\n EXPECT_THAT(mock_reporter_.GetString(),\n AllOf(HasSubstr(\"Found too many dimensions in the input array of \"\n \"operation 'stablehlo.reduce_window'.\"),\n HasSubstr(\"Check the 'window_dimensions' attribute.\")));\n}\nTEST_F(StablehloReduceWindowFlatbufferConversionsTest,\n FailsWhenWindowStridesHasWrongDimCount) {\n TfLiteStablehloReduceWindowParams* output_data = nullptr;\n EXPECT_EQ(ParseOpData(\n BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,\n CreateStablehloReduceWindowOptions(\n builder_,\n ValidAttr(),\n InvalidAttr(),\n ValidAttr(),\n ValidAttr(),\n ValidPaddingAttr(),\n 13)\n .Union()),\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,\n &mock_allocator_, (void**)&output_data),\n kTfLiteError);\n EXPECT_THAT(\n mock_reporter_.GetString(),\n HasSubstr(\"'window_strides' attribute of 'stablehlo.reduce_window' does \"\n \"not have the expected size\"));\n}\nTEST_F(StablehloReduceWindowFlatbufferConversionsTest,\n FailsWhenBaseDilationsHasWrongDimCount) {\n TfLiteStablehloReduceWindowParams* output_data = nullptr;\n EXPECT_EQ(ParseOpData(\n BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,\n CreateStablehloReduceWindowOptions(\n builder_,\n ValidAttr(),\n ValidAttr(),\n InvalidAttr(),\n ValidAttr(),\n ValidPaddingAttr(),\n 13)\n .Union()),\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,\n &mock_allocator_, (void**)&output_data),\n kTfLiteError);\n EXPECT_THAT(\n mock_reporter_.GetString(),\n HasSubstr(\"'base_dilations' attribute of 'stablehlo.reduce_window' does \"\n \"not have the expected size\"));\n}\nTEST_F(StablehloReduceWindowFlatbufferConversionsTest,\n FailsWhenWindowDilationsHasWrongDimCount) {\n TfLiteStablehloReduceWindowParams* output_data = nullptr;\n EXPECT_EQ(ParseOpData(\n BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,\n CreateStablehloReduceWindowOptions(\n builder_,\n ValidAttr(),\n ValidAttr(),\n ValidAttr(),\n InvalidAttr(),\n ValidPaddingAttr(),\n 13)\n .Union()),\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,\n &mock_allocator_, (void**)&output_data),\n kTfLiteError);\n EXPECT_THAT(\n mock_reporter_.GetString(),\n HasSubstr(\n \"'window_dilations' attribute of 'stablehlo.reduce_window' does \"\n \"not have the expected size\"));\n}\nTEST_F(StablehloReduceWindowFlatbufferConversionsTest,\n 
FailsWhenPaddingHasWrongDimCount) {\n TfLiteStablehloReduceWindowParams* output_data = nullptr;\n EXPECT_EQ(ParseOpData(\n BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions,\n CreateStablehloReduceWindowOptions(\n builder_,\n ValidAttr(),\n ValidAttr(),\n ValidAttr(),\n ValidAttr(),\n InvalidPaddingAttr(),\n 13)\n .Union()),\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,\n &mock_allocator_, (void**)&output_data),\n kTfLiteError);\n EXPECT_THAT(mock_reporter_.GetString(),\n HasSubstr(\"'padding' attribute of 'stablehlo.reduce_window' does \"\n \"not have the expected size\"));\n}\nTEST_F(StablehloReduceWindowFlatbufferConversionsTest, FailsWithWrongOptions) {\n const Operator* stablehlo_reduce_window_op =\n BuildTestOperator(BuiltinOptions2_StablehloReduceWindowOptions, 0);\n TfLiteStablehloReduceWindowParams* output_data = nullptr;\n EXPECT_EQ(\n ParseOpData(stablehlo_reduce_window_op,\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW, &mock_reporter_,\n &mock_allocator_, (void**)&output_data),\n kTfLiteError);\n EXPECT_THAT(\n mock_reporter_.GetString(),\n HasSubstr(\n \"Could not get 'stablehlo.reduce_window' operation parameters.\"));\n}\nTEST_F(StablehloReduceWindowFlatbufferConversionsTest, DeathTests) {\n const Operator* stablehlo_reduce_window_op = BuildTestOperator(\n BuiltinOptions2_StablehloReduceWindowOptions,\n CreateStablehloReduceWindowOptions(\n builder_, ValidAttr(),\n ValidAttr(),\n ValidAttr(),\n ValidAttr(),\n ValidPaddingAttr(), 13)\n .Union());\n TfLiteStablehloReduceWindowParams* output_data = nullptr;\n#ifdef NDEBUG\n GTEST_SKIP();\n#endif\n EXPECT_DEATH(\n ParseOpData(nullptr, BuiltinOperator_STABLEHLO_REDUCE_WINDOW,\n &mock_reporter_, &mock_allocator_, (void**)&output_data),\n \"\");\n EXPECT_DEATH(ParseOpData(stablehlo_reduce_window_op,\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW, nullptr,\n &mock_allocator_, (void**)&output_data),\n \"\");\n EXPECT_DEATH(ParseOpData(stablehlo_reduce_window_op,\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW,\n &mock_reporter_, nullptr, (void**)&output_data),\n \"\");\n EXPECT_DEATH(ParseOpData(stablehlo_reduce_window_op,\n BuiltinOperator_STABLEHLO_REDUCE_WINDOW,\n &mock_reporter_, &mock_allocator_, nullptr),\n \"\");\n}\nclass StablehloPadFlatbufferConversionsTest : public FlatbufferConversionsTest {\n public:\n static constexpr int kMaxDims =\n TFLITE_STABLEHLO_PAD_PARAMS_MAX_DIMENSION_COUNT;\n static constexpr int64_t kValidValue = 5;\n};\nTEST_F(StablehloPadFlatbufferConversionsTest, Succeeds) {\n const Operator* stablehlo_pad_op = BuildTestOperator(\n BuiltinOptions2_StablehloPadOptions,\n CreateStablehloPadOptions(\n builder_,\n builder_.CreateVector({1, 0, -1}),\n builder_.CreateVector({2, 0, -2}),\n builder_.CreateVector({3, 0, 3}))\n .Union());\n TfLiteStablehloPadParams* output_data = nullptr;\n EXPECT_EQ(\n ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,\n &mock_reporter_, &mock_allocator_, (void**)&output_data),\n kTfLiteOk);\n EXPECT_THAT(std::make_tuple(output_data->edge_padding_low, 3),\n ElementsAre(1, 0, -1));\n EXPECT_THAT(std::make_tuple(output_data->edge_padding_high, 3),\n ElementsAre(2, 0, -2));\n EXPECT_THAT(std::make_tuple(output_data->interior_padding, 3),\n ElementsAre(3, 0, 3));\n}\nTEST_F(StablehloPadFlatbufferConversionsTest, FailsWithMissingLowPadding) {\n const Operator* stablehlo_pad_op = BuildTestOperator(\n BuiltinOptions2_StablehloPadOptions,\n CreateStablehloPadOptions(\n builder_,\n 0,\n builder_.CreateVector({2, 0, -2}),\n builder_.CreateVector({3, 0, 3}))\n 
.Union());\n TfLiteStablehloPadParams* output_data = nullptr;\n EXPECT_EQ(\n ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,\n &mock_reporter_, &mock_allocator_, (void**)&output_data),\n kTfLiteError);\n EXPECT_THAT(\n mock_reporter_.GetString(),\n AllOf(\n HasSubstr(\"Input array not provided for operation 'stablehlo.pad'.\"),\n HasSubstr(\"Check the 'edge_padding_low' attribute.\")));\n}\nTEST_F(StablehloPadFlatbufferConversionsTest, FailsWithMissingHighPadding) {\n const Operator* stablehlo_pad_op = BuildTestOperator(\n BuiltinOptions2_StablehloPadOptions,\n CreateStablehloPadOptions(\n builder_,\n builder_.CreateVector({1, 0, -1}),\n 0,\n builder_.CreateVector({3, 0, 3}))\n .Union());\n TfLiteStablehloPadParams* output_data = nullptr;\n EXPECT_EQ(\n ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,\n &mock_reporter_, &mock_allocator_, (void**)&output_data),\n kTfLiteError);\n EXPECT_THAT(\n mock_reporter_.GetString(),\n AllOf(\n HasSubstr(\"Input array not provided for operation 'stablehlo.pad'.\"),\n HasSubstr(\"Check the 'edge_padding_high' attribute.\")));\n}\nTEST_F(StablehloPadFlatbufferConversionsTest, FailsWithMissingInteriorPadding) {\n const Operator* stablehlo_pad_op = BuildTestOperator(\n BuiltinOptions2_StablehloPadOptions,\n CreateStablehloPadOptions(\n builder_,\n builder_.CreateVector({1, 0, -1}),\n builder_.CreateVector({2, 0, -2}),\n 0)\n .Union());\n TfLiteStablehloPadParams* output_data = nullptr;\n EXPECT_EQ(\n ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,\n &mock_reporter_, &mock_allocator_, (void**)&output_data),\n kTfLiteError);\n EXPECT_THAT(\n mock_reporter_.GetString(),\n AllOf(\n HasSubstr(\"Input array not provided for operation 'stablehlo.pad'.\"),\n HasSubstr(\"Check the 'interior_padding' attribute.\")));\n}\nTEST_F(StablehloPadFlatbufferConversionsTest, FailsInconsistentSizes) {\n const Operator* stablehlo_pad_op = BuildTestOperator(\n BuiltinOptions2_StablehloPadOptions,\n CreateStablehloPadOptions(\n builder_,\n builder_.CreateVector({1, 0, -1}),\n builder_.CreateVector({2, 0, -2}),\n builder_.CreateVector({3, 0, -3, 5}))\n .Union());\n TfLiteStablehloPadParams* output_data = nullptr;\n EXPECT_EQ(\n ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,\n &mock_reporter_, &mock_allocator_, (void**)&output_data),\n kTfLiteError);\n EXPECT_THAT(mock_reporter_.GetString(),\n HasSubstr(\"'stablehlo.pad' operation parameter array sizes are \"\n \"not consistent.\"));\n}\nTEST_F(StablehloPadFlatbufferConversionsTest, FailsWithWrongOptions) {\n const Operator* stablehlo_pad_op = BuildTestOperator(BuiltinOptions_NONE, 0);\n TfLiteStablehloPadParams* output_data = nullptr;\n EXPECT_EQ(\n ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,\n &mock_reporter_, &mock_allocator_, (void**)&output_data),\n kTfLiteError);\n EXPECT_THAT(mock_reporter_.GetString(),\n HasSubstr(\"Could not get 'stablehlo.pad' operation parameters.\"));\n}\nTEST_F(StablehloPadFlatbufferConversionsTest, DeathTests) {\n const Operator* stablehlo_pad_op = BuildTestOperator(BuiltinOptions_NONE, 0);\n TfLiteStablehloPadParams* output_data = nullptr;\n#ifdef NDEBUG\n GTEST_SKIP();\n#endif\n EXPECT_DEATH(\n ParseOpData(nullptr, BuiltinOperator_STABLEHLO_PAD, &mock_reporter_,\n &mock_allocator_, (void**)&output_data),\n \"\");\n EXPECT_DEATH(ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,\n nullptr, &mock_allocator_, (void**)&output_data),\n \"\");\n EXPECT_DEATH(ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,\n 
TEST_F(StablehloPadFlatbufferConversionsTest, DeathTests) {\n const Operator* stablehlo_pad_op = BuildTestOperator(BuiltinOptions_NONE, 0);\n TfLiteStablehloPadParams* output_data = nullptr;\n#ifdef NDEBUG\n GTEST_SKIP();\n#endif\n EXPECT_DEATH(\n ParseOpData(nullptr, BuiltinOperator_STABLEHLO_PAD, &mock_reporter_,\n &mock_allocator_, (void**)&output_data),\n \"\");\n EXPECT_DEATH(ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,\n nullptr, &mock_allocator_, (void**)&output_data),\n \"\");\n EXPECT_DEATH(ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,\n &mock_reporter_, nullptr, (void**)&output_data),\n \"\");\n EXPECT_DEATH(ParseOpData(stablehlo_pad_op, BuiltinOperator_STABLEHLO_PAD,\n &mock_reporter_, &mock_allocator_, nullptr),\n \"\");\n}\n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/api/flatbuffer_conversions.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/api/flatbuffer_conversions_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1183,"cells":{"ID":{"kind":"string","value":"71d031db-472c-419f-8962-e94574569eb3"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"incremental_barrier"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/util/incremental_barrier.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/util/incremental_barrier_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/util/incremental_barrier.h\"\n#include <atomic>\n#include <functional>\n#include <utility>\n#include \"absl/functional/bind_front.h\"\n#include \"tensorflow/core/platform/logging.h\"\nnamespace tensorflow {\nclass InternalIncrementalBarrier {\n public:\n explicit InternalIncrementalBarrier(IncrementalBarrier::DoneCallback callback)\n : left_(1), done_callback_(std::move(callback)) {}\n void operator()() {\n DCHECK_GE(left_.load(std::memory_order_relaxed), 0);\n if (left_.fetch_sub(1, std::memory_order_acq_rel) - 1 == 0) {\n IncrementalBarrier::DoneCallback done_callback =\n std::move(done_callback_);\n delete this;\n done_callback();\n }\n }\n IncrementalBarrier::BarrierCallback Inc() {\n left_.fetch_add(1, std::memory_order_acq_rel);\n return absl::bind_front(&InternalIncrementalBarrier::operator(), this);\n }\n private:\n std::atomic<int64_t> left_;\n IncrementalBarrier::DoneCallback done_callback_;\n};\nIncrementalBarrier::IncrementalBarrier(DoneCallback done_callback)\n : internal_barrier_(\n new InternalIncrementalBarrier(std::move(done_callback))) {}\nIncrementalBarrier::~IncrementalBarrier() { (*internal_barrier_)(); }\nIncrementalBarrier::BarrierCallback IncrementalBarrier::Inc() {\n return internal_barrier_->Inc();\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/util/incremental_barrier.h\"\n#include <atomic>\n#include \"absl/functional/bind_front.h\"\n#include \"absl/time/time.h\"\n#include \"tensorflow/core/platform/env.h\"\n#include \"tensorflow/core/platform/mutex.h\"\n#include \"tensorflow/core/platform/platform.h\"\n#include \"tensorflow/core/platform/test.h\"\n#include \"tensorflow/core/platform/test_benchmark.h\"\n#include \"tensorflow/core/platform/thread_annotations.h\"\n#include \"tensorflow/core/platform/threadpool.h\"\nnamespace tensorflow {\nnamespace {\nclass Counter {\n public:\n void Increment() TF_LOCKS_EXCLUDED(mu_) {\n mutex_lock l(mu_);\n ++count_;\n }\n int GetCount() TF_LOCKS_EXCLUDED(mu_) {\n mutex_lock l(mu_);\n return count_;\n }\n private:\n mutex mu_;\n int count_ = 0;\n};\nTEST(IncrementalBarrierTest, RunInstantlyWhenZeroClosure) {\n Counter counter;\n EXPECT_EQ(counter.GetCount(), 0);\n {\n IncrementalBarrier::DoneCallback done_callback =\n absl::bind_front(&Counter::Increment, &counter);\n IncrementalBarrier barrier(done_callback);\n EXPECT_EQ(counter.GetCount(), 0);\n }\n EXPECT_EQ(counter.GetCount(), 1);\n}
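A usage sketch for the class under test, matching the lifetime rule the implementation above encodes: the internal count starts at one, each Inc() adds one, and the done callback runs exactly once after the barrier object is destroyed and every Inc() callback has run. The scheduler and finalize helpers here are hypothetical:

```cpp
#include "tensorflow/core/util/incremental_barrier.h"

// Hypothetical helpers, declared only to keep the sketch self-contained.
void FinalizeResults();
void ScheduleShard(int shard,
                   tensorflow::IncrementalBarrier::BarrierCallback done);

void ProcessShardsThenFinalize(int num_shards) {
  tensorflow::IncrementalBarrier barrier(
      [] { FinalizeResults(); });  // Runs once, after every shard reports in.
  for (int i = 0; i < num_shards; ++i) {
    // Inc() bumps the internal count; invoking the returned callback
    // releases it, so in-flight shards keep the barrier open.
    ScheduleShard(i, barrier.Inc());
  }
  // `barrier` going out of scope releases the initial count, so the done
  // callback cannot fire before all Inc() callbacks have been handed out.
}
```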
TEST(IncrementalBarrierTest, RunAfterNumClosuresOneNowTwoLater) {\n Counter counter;\n IncrementalBarrier::BarrierCallback bc1, bc2;\n {\n IncrementalBarrier::DoneCallback done_callback =\n absl::bind_front(&Counter::Increment, &counter);\n IncrementalBarrier barrier(done_callback);\n CHECK_EQ(counter.GetCount(), 0);\n bc1 = barrier.Inc();\n bc2 = barrier.Inc();\n IncrementalBarrier::BarrierCallback bc3 = barrier.Inc();\n bc3();\n CHECK_EQ(counter.GetCount(), 0);\n }\n CHECK_EQ(counter.GetCount(), 0);\n bc1();\n CHECK_EQ(counter.GetCount(), 0);\n bc2();\n CHECK_EQ(counter.GetCount(), 1);\n}\nTEST(IncrementalBarrierTest, RunAfterNumClosuresConcurrency) {\n const int num_closure = 100, num_thread = 2;\n std::atomic<int> schedule_count{0};\n Counter counter;\n {\n IncrementalBarrier::DoneCallback done_callback =\n absl::bind_front(&Counter::Increment, &counter);\n IncrementalBarrier barrier(done_callback);\n CHECK_EQ(counter.GetCount(), 0);\n tensorflow::thread::ThreadPool pool(tensorflow::Env::Default(),\n \"BarrierClosure\", num_thread);\n for (int i = 0; i < num_closure; ++i) {\n pool.Schedule([&barrier, &schedule_count]() {\n schedule_count.fetch_add(1);\n IncrementalBarrier::BarrierCallback bc = barrier.Inc();\n Env::Default()->SleepForMicroseconds(100);\n bc();\n });\n }\n CHECK_EQ(counter.GetCount(), 0);\n }\n CHECK_EQ(schedule_count.load(std::memory_order_relaxed), 100);\n CHECK_EQ(counter.GetCount(), 1);\n}\n#if defined(PLATFORM_GOOGLE)\nvoid BM_FunctionInc(benchmark::State& state) {\n IncrementalBarrier barrier([] {});\n for (auto _ : state) {\n barrier.Inc()();\n }\n}\nBENCHMARK(BM_FunctionInc);\n#endif \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/incremental_barrier.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/incremental_barrier_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1184,"cells":{"ID":{"kind":"string","value":"1314d878-27b7-445b-b083-af17ea4b6829"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"executor"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/common_runtime/executor.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/common_runtime/executor_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/common_runtime/executor.h\"\n#include \n#include \n#include \n#include \n#include \"absl/memory/memory.h\"\n#include \"absl/strings/str_join.h\"\n#include \"absl/time/time.h\"\n#include \"absl/types/optional.h\"\n#include \"tensorflow/core/activity_watcher/activity.h\"\n#include \"tensorflow/core/common_runtime/costmodel_manager.h\"\n#include \"tensorflow/core/common_runtime/entry.h\"\n#include \"tensorflow/core/common_runtime/executor_factory.h\"\n#include \"tensorflow/core/common_runtime/graph_view.h\"\n#include \"tensorflow/core/common_runtime/immutable_executor_state.h\"\n#include \"tensorflow/core/common_runtime/pending_counts.h\"\n#include \"tensorflow/core/common_runtime/propagator_state.h\"\n#include \"tensorflow/core/common_runtime/renamed_device.h\"\n#include \"tensorflow/core/common_runtime/simple_propagator_state.h\"\n#include \"tensorflow/core/common_runtime/step_stats_collector.h\"\n#include 
\"tensorflow/core/framework/allocator.h\"\n#include \"tensorflow/core/framework/cancellation.h\"\n#include \"tensorflow/core/framework/collective.h\"\n#include \"tensorflow/core/framework/control_flow.h\"\n#include \"tensorflow/core/framework/device_attributes.pb.h\"\n#include \"tensorflow/core/framework/log_memory.h\"\n#include \"tensorflow/core/framework/metrics.h\"\n#include \"tensorflow/core/framework/node_def_util.h\"\n#include \"tensorflow/core/framework/op_kernel.h\"\n#include \"tensorflow/core/framework/op_segment.h\"\n#include \"tensorflow/core/framework/tensor.h\"\n#include \"tensorflow/core/framework/tensor_reference.h\"\n#include \"tensorflow/core/framework/types.h\"\n#include \"tensorflow/core/framework/types.pb.h\"\n#include \"tensorflow/core/graph/edgeset.h\"\n#include \"tensorflow/core/graph/graph.h\"\n#include \"tensorflow/core/graph/graph_node_util.h\"\n#include \"tensorflow/core/lib/core/errors.h\"\n#include \"tensorflow/core/lib/core/notification.h\"\n#include \"tensorflow/core/lib/core/status.h\"\n#include \"tensorflow/core/lib/core/threadpool.h\"\n#include \"tensorflow/core/lib/gtl/flatmap.h\"\n#include \"tensorflow/core/lib/gtl/inlined_vector.h\"\n#include \"tensorflow/core/lib/gtl/manual_constructor.h\"\n#include \"tensorflow/core/lib/hash/hash.h\"\n#include \"tensorflow/core/platform/context.h\"\n#include \"tensorflow/core/platform/env.h\"\n#include \"tensorflow/core/platform/errors.h\"\n#include \"tensorflow/core/platform/logging.h\"\n#include \"tensorflow/core/platform/macros.h\"\n#include \"tensorflow/core/platform/mutex.h\"\n#include \"tensorflow/core/platform/profile_utils/cpu_utils.h\"\n#include \"tensorflow/core/platform/status.h\"\n#include \"tensorflow/core/platform/thread_annotations.h\"\n#include \"tensorflow/core/platform/types.h\"\n#include \"tensorflow/core/profiler/lib/annotated_traceme.h\"\n#include \"tensorflow/core/profiler/lib/connected_traceme.h\"\n#include \"tensorflow/core/profiler/lib/context_types.h\"\n#include \"tensorflow/core/profiler/lib/scoped_annotation.h\"\n#include \"tensorflow/core/profiler/lib/traceme.h\"\n#include \"tensorflow/core/profiler/lib/traceme_encode.h\"\n#include \"tensorflow/core/protobuf/error_codes.pb.h\"\n#include \"tensorflow/core/util/determinism.h\"\n#include \"tensorflow/core/util/managed_stack_trace.h\"\n#include \"tensorflow/core/util/tensor_slice_reader_cache.h\"\n#include \"tsl/platform/tracing.h\"\nnamespace tensorflow {\nnamespace {\nstatic const Tensor* const kEmptyTensor = new Tensor;\nnamespace nodestats {\ninline int64_t NowInNsec() { return EnvTime::NowNanos(); }\nvoid SetScheduled(NodeExecStatsInterface* stats, int64_t micros) {\n if (!stats) return;\n stats->SetScheduled(micros * EnvTime::kMicrosToNanos);\n}\nvoid SetAllStart(NodeExecStatsInterface* stats) {\n if (!stats) return;\n stats->RecordExecutorStarted();\n}\nvoid SetOpStart(NodeExecStatsInterface* stats) {\n if (!stats) return;\n stats->RecordComputeStarted();\n}\nvoid SetOpEnd(NodeExecStatsInterface* stats) {\n if (!stats) return;\n stats->RecordComputeEnded();\n}\nvoid SetAllEnd(NodeExecStatsInterface* stats) {\n if (!stats) return;\n stats->RecordExecutorEnded();\n}\nvoid SetOutput(NodeExecStatsInterface* stats, int slot, const Tensor* v) {\n if (!stats) return;\n stats->SetOutput(slot, v);\n}\nvoid SetMemory(NodeExecStatsInterface* stats, OpKernelContext* ctx) {\n if (!stats) return;\n stats->SetMemory(ctx);\n}\n} \nstruct KernelTimer {\n uint64 start_cycles = profile_utils::CpuUtils::GetCurrentClockCycle();\n uint64 ElapsedCycles() {\n 
return profile_utils::CpuUtils::GetCurrentClockCycle() - start_cycles;\n }\n};\ntypedef absl::InlinedVector TensorValueVec;\ntypedef absl::InlinedVector AllocatorAttributeVec;\nclass ExecutorImpl : public Executor {\n public:\n explicit ExecutorImpl(const LocalExecutorParams& p) : immutable_state_(p) {}\n Status Initialize(const Graph& graph) {\n TF_RETURN_IF_ERROR(immutable_state_.Initialize(graph));\n kernel_stats_.Initialize(immutable_state_.graph_view());\n return absl::OkStatus();\n }\n private:\n void RunAsyncInternal(const Args& args, DoneCallback done) override;\n template \n friend class ExecutorState;\n class KernelStats {\n public:\n KernelStats() = default;\n void Initialize(const GraphView& gview) {\n is_expensive_.resize(gview.num_nodes());\n cost_estimates_ =\n std::make_unique(gview.num_nodes());\n for (int32_t i = 0; i < gview.num_nodes(); ++i) {\n if (gview.node(i)) {\n is_expensive_[i] =\n gview.node(i)->kernel && gview.node(i)->kernel->IsExpensive();\n cost_estimates_[i] = kInitialCostEstimateCycles;\n }\n }\n }\n bool IsExpensive(const NodeItem& node) const {\n return is_expensive_[node.node_id] &&\n (cost_estimates_[node.node_id].load(std::memory_order_relaxed) >\n kOpIsExpensiveThresholdCycles);\n }\n bool HasExpensiveMarker(const NodeItem& node) const {\n return is_expensive_[node.node_id];\n }\n void UpdateCostEstimate(const NodeItem& node, uint64 elapsed_cycles) {\n std::atomic_uint_fast64_t& cost_estimate = cost_estimates_[node.node_id];\n auto prev_estimate = cost_estimate.load(std::memory_order_relaxed);\n uint64 new_estimate =\n ((kCostDecay - 1) * prev_estimate + elapsed_cycles) / kCostDecay;\n cost_estimate.store(new_estimate, std::memory_order_relaxed);\n }\n private:\n static constexpr uint64 kInitialCostEstimateCycles = 100 * 1000 * 1000;\n static constexpr uint64 kOpIsExpensiveThresholdCycles = 8000;\n static constexpr uint64 kCostDecay = 10;\n std::vector is_expensive_;\n std::unique_ptr cost_estimates_;\n };\n ImmutableExecutorState immutable_state_;\n KernelStats kernel_stats_;\n ExecutorImpl(const ExecutorImpl&) = delete;\n void operator=(const ExecutorImpl&) = delete;\n};\ntemplate \nclass ExecutorState {\n public:\n ExecutorState(const Executor::Args& args,\n const ImmutableExecutorState& immutable_state_,\n ExecutorImpl::KernelStats* kernel_stats_);\n ~ExecutorState();\n void RunAsync(Executor::DoneCallback done);\n private:\n typedef typename PropagatorStateType::TaggedNode TaggedNode;\n typedef\n typename PropagatorStateType::TaggedNodeReadyQueue TaggedNodeReadyQueue;\n typedef typename PropagatorStateType::TaggedNodeSeq TaggedNodeSeq;\n struct AsyncState;\n void Process(const TaggedNode& node, int64_t scheduled_nsec);\n void ProcessInline(TaggedNodeReadyQueue* inline_ready,\n int64_t scheduled_nsec);\n Status ProcessSync(const NodeItem& item, OpKernelContext::Params* params,\n EntryVector* outputs, NodeExecStatsInterface* stats);\n void ProcessAsync(const NodeItem& item, const OpKernelContext::Params& params,\n const TaggedNode& tagged_node, Entry* first_input,\n NodeExecStatsInterface* stats,\n activity_watcher::ActivityId activity_id);\n void ProcessNoop(NodeExecStatsInterface* stats);\n void ProcessConstTensor(const NodeItem& item, EntryVector* outputs,\n NodeExecStatsInterface* stats);\n Status PrepareInputs(const NodeItem& item, Entry* first_input,\n TensorValueVec* inputs,\n AllocatorAttributeVec* input_alloc_attrs,\n bool* is_input_dead);\n Status ProcessOutputs(const NodeItem& item, OpKernelContext* ctx,\n Entry* outputs, 
NodeExecStatsInterface* stats);\n bool NodeDone(const Status& s, TaggedNodeSeq* ready,\n NodeExecStatsInterface* stats,\n TaggedNodeReadyQueue* inline_ready);\n void ScheduleReady(TaggedNodeSeq* ready, TaggedNodeReadyQueue* inline_ready);\n template \n void RunTask(Closure&& c, int sample_rate = 0);\n void Finish();\n void ScheduleFinish();\n DeviceContext* device_context_ = nullptr;\n const bool vlog_; \n const bool log_memory_;\n int64_t step_id_;\n int64_t trace_id_; \n int64_t start_time_usecs_ = 0;\n absl::optional deadline_;\n static constexpr uint64 kInlineScheduleReadyThreshold = 500;\n RendezvousInterface* rendezvous_;\n CollectiveExecutor* collective_executor_ = nullptr;\n const ConfigProto* const session_config_;\n SessionState* session_state_;\n string session_handle_;\n const SessionMetadata* session_metadata_ = nullptr;\n TensorStore* tensor_store_;\n ScopedStepContainer* step_container_;\n StepStatsCollectorInterface* const stats_collector_;\n const tsl::tracing::EventCollector* const event_collector_;\n Context context_;\n checkpoint::TensorSliceReaderCacheWrapper* slice_reader_cache_;\n CallFrameInterface* call_frame_;\n const ImmutableExecutorState& immutable_state_;\n ExecutorImpl::KernelStats* const kernel_stats_;\n CancellationManager* cancellation_manager_;\n tsl::CoordinationServiceAgent* coordination_service_agent_;\n absl::optional stack_trace_ = absl::nullopt;\n std::unique_ptr user_device_;\n Executor::Args::Runner runner_;\n bool sync_on_finish_;\n const bool run_all_kernels_inline_;\n PropagatorStateType propagator_;\n Executor::DoneCallback done_cb_;\n std::atomic_int_fast32_t num_outstanding_ops_;\n mutex num_deferred_ops_mu_;\n int64_t num_deferred_ops_ TF_GUARDED_BY(num_deferred_ops_mu_) = 0;\n bool finish_when_deferred_ops_done_ TF_GUARDED_BY(num_deferred_ops_mu_) =\n false;\n mutex mu_;\n Status status_ TF_GUARDED_BY(mu_);\n};\ntemplate \nExecutorState::ExecutorState(\n const Executor::Args& args, const ImmutableExecutorState& immutable_state,\n ExecutorImpl::KernelStats* kernel_stats)\n : vlog_(VLOG_IS_ON(1)),\n log_memory_(LogMemory::IsEnabled()),\n step_id_(args.step_id),\n trace_id_(args.function_trace_id ? 
*args.function_trace_id : step_id_),\n start_time_usecs_(args.start_time_usecs),\n deadline_(args.deadline),\n rendezvous_(args.rendezvous),\n collective_executor_(args.collective_executor),\n session_config_(args.session_config),\n session_state_(args.session_state),\n session_handle_(args.session_handle),\n session_metadata_(immutable_state.params().session_metadata),\n tensor_store_(args.tensor_store),\n step_container_(args.step_container),\n stats_collector_(args.stats_collector),\n event_collector_(tsl::tracing::GetEventCollector(\n tsl::tracing::EventCategory::kCompute)),\n context_(ContextKind::kThread),\n slice_reader_cache_(new checkpoint::TensorSliceReaderCacheWrapper),\n call_frame_(args.call_frame),\n immutable_state_(immutable_state),\n kernel_stats_(kernel_stats),\n cancellation_manager_(args.cancellation_manager),\n coordination_service_agent_(args.coordination_service_agent),\n stack_trace_(args.stack_trace),\n runner_(args.runner),\n sync_on_finish_(args.sync_on_finish),\n run_all_kernels_inline_(args.run_all_kernels_inline),\n propagator_(immutable_state, step_id_, vlog_),\n num_outstanding_ops_(0) {\n if (args.user_intra_op_threadpool != nullptr) {\n Device* device = immutable_state_.params().device;\n user_device_ = RenamedDevice::NewRenamedDevice(\n device->name(), device, false, false, args.user_intra_op_threadpool);\n }\n}\ntemplate \nExecutorState::~ExecutorState() {\n if (device_context_) {\n device_context_->Unref();\n }\n delete slice_reader_cache_;\n}\ntemplate \ntemplate \nvoid ExecutorState::RunTask(Closure&& c, int sample_rate) {\n alignas(64) static std::atomic num_enqueue_ops{0};\n alignas(64) static std::atomic num_dequeue_ops{0};\n auto n_enqueues = num_enqueue_ops.fetch_add(1, std::memory_order_relaxed);\n if (n_enqueues % std::max(16, sample_rate) == 0) {\n auto n_dequeues = num_dequeue_ops.load(std::memory_order_relaxed);\n metrics::UpdateGraphPendingQueueLength(n_enqueues - n_dequeues);\n }\n runner_([c = std::forward(c)]() mutable {\n num_dequeue_ops.fetch_add(1, std::memory_order_relaxed);\n std::forward(c)();\n });\n}\ntemplate \nvoid ExecutorState::RunAsync(Executor::DoneCallback done) {\n TaggedNodeSeq ready;\n Device* device = immutable_state_.params().device;\n const Status get_context_status =\n device->TryGetDeviceContext(&device_context_);\n if (!get_context_status.ok()) {\n delete this;\n done(get_context_status);\n return;\n }\n ready.reserve(immutable_state_.root_nodes().size());\n propagator_.ActivateRoots(immutable_state_.root_nodes(), &ready);\n num_outstanding_ops_ = ready.size();\n if (ready.empty()) {\n delete this;\n done(absl::OkStatus());\n } else {\n done_cb_ = std::move(done);\n ScheduleReady(&ready, nullptr);\n }\n}\ntemplate \nstruct ExecutorState::AsyncState {\n AsyncState(const OpKernelContext::Params& p, const TaggedNode& _tagged_node,\n const NodeItem* _item, Entry* _first_input,\n NodeExecStatsInterface* _stats)\n : saved_inputs(p.inputs.begin(), p.inputs.end()),\n saved_input_alloc_attrs(p.input_alloc_attrs.begin(),\n p.input_alloc_attrs.end()),\n params(p),\n tagged_node(_tagged_node),\n item(_item),\n first_input(_first_input),\n ctx(ParamsButClearingEigenGPUDevice(&params), item->num_outputs),\n stats(_stats) {\n params.inputs = saved_inputs;\n params.input_alloc_attrs = saved_input_alloc_attrs;\n }\n TensorValueVec saved_inputs;\n AllocatorAttributeVec saved_input_alloc_attrs;\n OpKernelContext::Params params;\n TaggedNode tagged_node;\n const NodeItem* item;\n Entry* first_input;\n OpKernelContext ctx;\n 
NodeExecStatsInterface* stats;\n private:\n OpKernelContext::Params* ParamsButClearingEigenGPUDevice(\n OpKernelContext::Params* p) {\n p->eigen_gpu_device = nullptr; \n return p;\n }\n};\nbool MightTrace(const tsl::tracing::EventCollector* event_collector,\n bool is_expensive) {\n if (event_collector != nullptr) {\n return true;\n }\n if (tsl::profiler::ScopedAnnotation::IsEnabled()) return true;\n return tsl::profiler::TraceMe::Active(\n tsl::profiler::GetTFTraceMeLevel(is_expensive));\n}\ntemplate \nStatus ExecutorState::ProcessSync(\n const NodeItem& item, OpKernelContext::Params* params, EntryVector* outputs,\n NodeExecStatsInterface* stats) {\n Status s;\n OpKernelContext ctx(params, item.num_outputs);\n nodestats::SetOpStart(stats);\n OpKernel* op_kernel = item.kernel;\n Device* device = immutable_state_.params().device;\n const bool is_expensive = kernel_stats_->IsExpensive(item);\n if (TF_PREDICT_FALSE(MightTrace(event_collector_, is_expensive))) {\n tsl::tracing::ScopedRegion region(tsl::tracing::EventCategory::kCompute,\n op_kernel->name_view());\n profiler::AnnotatedTraceMe activity(\n [op_kernel, &ctx] {\n return op_kernel->TraceString(\n ctx, tsl::profiler::TfOpDetailsEnabled());\n },\n tsl::profiler::GetTFTraceMeLevel(is_expensive));\n device->Compute(op_kernel, &ctx);\n } else if (kernel_stats_->HasExpensiveMarker(item)) {\n KernelTimer timer;\n device->Compute(op_kernel, &ctx);\n constexpr int kKernelExecutionTrackingInvocationSkipCount = 16;\n if (is_expensive ||\n timer.start_cycles % kKernelExecutionTrackingInvocationSkipCount == 0) {\n kernel_stats_->UpdateCostEstimate(item, timer.ElapsedCycles());\n }\n } else {\n device->Compute(op_kernel, &ctx);\n }\n nodestats::SetOpEnd(stats);\n if (outputs->size() < item.num_outputs) outputs->resize(item.num_outputs);\n s = ProcessOutputs(item, &ctx, outputs->data(), stats);\n nodestats::SetMemory(stats, &ctx);\n return s;\n}\ntemplate \nvoid ExecutorState::ProcessAsync(\n const NodeItem& item, const OpKernelContext::Params& params,\n const TaggedNode& tagged_node, Entry* first_input,\n NodeExecStatsInterface* stats, activity_watcher::ActivityId activity_id) {\n AsyncOpKernel* async_kernel = item.kernel->AsAsync();\n DCHECK(async_kernel != nullptr);\n AsyncState* state =\n new AsyncState(params, tagged_node, &item, first_input, stats);\n nodestats::SetOpStart(stats);\n {\n profiler::AnnotatedTraceMe activity(\n [async_kernel, state] {\n return async_kernel->TraceString(\n state->ctx, tsl::profiler::TfOpDetailsEnabled());\n },\n tsl::profiler::GetTFTraceMeLevel(false));\n tsl::profiler::TraceMeProducer producer(\n [&] {\n return tsl::profiler::TraceMeEncode(\n \"ExecutorState::ProcessAsync::Start\",\n {{\"name\", async_kernel->name()},\n {\"kernel_type\", async_kernel->type_string()},\n {\"step_id\", step_id_}});\n },\n tsl::profiler::ContextType::kTfExecutor);\n auto done = [this, state, activity_id, ctx_id = producer.GetContextId()]() {\n tsl::profiler::TraceMeConsumer consumer(\n [&] {\n return profiler::TraceMeEncode(\n \"ExecutorState::ProcessAsync::Done\",\n {{\"name\", state->item->kernel->name()},\n {\"kernel_type\", state->item->kernel->type_string()},\n {\"step_id\", step_id_}});\n },\n tsl::profiler::ContextType::kTfExecutor, ctx_id);\n Device* device = immutable_state_.params().device;\n NodeExecStatsInterface* stats = state->stats; \n Entry* first_input = state->first_input; \n nodestats::SetOpEnd(stats);\n EntryVector outputs(state->item->num_outputs);\n Status s =\n ProcessOutputs(*state->item, &state->ctx, 
outputs.data(), stats);\n nodestats::SetMemory(stats, &state->ctx);\n if (vlog_) {\n VLOG(2) << \"Async kernel done: \" << state->item->node_id << \" step \"\n << step_id_ << \" \"\n << SummarizeNodeDef(state->item->kernel->def())\n << (state->tagged_node.get_is_dead() ? \" is dead\" : \"\")\n << \" device: \" << device->name();\n }\n const int num_inputs = state->item->num_inputs;\n for (int i = 0; i < num_inputs; ++i) {\n (first_input + i)->ClearVal();\n }\n propagator_.MaybeMarkCompleted(state->tagged_node);\n activity_watcher::ActivityEnd(activity_id);\n TaggedNodeSeq ready;\n if (s.ok()) {\n propagator_.PropagateOutputs(state->tagged_node, &outputs, &ready);\n }\n outputs.clear();\n const bool completed = NodeDone(s, &ready, stats, nullptr);\n delete state;\n if (completed) ScheduleFinish();\n };\n immutable_state_.params().device->ComputeAsync(async_kernel, &state->ctx,\n std::move(done));\n }\n}\ntemplate \nvoid ExecutorState::ProcessNoop(\n NodeExecStatsInterface* stats) {\n nodestats::SetOpStart(stats);\n nodestats::SetOpEnd(stats);\n}\ntemplate \nvoid ExecutorState::ProcessConstTensor(\n const NodeItem& item, EntryVector* outputs, NodeExecStatsInterface* stats) {\n nodestats::SetOpStart(stats);\n nodestats::SetOpEnd(stats);\n Entry& output = (*outputs)[0];\n output.state = Entry::State::HAS_CONST_TENSOR;\n output.const_tensor = item.const_tensor;\n output.alloc_attr = item.output_attrs()[0];\n}\ntemplate \nvoid ExecutorState::Process(const TaggedNode& tagged_node,\n int64_t scheduled_nsec) {\n tsl::profiler::TraceMe traceme(\"ExecutorState::Process Scheduled\",\n tsl::profiler::TraceMeLevel::kVerbose);\n TaggedNodeReadyQueue inline_ready;\n inline_ready.push_back(tagged_node);\n return ProcessInline(&inline_ready, scheduled_nsec);\n}\ntemplate \nvoid ExecutorState::ProcessInline(\n TaggedNodeReadyQueue* inline_ready, int64_t scheduled_nsec) {\n WithContext wc(context_);\n auto ready = std::make_unique();\n auto inputs = std::make_unique();\n AllocatorAttributeVec input_alloc_attrs;\n auto params = std::make_unique();\n params->step_id = step_id_;\n Device* device = immutable_state_.params().device;\n if (user_device_) {\n params->device = user_device_.get();\n } else {\n params->device = device;\n }\n params->start_time_usecs = start_time_usecs_;\n params->deadline = deadline_;\n params->log_memory = log_memory_;\n params->rendezvous = rendezvous_;\n params->collective_executor = collective_executor_;\n params->session_config = session_config_;\n params->session_state = session_state_;\n params->session_handle = session_handle_;\n params->session_metadata = session_metadata_;\n params->tensor_store = tensor_store_;\n params->cancellation_manager = cancellation_manager_;\n params->coordination_service_agent = coordination_service_agent_;\n params->stack_trace = stack_trace_;\n params->call_frame = call_frame_;\n params->function_library = immutable_state_.params().function_library;\n params->resource_manager = device->resource_manager();\n params->step_container = step_container_;\n params->slice_reader_cache = slice_reader_cache_;\n params->runner = &runner_;\n params->run_all_kernels_inline = run_all_kernels_inline_;\n params->stats_collector = stats_collector_;\n params->inc_num_deferred_ops_function = [this]() {\n mutex_lock lock(num_deferred_ops_mu_);\n num_deferred_ops_++;\n };\n params->dec_num_deferred_ops_function = [this]() {\n bool finish_when_deferred_ops_done = false;\n {\n mutex_lock lock(num_deferred_ops_mu_);\n num_deferred_ops_--;\n if (num_deferred_ops_ == 0) {\n 
finish_when_deferred_ops_done = finish_when_deferred_ops_done_;\n }\n }\n if (finish_when_deferred_ops_done) Finish();\n };\n params->op_device_context = device_context_;\n Status s;\n NodeExecStatsInterface* stats = nullptr;\n EntryVector outputs(1);\n bool completed = false;\n int64_t last_iter_num = -1;\n std::unique_ptr iteration_scope;\n while (!inline_ready->empty()) {\n TaggedNode tagged_node = inline_ready->front();\n int64_t current_iter_num = tagged_node.get_iter_num();\n if (current_iter_num != last_iter_num) {\n iteration_scope = std::make_unique(\n [&] {\n return profiler::TraceMeEncode(\n \"ExecutorState::Process\",\n {{\"id\", step_id_}, {\"iter_num\", tagged_node.get_iter_num()}});\n },\n tsl::profiler::ContextType::kTfExecutor, trace_id_,\n tsl::profiler::TraceMeLevel::kInfo);\n last_iter_num = current_iter_num;\n }\n inline_ready->pop_front();\n const NodeItem& item = tagged_node.get_node_item();\n const int id = item.node_id;\n propagator_.MaybeMarkStarted(tagged_node);\n const activity_watcher::ActivityId activity_id =\n activity_watcher::ActivityStart(\n [&]() {\n return std::make_unique(\n \"ExecutorState::Process\",\n activity_watcher::ActivityCategory::kMisc,\n activity_watcher::Activity::Attributes{\n {\"node_name\", item.kernel->def().name()},\n {\"op\", item.kernel->def().op()},\n {\"iter_num\", absl::StrCat(tagged_node.get_iter_num())},\n {\"step_id\", absl::StrCat(params->step_id)},\n {\"node_id\", absl::StrCat(id)},\n {\"device\", device->name()},\n {\"inputs\",\n absl::StrJoin(item.kernel->def().input(), \"; \")},\n {\"original_node_names\",\n absl::StrJoin(item.kernel->def()\n .experimental_debug_info()\n .original_node_names(),\n \"; \")},\n {\"original_func_names\",\n absl::StrJoin(item.kernel->def()\n .experimental_debug_info()\n .original_func_names(),\n \"; \")},\n });\n },\n 2);\n params->track_allocations = false;\n stats = nullptr;\n if (stats_collector_ && !tagged_node.get_is_dead()) {\n stats = stats_collector_->CreateNodeExecStats(&item.kernel->def());\n params->track_allocations = stats ? stats->TrackAllocations() : false;\n nodestats::SetScheduled(stats, scheduled_nsec);\n nodestats::SetAllStart(stats);\n }\n if (vlog_) {\n VLOG(1) << \"Process node: \" << id << \" step \" << params->step_id << \" \"\n << SummarizeNodeDef(item.kernel->def())\n << (tagged_node.get_is_dead() ? 
\" is dead\" : \"\")\n << \" device: \" << device->name();\n }\n Entry* first_input = propagator_.GetInputTensors(tagged_node);\n bool launched_asynchronously = false;\n if (tagged_node.get_is_dead() && !item.is_transfer_node) {\n if (outputs.size() < item.num_outputs) outputs.resize(item.num_outputs);\n } else if (TF_PREDICT_FALSE(item.is_noop)) {\n ProcessNoop(stats);\n } else if (item.const_tensor != nullptr && !params->track_allocations) {\n ProcessConstTensor(item, &outputs, stats);\n } else {\n bool is_input_dead = false;\n s = PrepareInputs(item, first_input, inputs.get(), &input_alloc_attrs,\n &is_input_dead);\n if (!s.ok()) {\n const int num_inputs = item.num_inputs;\n for (int i = 0; i < num_inputs; ++i) {\n (first_input + i)->ClearVal();\n }\n propagator_.MaybeMarkCompleted(tagged_node);\n activity_watcher::ActivityEnd(activity_id);\n completed = NodeDone(s, ready.get(), stats, inline_ready);\n continue;\n }\n params->op_kernel = item.kernel;\n params->frame_iter = propagator_.GetFrameAndIter(tagged_node);\n params->is_input_dead = is_input_dead;\n params->output_attr_array = item.output_attrs();\n params->forward_from_array = item.forward_from();\n params->outputs_required_array = item.outputs_required.get();\n params->inputs = *inputs;\n params->input_alloc_attrs = input_alloc_attrs;\n if (item.kernel_is_async) {\n ProcessAsync(item, *params, tagged_node, first_input, stats,\n activity_id);\n launched_asynchronously = true;\n } else {\n s = ProcessSync(item, params.get(), &outputs, stats);\n }\n }\n if (!launched_asynchronously) {\n if (vlog_) {\n VLOG(2) << \"Synchronous kernel done: \" << id << \" step \"\n << params->step_id << \" \"\n << SummarizeNodeDef(item.kernel->def())\n << (tagged_node.get_is_dead() ? \" is dead: \" : \"\")\n << \" device: \" << device->name();\n }\n const int num_inputs = item.num_inputs;\n for (int i = 0; i < num_inputs; ++i) {\n (first_input + i)->ClearVal();\n }\n propagator_.MaybeMarkCompleted(tagged_node);\n activity_watcher::ActivityEnd(activity_id);\n if (s.ok()) {\n propagator_.PropagateOutputs(tagged_node, &outputs, ready.get());\n }\n const int num_outputs = item.num_outputs;\n for (int i = 0; i < num_outputs; ++i) {\n outputs[i].ClearVal();\n }\n if (stats) {\n scheduled_nsec = nodestats::NowInNsec();\n }\n completed = NodeDone(s, ready.get(), stats, inline_ready);\n }\n } \n if (completed) ScheduleFinish();\n}\ntemplate \nStatus ExecutorState::PrepareInputs(\n const NodeItem& item, Entry* first_input, TensorValueVec* inputs,\n AllocatorAttributeVec* input_alloc_attrs, bool* is_input_dead) {\n inputs->resize(item.num_inputs);\n input_alloc_attrs->resize(item.num_inputs);\n *is_input_dead = false;\n for (int i = 0; i < item.num_inputs; ++i) {\n const bool expect_ref = TF_PREDICT_FALSE(item.is_any_input_ref_typed) &&\n IsRefType(item.input_type(i));\n Entry* entry = first_input + i;\n (*input_alloc_attrs)[i] = entry->alloc_attr;\n TensorValue* inp = &(*inputs)[i];\n switch (entry->state) {\n case Entry::State::NO_VALUE: {\n inp->mutex_if_ref = nullptr;\n if (item.is_merge) {\n inp->tensor = nullptr;\n } else {\n DCHECK(item.is_transfer_node)\n << item.kernel->name() << \" - input \" << i;\n entry->state = Entry::State::HAS_CONST_TENSOR;\n entry->const_tensor = kEmptyTensor;\n inp->tensor = const_cast(kEmptyTensor);\n *is_input_dead = true;\n }\n break;\n }\n case Entry::State::HAS_VALUE: {\n if (TF_PREDICT_FALSE(expect_ref)) {\n return AttachDef(\n errors::InvalidArgument(i, \"-th input expects a ref type\"),\n item.kernel->def());\n }\n 
inp->mutex_if_ref = nullptr;\n inp->tensor = entry->val.get();\n break;\n }\n case Entry::State::HAS_CONST_TENSOR: {\n if (TF_PREDICT_FALSE(expect_ref)) {\n return AttachDef(\n errors::InvalidArgument(i, \"-th input expects a ref type\"),\n item.kernel->def());\n }\n inp->mutex_if_ref = nullptr;\n inp->tensor = const_cast(entry->const_tensor);\n break;\n }\n case Entry::State::HAS_REF_TENSOR: {\n {\n tf_shared_lock ml(*entry->ref_tensor.mu);\n if (TF_PREDICT_FALSE(!entry->ref_tensor.tensor->IsInitialized() &&\n !item.is_initialization_op)) {\n return AttachDef(errors::FailedPrecondition(\n \"Attempting to use uninitialized value \",\n item.kernel->requested_input(i)),\n item.kernel->def());\n }\n }\n if (expect_ref) {\n inp->mutex_if_ref = entry->ref_tensor.mu;\n inp->tensor = entry->ref_tensor.tensor;\n } else {\n {\n mutex* ref_mu = entry->ref_tensor.mu;\n Tensor* ref_tensor = entry->ref_tensor.tensor;\n tf_shared_lock l(*ref_mu);\n entry->val.Init(*ref_tensor);\n }\n entry->state = Entry::State::HAS_VALUE;\n inp->mutex_if_ref = nullptr;\n inp->tensor = entry->val.get();\n if (TF_PREDICT_FALSE(item.input_type(i) != inp->tensor->dtype())) {\n return AttachDef(\n errors::InvalidArgument(\n i, \"-th input expects type \",\n DataTypeString(item.input_type(i)),\n \" but automatically dereferenced input tensor has type \",\n DataTypeString(inp->tensor->dtype())),\n item.kernel->def());\n }\n }\n break;\n }\n }\n }\n return absl::OkStatus();\n}\ntemplate \nStatus ExecutorState::ProcessOutputs(\n const NodeItem& item, OpKernelContext* ctx, Entry* outputs,\n NodeExecStatsInterface* stats) {\n Status s = ctx->status();\n if (!s.ok()) {\n s = AttachDef(s, item.kernel->def());\n if (vlog_ && VLOG_IS_ON(1)) {\n LOG(WARNING) << this << \" Compute status: \" << s;\n }\n if (s.code() == error::RESOURCE_EXHAUSTED) {\n if (stats_collector_) {\n string err =\n stats_collector_->ReportAllocsOnResourceExhausted(s.message());\n s = errors::CreateWithUpdatedMessage(s,\n strings::StrCat(s.message(), err));\n } else {\n s = errors::CreateWithUpdatedMessage(\n s,\n strings::StrCat(\n s.message(),\n \"\\nHint: If you want to see a list of allocated tensors when \"\n \"OOM happens, add report_tensor_allocations_upon_oom \"\n \"to RunOptions for current allocation info. 
This isn't \"\n \"available when running in Eager mode.\\n\"));\n }\n } else if (s.code() == error::UNAVAILABLE &&\n !item.is_distributed_communication) {\n s = errors::ReplaceErrorFromNonCommunicationOps(s, item.kernel->name());\n }\n return ADD_SOURCE_LOCATION(s);\n }\n for (int i = 0; i < item.num_outputs; ++i) {\n const TensorValue val = ctx->release_output(i);\n Entry* out = &outputs[i];\n DCHECK(out->state == Entry::State::NO_VALUE);\n if (val.tensor == nullptr) {\n if (!(item.is_recv_or_switch ||\n (item.outputs_required && !item.outputs_required[i]))) {\n s.Update(errors::Internal(\"Missing \", i, \"-th output from \",\n FormatNodeDefForError(item.kernel->def())));\n }\n } else {\n out->alloc_attr = ctx->output_alloc_attr(i);\n DataType dtype = val.dtype_safe();\n if (dtype == item.output_type(i)) {\n if (stats && val.tensor->IsInitialized()) {\n nodestats::SetOutput(stats, i, val.tensor);\n }\n if (val.is_ref()) {\n out->state = Entry::State::HAS_REF_TENSOR;\n out->ref_tensor.tensor = val.tensor;\n out->ref_tensor.mu = val.mutex_if_ref;\n if (log_memory_) {\n Tensor to_log;\n {\n tf_shared_lock l(*out->ref_tensor.mu);\n to_log = *out->ref_tensor.tensor;\n }\n LogMemory::RecordTensorOutput(ctx->op_kernel().name(),\n ctx->step_id(), i, to_log);\n }\n } else {\n out->state = Entry::State::HAS_VALUE;\n out->val.Init(std::move(*val.tensor));\n if (log_memory_) {\n LogMemory::RecordTensorOutput(ctx->op_kernel().name(),\n ctx->step_id(), i, *out->val);\n }\n }\n } else {\n s.Update(\n errors::Internal(\"Output \", i, \" of type \", DataTypeString(dtype),\n \" does not match declared output type \",\n DataTypeString(item.output_type(i)), \" for node \",\n FormatNodeDefForError(item.kernel->def())));\n }\n }\n if (!val.is_ref()) {\n delete val.tensor;\n }\n }\n return s;\n}\ntemplate \nbool ExecutorState::NodeDone(\n const Status& s, TaggedNodeSeq* ready, NodeExecStatsInterface* stats,\n TaggedNodeReadyQueue* inline_ready) {\n if (stats) {\n nodestats::SetAllEnd(stats);\n DCHECK_NE(stats_collector_, nullptr);\n stats->Done(immutable_state_.params().device->name());\n }\n if (TF_PREDICT_TRUE(s.ok())) {\n const size_t ready_size = ready->size();\n if (ready_size == 0) {\n return num_outstanding_ops_.fetch_sub(1) == 1;\n } else {\n if (ready_size > 1) {\n num_outstanding_ops_.fetch_add(ready_size - 1,\n std::memory_order_relaxed);\n }\n ScheduleReady(ready, inline_ready);\n return false;\n }\n } else {\n bool abort_run = false;\n Status maybe_derived_s(s);\n {\n mutex_lock l(mu_);\n if (status_.ok()) {\n abort_run = true;\n if (cancellation_manager_ && cancellation_manager_->IsCancelled() &&\n (errors::IsCancelled(s) || errors::IsAborted(s))) {\n status_ = StatusGroup::MakeDerived(s);\n maybe_derived_s = status_;\n } else {\n status_ = s;\n }\n }\n }\n if (abort_run) {\n TRACEPRINTF(\"StartAbort: %s\", s.ToString());\n if (cancellation_manager_) {\n VLOG(1) << \"[\" << immutable_state_.params().device->name()\n << \"] Executor start aborting: \" << s;\n }\n if (rendezvous_) {\n rendezvous_->StartAbort(s);\n }\n if (cancellation_manager_) {\n cancellation_manager_->StartCancelWithStatus(maybe_derived_s);\n } else if (collective_executor_) {\n collective_executor_->StartAbort(s);\n }\n }\n return num_outstanding_ops_.fetch_sub(1) == 1;\n }\n}\ntemplate \nvoid ExecutorState::ScheduleReady(\n TaggedNodeSeq* ready, TaggedNodeReadyQueue* inline_ready) {\n tsl::profiler::TraceMe activity(\n [&]() {\n return strings::StrCat(\n \"ExecutorState::ScheduleReady#\",\n \"ready_size=\", (ready == nullptr ? 
-1 : ready->size()),\n \",inline_ready_size=\",\n (inline_ready == nullptr ? -1 : inline_ready->size()), \"#\");\n },\n tsl::profiler::GetTFTraceMeLevel(false));\n DCHECK(!ready->empty());\n int64_t scheduled_nsec = 0;\n if (stats_collector_) {\n scheduled_nsec = nodestats::NowInNsec();\n }\n if (run_all_kernels_inline_) {\n if (inline_ready == nullptr) {\n RunTask([this, ready = std::move(*ready), scheduled_nsec]() {\n for (auto& tagged_node : ready) {\n Process(tagged_node, scheduled_nsec);\n }\n });\n } else {\n for (auto& tagged_node : *ready) {\n inline_ready->push_back(tagged_node);\n }\n }\n } else {\n const TaggedNode* curr_expensive_node = nullptr;\n TaggedNodeSeq expensive_nodes;\n if (inline_ready == nullptr) {\n for (auto& tagged_node : *ready) {\n RunTask([=]() { Process(tagged_node, scheduled_nsec); },\n ready->size());\n }\n } else {\n for (auto& tagged_node : *ready) {\n const NodeItem& item = *tagged_node.node_item;\n if (tagged_node.get_is_dead() || !kernel_stats_->IsExpensive(item)) {\n inline_ready->push_back(tagged_node);\n } else {\n if (curr_expensive_node) {\n expensive_nodes.push_back(*curr_expensive_node);\n }\n curr_expensive_node = &tagged_node;\n }\n }\n }\n if (curr_expensive_node) {\n if (inline_ready->empty()) {\n inline_ready->push_back(*curr_expensive_node);\n } else {\n expensive_nodes.push_back(*curr_expensive_node);\n }\n }\n if (!expensive_nodes.empty()) {\n if (expensive_nodes.size() < kInlineScheduleReadyThreshold) {\n for (auto& tagged_node : expensive_nodes) {\n RunTask(std::bind(&ExecutorState::Process, this, tagged_node,\n scheduled_nsec),\n expensive_nodes.size());\n }\n } else {\n auto it = expensive_nodes.begin();\n while (it < expensive_nodes.end()) {\n auto end = it;\n std::advance(end, kInlineScheduleReadyThreshold);\n if (end > expensive_nodes.end()) {\n end = expensive_nodes.end();\n }\n TaggedNodeSeq ready_chunk{it, end};\n RunTask(\n [this, ready_chunk = std::move(ready_chunk), scheduled_nsec]() {\n tsl::profiler::TraceMe activity(\n [&]() {\n return strings::StrCat(\n \"ExecutorState::ScheduleReady::\"\n \"ChildThreadExpensiveNodes#\",\n \"ready_chunk_size=\", ready_chunk.size(), \"#\");\n },\n tsl::profiler::GetTFTraceMeLevel(false));\n for (auto& tagged_node : ready_chunk) {\n RunTask(std::bind(&ExecutorState::Process, this, tagged_node,\n scheduled_nsec),\n ready_chunk.size());\n }\n });\n it = end;\n }\n }\n }\n }\n ready->clear();\n}\ntemplate \nvoid ExecutorState::ScheduleFinish() {\n {\n mutex_lock lock(num_deferred_ops_mu_);\n if (num_deferred_ops_ > 0) {\n finish_when_deferred_ops_done_ = true;\n return;\n }\n }\n Finish();\n}\ntemplate \nvoid ExecutorState::Finish() {\n mu_.lock();\n auto status = status_;\n auto done_cb = std::move(done_cb_);\n auto runner = std::move(runner_);\n mu_.unlock();\n int64_t trace_id = trace_id_;\n int64_t step_id = step_id_;\n CHECK(done_cb != nullptr);\n Device* device = immutable_state_.params().device;\n if (vlog_ && !status.ok() && VLOG_IS_ON(1)) {\n propagator_.DumpState();\n }\n if (!device->AllowsSyncOnCompletion()) {\n status.Update(device->RefreshStatus());\n if (!status.ok()) {\n if (rendezvous_) {\n rendezvous_->StartAbort(status);\n }\n if (cancellation_manager_) {\n cancellation_manager_->StartCancelWithStatus(status);\n } else if (collective_executor_) {\n collective_executor_->StartAbort(status);\n }\n }\n delete this;\n runner([step_id, trace_id, status, done_cb = std::move(done_cb)]() {\n tsl::profiler::TraceMeConsumer activity(\n [&] {\n return 
tsl::profiler::TraceMeEncode(\"ExecutorDoneCallback\",\n {{\"id\", step_id}});\n },\n tsl::profiler::ContextType::kTfExecutor, trace_id,\n tsl::profiler::TraceMeLevel::kInfo);\n done_cb(status);\n });\n return;\n }\n if (sync_on_finish_ && status.ok()) {\n device->Sync([this, step_id, trace_id, runner = std::move(runner),\n done_cb = std::move(done_cb)](const Status& status) mutable {\n delete this;\n runner([step_id, trace_id, status, done_cb = std::move(done_cb)]() {\n tsl::profiler::TraceMeConsumer activity(\n [&] {\n return tsl::profiler::TraceMeEncode(\"ExecutorDoneCallback\",\n {{\"id\", step_id}});\n },\n tsl::profiler::ContextType::kTfExecutor, trace_id,\n tsl::profiler::TraceMeLevel::kInfo);\n done_cb(status);\n });\n });\n } else {\n delete this;\n runner([step_id, trace_id, status, done_cb = std::move(done_cb)]() {\n tsl::profiler::TraceMeConsumer activity(\n [&] {\n return tsl::profiler::TraceMeEncode(\"ExecutorDoneCallback\",\n {{\"id\", step_id}});\n },\n tsl::profiler::ContextType::kTfExecutor, trace_id,\n tsl::profiler::TraceMeLevel::kInfo);\n done_cb(status);\n });\n }\n}\nvoid ExecutorImpl::RunAsyncInternal(const Args& args, DoneCallback done) {\n if (OpOrderDeterminismRequired()) {\n (new ExecutorState(args, immutable_state_,\n &kernel_stats_))\n ->RunAsync(std::move(done));\n } else if (immutable_state_.requires_control_flow_support()) {\n (new ExecutorState(args, immutable_state_, &kernel_stats_))\n ->RunAsync(std::move(done));\n } else {\n (new ExecutorState(args, immutable_state_,\n &kernel_stats_))\n ->RunAsync(std::move(done));\n }\n}\n} \nStatus NewLocalExecutor(const LocalExecutorParams& params, const Graph& graph,\n Executor** executor) {\n ExecutorImpl* impl = new ExecutorImpl(params);\n const Status s = impl->Initialize(graph);\n if (s.ok()) {\n *executor = impl;\n } else {\n delete impl;\n }\n return s;\n}\nStatus CreateNonCachedKernel(Device* device, FunctionLibraryRuntime* flib,\n const std::shared_ptr& props,\n int graph_def_version, OpKernel** kernel) {\n const auto device_type = DeviceType(device->attributes().device_type());\n auto allocator = device->GetAllocator(AllocatorAttributes());\n return CreateOpKernel(device_type, device, allocator, flib,\n device->resource_manager(), props, graph_def_version,\n kernel);\n}\nvoid DeleteNonCachedKernel(OpKernel* kernel) { delete kernel; }\nnamespace {\nclass DefaultExecutorRegistrar {\n public:\n DefaultExecutorRegistrar() {\n Factory* factory = new Factory;\n ExecutorFactory::Register(\"\", factory);\n ExecutorFactory::Register(\"DEFAULT\", factory);\n }\n private:\n class Factory : public ExecutorFactory {\n Status NewExecutor(const LocalExecutorParams& params, const Graph& graph,\n std::unique_ptr* out_executor) override {\n Executor* ret = nullptr;\n TF_RETURN_IF_ERROR(NewLocalExecutor(params, std::move(graph), &ret));\n out_executor->reset(ret);\n return absl::OkStatus();\n }\n };\n};\nstatic DefaultExecutorRegistrar registrar;\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/common_runtime/executor.h\"\n#include \n#include \"tensorflow/cc/framework/ops.h\"\n#include \"tensorflow/cc/ops/array_ops.h\"\n#include \"tensorflow/cc/ops/const_op.h\"\n#include \"tensorflow/cc/ops/control_flow_ops_internal.h\"\n#include \"tensorflow/cc/ops/function_ops.h\"\n#include \"tensorflow/cc/ops/standard_ops.h\"\n#include \"tensorflow/core/common_runtime/device.h\"\n#include \"tensorflow/core/common_runtime/device_factory.h\"\n#include 
\"tensorflow/core/common_runtime/graph_constructor.h\"\n#include \"tensorflow/core/common_runtime/kernel_benchmark_testlib.h\"\n#include \"tensorflow/core/common_runtime/lower_functional_ops.h\"\n#include \"tensorflow/core/common_runtime/process_util.h\"\n#include \"tensorflow/core/common_runtime/step_stats_collector.h\"\n#include \"tensorflow/core/framework/attr_value.pb.h\"\n#include \"tensorflow/core/framework/local_rendezvous.h\"\n#include \"tensorflow/core/framework/op.h\"\n#include \"tensorflow/core/framework/rendezvous.h\"\n#include \"tensorflow/core/framework/step_stats.pb.h\"\n#include \"tensorflow/core/framework/tensor_testutil.h\"\n#include \"tensorflow/core/framework/versions.pb.h\"\n#include \"tensorflow/core/graph/algorithm.h\"\n#include \"tensorflow/core/graph/testlib.h\"\n#include \"tensorflow/core/lib/core/status_test_util.h\"\n#include \"tensorflow/core/lib/random/simple_philox.h\"\n#include \"tensorflow/core/lib/strings/strcat.h\"\n#include \"tensorflow/core/platform/logging.h\"\n#include \"tensorflow/core/platform/strcat.h\"\n#include \"tensorflow/core/platform/test.h\"\n#include \"tensorflow/core/platform/test_benchmark.h\"\n#include \"tensorflow/core/public/session_options.h\"\nnamespace tensorflow {\nclass ExecutorTest : public ::testing::Test {\n protected:\n ExecutorTest()\n : device_(DeviceFactory::NewDevice(\"CPU\", {},\n \"/job:localhost/replica:0/task:0\")),\n step_stats_collector_(&step_stats_) {\n SessionOptions options;\n thread_pool_ = ComputePool(options);\n }\n ~ExecutorTest() override {\n while (!rendez_->RefCountIsOne()) {\n LOG(INFO) << \"Waiting for rendezvous to release. Current refcount: \"\n << rendez_->RefCount();\n absl::SleepFor(absl::Milliseconds(200));\n LocalRendezvous::ReleaseAbortedRendezvous();\n }\n CHECK(rendez_->Unref());\n delete exec_;\n }\n void Create(std::unique_ptr graph) {\n const int version = graph->versions().producer();\n LocalExecutorParams params;\n params.device = device_.get();\n params.create_kernel =\n [this, version](const std::shared_ptr& props,\n OpKernel** kernel) {\n return CreateNonCachedKernel(device_.get(), nullptr, props, version,\n kernel);\n };\n params.delete_kernel = [](OpKernel* kernel) {\n DeleteNonCachedKernel(kernel);\n };\n rendez_ = NewLocalRendezvous();\n delete exec_;\n TF_CHECK_OK(NewLocalExecutor(params, *graph, &exec_));\n runner_ = [this](std::function fn) { thread_pool_->Schedule(fn); };\n }\n Status Run(Rendezvous* rendez) {\n Executor::Args args;\n args.rendezvous = rendez;\n args.stats_collector = &step_stats_collector_;\n args.runner = runner_;\n return exec_->Run(args);\n }\n thread::ThreadPool* thread_pool_ = nullptr;\n std::unique_ptr device_;\n Executor* exec_ = nullptr;\n StepStatsCollector step_stats_collector_;\n StepStats step_stats_;\n Executor::Args::Runner runner_;\n Rendezvous* rendez_ = nullptr;\n};\nTensor V(const float val) {\n Tensor tensor(DT_FLOAT, TensorShape({}));\n tensor.scalar()() = val;\n return tensor;\n}\nTensor VI(const int32_t val) {\n Tensor tensor(DT_INT32, TensorShape({}));\n tensor.scalar()() = val;\n return tensor;\n}\nTensor VB(const bool val) {\n Tensor tensor(DT_BOOL, TensorShape({}));\n tensor.scalar()() = val;\n return tensor;\n}\nTensor VD(const double val) {\n Tensor tensor(DT_DOUBLE, TensorShape({}));\n tensor.scalar()() = val;\n return tensor;\n}\nfloat V(const Tensor& tensor) {\n CHECK_EQ(tensor.dtype(), DT_FLOAT);\n CHECK(TensorShapeUtils::IsScalar(tensor.shape()));\n return tensor.scalar()();\n}\nstatic uint64 kIncarnation = 1; 
\nRendezvous::ParsedKey Key(const string& sender, const uint64 incarnation,\n const string& receiver, const string& name) {\n Rendezvous::ParsedKey result;\n CHECK(\n Rendezvous::ParseKey(Rendezvous::CreateKey(sender, incarnation, receiver,\n name, FrameAndIter(0, 0)),\n &result)\n .ok());\n return result;\n}\n#define ALICE \"/job:j/replica:0/task:0/cpu:0\"\n#define BOB \"/job:j/replica:0/task:0/device:GPU:0\"\nTEST_F(ExecutorTest, SimpleAdd) {\n auto g = std::make_unique(OpRegistry::Global());\n auto in0 = test::graph::Recv(g.get(), \"a\", \"float\", ALICE, 1, BOB);\n auto in1 = test::graph::Recv(g.get(), \"b\", \"float\", ALICE, 1, BOB);\n auto tmp = test::graph::Add(g.get(), in0, in1);\n test::graph::Send(g.get(), tmp, \"c\", BOB, 1, ALICE);\n Create(std::move(g));\n Rendezvous::Args args;\n TF_ASSERT_OK(rendez_->Send(Key(ALICE, kIncarnation, BOB, \"a\"), args, V(1.0),\n false)); \n TF_ASSERT_OK(rendez_->Send(Key(ALICE, kIncarnation, BOB, \"b\"), args, V(1.0),\n false)); \n TF_ASSERT_OK(Run(rendez_));\n Tensor out = V(-1);\n bool is_dead = false;\n TF_ASSERT_OK(\n rendez_->Recv(Key(BOB, kIncarnation, ALICE, \"c\"), args, &out, &is_dead));\n EXPECT_EQ(2.0, V(out)); \n}\nTEST_F(ExecutorTest, SelfAdd) {\n auto g = std::make_unique(OpRegistry::Global());\n auto v = test::graph::Recv(g.get(), \"a\", \"float\", ALICE, 1, BOB);\n const int N = 10;\n for (int i = 1; i <= N; ++i) {\n v = test::graph::Add(g.get(), v, v);\n }\n test::graph::Send(g.get(), v, \"b\", BOB, 1, ALICE);\n Create(std::move(g));\n Rendezvous::Args args;\n TF_ASSERT_OK(\n rendez_->Send(Key(ALICE, kIncarnation, BOB, \"a\"), args, V(1.0), false));\n TF_ASSERT_OK(Run(rendez_));\n Tensor out = V(-1);\n bool is_dead = false;\n TF_ASSERT_OK(\n rendez_->Recv(Key(BOB, kIncarnation, ALICE, \"b\"), args, &out, &is_dead));\n EXPECT_EQ(1024.0, V(out)); \n}\nvoid BuildTree(int N, Graph* g) {\n CHECK_GT(N, 1);\n auto in = test::graph::Recv(g, \"a\", \"float\", ALICE, 1, BOB);\n std::vector nodes;\n int i = 0;\n for (; i < N; ++i) {\n nodes.push_back(test::graph::Identity(g, in, 0));\n }\n random::PhiloxRandom philox(testing::RandomSeed(), 17);\n random::SimplePhilox rnd(&philox);\n while (nodes.size() > 1) {\n int x = rnd.Uniform(nodes.size());\n auto in0 = nodes[x];\n nodes[x] = nodes.back();\n nodes.resize(nodes.size() - 1);\n x = rnd.Uniform(nodes.size());\n auto in1 = nodes[x];\n nodes[x] = test::graph::Add(g, in0, in1);\n }\n test::graph::Send(g, nodes.back(), \"b\", BOB, 1, ALICE);\n}\nTEST_F(ExecutorTest, RandomTree) {\n auto g = std::make_unique(OpRegistry::Global());\n BuildTree(4096, g.get());\n Create(std::move(g));\n Rendezvous::Args args;\n TF_ASSERT_OK(\n rendez_->Send(Key(ALICE, kIncarnation, BOB, \"a\"), args, V(1.0), false));\n TF_ASSERT_OK(Run(rendez_));\n Tensor out = V(-1);\n bool is_dead = false;\n TF_ASSERT_OK(\n rendez_->Recv(Key(BOB, kIncarnation, ALICE, \"b\"), args, &out, &is_dead));\n EXPECT_EQ(4096.0, V(out));\n}\nvoid BuildConcurrentAddAssign(Graph* g) {\n auto one = test::graph::Constant(g, V(1.0));\n auto var = test::graph::Var(g, DT_FLOAT, TensorShape({}));\n auto init = test::graph::Assign(g, var, one);\n auto out = test::graph::Send(g, var, \"out\", ALICE, kIncarnation, BOB);\n for (int i = 0; i < 1024; ++i) {\n auto add = test::graph::Add(g, var, one);\n g->AddControlEdge(init, add); \n auto assign = test::graph::Assign(g, var, add);\n g->AddControlEdge(assign, out);\n }\n}\n#ifndef THREAD_SANITIZER\nTEST_F(ExecutorTest, ConcurrentAddAssign) {\n auto g = std::make_unique(OpRegistry::Global());\n 
BuildConcurrentAddAssign(g.get());\n Create(std::move(g));\n for (int iters = 0; iters < 16; ++iters) {\n Rendezvous* rendez = NewLocalRendezvous();\n TF_ASSERT_OK(Run(rendez));\n Rendezvous::Args args;\n Tensor out;\n bool is_dead;\n TF_ASSERT_OK(rendez->Recv(Key(ALICE, kIncarnation, BOB, \"out\"), args, &out,\n &is_dead));\n VLOG(1) << \"Get \" << V(out);\n EXPECT_LE(V(out), 1025.0);\n rendez->Unref();\n }\n}\n#endif\nTEST_F(ExecutorTest, SimpleSwitchLive) {\n auto g = std::make_unique(OpRegistry::Global());\n auto in0 = test::graph::Recv(g.get(), \"a\", \"float\", ALICE, 1, BOB);\n auto in1 = test::graph::Constant(g.get(), VB(false));\n auto tmp = test::graph::Switch(g.get(), in0, in1);\n test::graph::Send(g.get(), tmp, \"c\", BOB, 1, ALICE);\n Create(std::move(g));\n Rendezvous::Args args;\n TF_ASSERT_OK(rendez_->Send(Key(ALICE, kIncarnation, BOB, \"a\"), args, V(1.0),\n false)); \n TF_ASSERT_OK(Run(rendez_));\n Tensor out = V(-1);\n bool is_dead = false;\n TF_ASSERT_OK(\n rendez_->Recv(Key(BOB, kIncarnation, ALICE, \"c\"), args, &out, &is_dead));\n EXPECT_EQ(1.0, V(out)); \n EXPECT_FALSE(is_dead);\n}\nTEST_F(ExecutorTest, SimpleSwitchDead) {\n auto g = std::make_unique(OpRegistry::Global());\n auto in0 = test::graph::Recv(g.get(), \"a\", \"float\", ALICE, 1, BOB);\n auto in1 = test::graph::Constant(g.get(), VB(true));\n auto tmp = test::graph::Switch(g.get(), in0, in1);\n test::graph::Send(g.get(), tmp, \"c\", BOB, 1, ALICE);\n Create(std::move(g));\n Rendezvous::Args args;\n TF_ASSERT_OK(rendez_->Send(Key(ALICE, kIncarnation, BOB, \"a\"), args, V(1.0),\n false)); \n TF_ASSERT_OK(Run(rendez_));\n Tensor out = V(-1);\n bool is_dead = false;\n TF_ASSERT_OK(\n rendez_->Recv(Key(BOB, kIncarnation, ALICE, \"c\"), args, &out, &is_dead));\n EXPECT_TRUE(is_dead);\n}\nTEST_F(ExecutorTest, Abort) {\n auto g = std::make_unique(OpRegistry::Global());\n auto in0 = test::graph::Recv(g.get(), \"a\", \"float\", ALICE, 1, BOB);\n auto in1 = test::graph::Recv(g.get(), \"b\", \"float\", ALICE, 1, BOB);\n auto in2 = test::graph::Recv(g.get(), \"c\", \"float\", ALICE, 1, BOB);\n auto in3 = test::graph::Recv(g.get(), \"d\", \"float\", ALICE, 1, BOB);\n auto add0 = test::graph::Add(g.get(), in0, in1);\n auto add1 = test::graph::Add(g.get(), in2, in3);\n auto add2 = test::graph::Add(g.get(), add0, add1);\n test::graph::Send(g.get(), add2, \"e\", BOB, 1, ALICE);\n Create(std::move(g));\n rendez_->Ref();\n SchedClosure([this]() {\n Env::Default()->SleepForMicroseconds(100 * 1000);\n Status s = rendez_->Send(Key(ALICE, kIncarnation, BOB, \"a\"),\n Rendezvous::Args(), V(1.0), false);\n rendez_->Unref();\n });\n rendez_->Ref();\n SchedClosure([this]() {\n Env::Default()->SleepForMicroseconds(100 * 1000);\n Status s = rendez_->Send(Key(ALICE, kIncarnation, BOB, \"b\"),\n Rendezvous::Args(), V(1.0), false);\n rendez_->Unref();\n });\n rendez_->Ref();\n SchedClosure([this]() {\n Env::Default()->SleepForMicroseconds(100 * 1000);\n Status s = rendez_->Send(Key(ALICE, kIncarnation, BOB, \"c\"),\n Rendezvous::Args(), V(1.0), false);\n rendez_->Unref();\n });\n rendez_->Ref();\n SchedClosure([this]() {\n Env::Default()->SleepForMicroseconds(100 * 1000);\n rendez_->StartAbort(errors::Aborted(\"\"));\n rendez_->Unref();\n });\n EXPECT_TRUE(errors::IsAborted(Run(rendez_)));\n Tensor out = V(-1);\n bool is_dead = false;\n EXPECT_TRUE(errors::IsAborted(rendez_->Recv(\n Key(BOB, kIncarnation, ALICE, \"c\"), Rendezvous::Args(), &out, &is_dead)));\n}\nTEST_F(ExecutorTest, RecvInvalidDtype) {\n auto g = 
std::make_unique<Graph>(OpRegistry::Global());\n auto one = test::graph::Recv(g.get(), \"one\", \"float\", ALICE, 1, BOB);\n auto var = test::graph::Var(g.get(), DT_FLOAT, TensorShape({1}));\n auto init = test::graph::Assign(g.get(), var, one);\n auto* two = test::graph::Send(g.get(), var, \"two\", BOB, 1, ALICE);\n g->AddControlEdge(init, two); \n Create(std::move(g));\n Rendezvous* rendez = NewLocalRendezvous();\n TF_ASSERT_OK(rendez->Send(Key(ALICE, 1, BOB, \"one\"), Rendezvous::Args(),\n VD(1.0), false));\n EXPECT_TRUE(errors::IsInternal(Run(rendez)));\n Tensor output;\n bool is_dead;\n EXPECT_TRUE(errors::IsInternal(rendez->Recv(\n Key(BOB, 1, ALICE, \"two\"), Rendezvous::Args(), &output, &is_dead)));\n rendez->Unref();\n}\nTEST_F(ExecutorTest, RecvInvalidRefDtype) {\n auto g = std::make_unique<Graph>(OpRegistry::Global());\n auto var = test::graph::InvalidRefType(g.get(), DT_FLOAT, DT_DOUBLE);\n test::graph::Send(g.get(), var, \"out\", BOB, 1, ALICE);\n Create(std::move(g));\n Rendezvous* rendez = NewLocalRendezvous();\n EXPECT_TRUE(errors::IsInternal(Run(rendez)));\n Tensor output;\n bool is_dead;\n EXPECT_TRUE(errors::IsInternal(rendez->Recv(\n Key(BOB, 1, ALICE, \"out\"), Rendezvous::Args(), &output, &is_dead)));\n rendez->Unref();\n}\nTEST_F(ExecutorTest, NoInputTensors) {\n auto g = std::make_unique<Graph>(OpRegistry::Global());\n test::graph::Constant(g.get(), V(1.0));\n Create(std::move(g));\n TF_ASSERT_OK(Run(rendez_));\n}\nstatic void BM_executor(::testing::benchmark::State& state) {\n const int width = state.range(0);\n const int depth = state.range(1);\n Graph* g = new Graph(OpRegistry::Global());\n random::PhiloxRandom philox(1729, 17);\n random::SimplePhilox rand(&philox);\n uint64 cur = 0;\n uint32 r = 1 + rand.Rand32() % width;\n std::vector<Node*> ready_nodes;\n for (int i = 0; i < r; ++i) {\n ready_nodes.push_back(test::graph::NoOp(g, {}));\n ++cur;\n }\n std::random_device random_device;\n std::mt19937 rng(random_device());\n for (int i = 0; i < depth; ++i) {\n std::shuffle(ready_nodes.begin(), ready_nodes.end(), rng);\n r = 1 + rand.Rand32() % (ready_nodes.size());\n std::vector<Node*> control_inputs;\n for (int j = 0; j < r; ++j) {\n control_inputs.push_back(ready_nodes.back());\n ready_nodes.pop_back();\n }\n Node* n = test::graph::NoOp(g, control_inputs);\n ++cur;\n r = 1 + rand.Rand32() % width;\n for (int j = 0; j < r; ++j) {\n ready_nodes.push_back(test::graph::NoOp(g, {n}));\n ++cur;\n }\n }\n FixupSourceAndSinkEdges(g);\n test::Benchmark(\"cpu\", g, false).Run(state);\n state.SetLabel(strings::StrCat(\"Nodes = \", cur));\n state.SetItemsProcessed(cur * static_cast<int64_t>(state.iterations()));\n}\nBENCHMARK(BM_executor)->UseRealTime()->ArgPair(16, 1024);\nBENCHMARK(BM_executor)->UseRealTime()->ArgPair(32, 8192);\nBENCHMARK(BM_executor)->UseRealTime()->ArgPair(1024, 16);\nBENCHMARK(BM_executor)->UseRealTime()->ArgPair(8192, 32);\nBENCHMARK(BM_executor)->UseRealTime()->ArgPair(1024, 1024);\nstatic void BM_const_identity(::testing::benchmark::State& state) {\n const int width = state.range(0);\n const int outputs_per_const = state.range(1);\n Graph* g = new Graph(OpRegistry::Global());\n for (int i = 0; i < width; ++i) {\n Tensor i_t(i);\n Node* const_node = test::graph::Constant(g, i_t);\n for (int j = 0; j < outputs_per_const; ++j) {\n test::graph::Identity(g, const_node);\n }\n }\n FixupSourceAndSinkEdges(g);\n test::Benchmark(\"cpu\", g, false).Run(state);\n state.SetLabel(strings::StrCat(\"Nodes = \", (1 + outputs_per_const) * width));\n state.SetItemsProcessed((1 + outputs_per_const) * width *\n static_cast<int64_t>(state.iterations()));\n}\nBENCHMARK(BM_const_identity)\n ->UseRealTime()\n ->ArgPair(1, 1)\n ->ArgPair(1, 100)\n ->ArgPair(100, 1)\n ->ArgPair(100, 100);\nstatic void BM_FeedInputFetchOutput(::testing::benchmark::State& state) {\n Graph* g = new Graph(OpRegistry::Global());\n Node* x = test::graph::Recv(g, \"x\", \"float\", ALICE, 1, BOB);\n Node* y = test::graph::Recv(g, \"y\", \"float\", ALICE, 1, BOB);\n Node* sum = test::graph::Add(g, x, y);\n Node* z = test::graph::Send(g, sum, \"z\", BOB, 1, ALICE);\n string x_key = test::GetRendezvousKey(x);\n string y_key = test::GetRendezvousKey(y);\n string z_key = test::GetRendezvousKey(z);\n Tensor val(DT_FLOAT, TensorShape({}));\n val.scalar<float>()() = 3.14;\n FixupSourceAndSinkEdges(g);\n test::Benchmark(\"cpu\", g, false)\n .RunWithRendezvousArgs({{x_key, val}, {y_key, val}}, {z_key}, state);\n state.SetItemsProcessed(static_cast<int64_t>(state.iterations()));\n}\nBENCHMARK(BM_FeedInputFetchOutput);\nStatus ReplaceEdgeWithSendRecv(Graph* g, const Edge* edge, const string& tensor,\n const string& sender,\n const uint64 sender_incarnation,\n const string& receiver) {\n Node* send;\n NodeDef send_def;\n TF_CHECK_OK(NodeDefBuilder(g->NewName(\"n\"), \"_Send\")\n .Input(edge->src()->name(), edge->src_output(),\n edge->src()->output_type(edge->src_output()))\n .Attr(\"tensor_name\", tensor)\n .Attr(\"send_device\", sender)\n .Attr(\"send_device_incarnation\",\n static_cast<int64_t>(sender_incarnation))\n .Attr(\"recv_device\", receiver)\n .Finalize(&send_def));\n TF_ASSIGN_OR_RETURN(send, g->AddNode(send_def));\n Node* recv;\n NodeDef recv_def;\n TF_CHECK_OK(\n NodeDefBuilder(g->NewName(\"n\"), \"_Recv\")\n .Attr(\"tensor_name\", tensor)\n .Attr(\"send_device\", sender)\n .Attr(\"send_device_incarnation\",\n static_cast<int64_t>(sender_incarnation))\n .Attr(\"recv_device\", receiver)\n .Attr(\"tensor_type\", edge->dst()->input_type(edge->dst_input()))\n .Finalize(&recv_def));\n TF_ASSIGN_OR_RETURN(recv, g->AddNode(recv_def));\n g->AddEdge(edge->src(), edge->src_output(), send, 0);\n g->AddEdge(recv, 0, edge->dst(), edge->dst_input());\n g->AddControlEdge(edge->src(), recv);\n g->RemoveEdge(edge);\n return absl::OkStatus();\n}\nstatic void BM_WhileLoopHelper(::testing::benchmark::State& state,\n int loop_iters, int loop_vars, bool lower,\n bool transfer) {\n std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));\n FunctionDefLibrary f_lib_proto;\n const Tensor one_t = test::AsScalar<int32>(1);\n std::vector<string> args;\n args.reserve(loop_vars);\n args.push_back(\"x: int32\");\n for (int i = 1; i < loop_vars; ++i) {\n args.push_back(strings::StrCat(\"x\", i, \": int32\"));\n }\n std::vector<string> body_rets;\n body_rets.reserve(loop_vars);\n body_rets.push_back(\"y: int32\");\n for (int i = 1; i < loop_vars; ++i) {\n body_rets.push_back(strings::StrCat(\"y\", i, \": int32\"));\n }\n std::vector<FunctionDefHelper::Node> body_nodes;\n body_nodes.reserve(1 + loop_vars);\n body_nodes.push_back(\n {{\"one\"}, \"Const\", {}, {{\"value\", one_t}, {\"dtype\", DT_INT32}}});\n body_nodes.push_back({{\"y\"}, \"Add\", {\"x\", \"one\"}, {{\"T\", DT_INT32}}});\n for (int i = 1; i < loop_vars; ++i) {\n body_nodes.push_back({{strings::StrCat(\"y\", i)},\n \"Relu\",\n {strings::StrCat(\"x\", i)},\n {{\"T\", DT_INT32}}});\n }\n *f_lib_proto.add_function() = FunctionDefHelper::Define(\n \"XPlusOne\",\n args,\n body_rets,\n {},\n body_nodes);\n const Tensor loop_iters_t = test::AsScalar<int32>(loop_iters);\n *f_lib_proto.add_function() = FunctionDefHelper::Define(\n \"LessThanOrEqualToN\",\n args,\n {\"z: bool\"},\n {},\n {\n {{\"N\"}, \"Const\", {}, {{\"value\", loop_iters_t}, {\"dtype\", DT_INT32}}},\n {{\"z\"}, \"LessEqual\", {\"x\", \"N\"}, {{\"T\", DT_INT32}}},\n });\n Scope root = Scope::NewRootScope().ExitOnError();\n TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));\n auto a = ops::Const(root.WithOpName(\"A\"), 0, {});\n Node* while_node;\n std::vector<NodeBuilder::NodeOut> inputs;\n std::vector<DataType> input_types(loop_vars, DT_INT32);\n inputs.reserve(loop_vars);\n for (int i = 0; i < loop_vars; ++i) {\n inputs.push_back(NodeBuilder::NodeOut(a.node()));\n }\n AttrValue int32_attr;\n int32_attr.set_type(DT_INT32);\n AttrValue cond_func;\n cond_func.mutable_func()->set_name(\"LessThanOrEqualToN\");\n AttrValue body_func;\n body_func.mutable_func()->set_name(\"XPlusOne\");\n TF_ASSERT_OK(\n NodeBuilder(\"while\", \"While\", &root.graph()->flib_def())\n .Input(inputs)\n .Attr(\"T\", input_types)\n .Attr(\"cond\", cond_func)\n .Attr(\"body\", body_func)\n .Attr(\"parallel_iterations\", 20)\n .Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)\n .Finalize(root.graph(), &while_node));\n auto c = ops::Identity(\n root.WithOpName(\"C\").WithControlDependencies(Output(while_node)),\n Output(while_node));\n TF_ASSERT_OK(root.DoShapeInference(while_node));\n TF_ASSERT_OK(root.ToGraph(graph.get()));\n if (lower) {\n FunctionLibraryDefinition flib_def(graph->flib_def());\n GraphOptimizationPassOptions opt_options;\n SessionOptions session_options;\n session_options.config.mutable_graph_options()\n ->mutable_optimizer_options()\n ->set_do_function_inlining(true);\n opt_options.session_options = &session_options;\n opt_options.graph = &graph;\n opt_options.flib_def = &flib_def;\n LowerFunctionalOpsPass pass;\n TF_ASSERT_OK(pass.Run(opt_options));\n if (transfer) {\n for (Node* node : graph->nodes()) {\n if (node->type_string() != \"LoopCond\") {\n continue;\n }\n for (const Edge* edge : node->out_edges()) {\n if (edge->dst()->type_string() != \"Switch\") {\n continue;\n }\n string tensor_name = strings::StrCat(\"c\", edge->id());\n TF_ASSERT_OK(ReplaceEdgeWithSendRecv(graph.get(), edge, tensor_name,\n BOB, 1, ALICE));\n }\n }\n }\n }\n SessionOptions options;\n options.config.set_inter_op_parallelism_threads(4);\n FixupSourceAndSinkEdges(graph.get());\n test::Benchmark(\"cpu\", graph.release(), &options, nullptr, nullptr, \"\",\n false)\n .Run(state);\n}\nstatic void BM_LoweredWhileLoop(::testing::benchmark::State& state) {\n const int loop_iters = state.range(0);\n const int loop_vars = state.range(1);\n BM_WhileLoopHelper(state, loop_iters, loop_vars, true,\n false);\n}\nBENCHMARK(BM_LoweredWhileLoop)\n ->ArgPair(0, 1)\n ->ArgPair(1, 1)\n ->ArgPair(10, 1)\n ->ArgPair(100, 1)\n ->ArgPair(1000, 1)\n ->ArgPair(0, 100)\n ->ArgPair(1, 100)\n ->ArgPair(10, 100)\n ->ArgPair(100, 100)\n ->ArgPair(1000, 100);\nstatic void BM_LoweredWhileLoopWithTransfer(\n ::testing::benchmark::State& state) {\n const int loop_iters = state.range(0);\n const int loop_vars = state.range(1);\n BM_WhileLoopHelper(state, loop_iters, loop_vars, true,\n true);\n}\nBENCHMARK(BM_LoweredWhileLoopWithTransfer)\n ->ArgPair(0, 100)\n ->ArgPair(1, 100)\n ->ArgPair(10, 100)\n ->ArgPair(100, 100)\n ->ArgPair(1000, 100)\n ->ArgPair(1, 5000)\n ->ArgPair(10, 5000)\n ->ArgPair(100, 5000)\n ->ArgPair(1000, 5000);\nstatic void BM_FunctionalWhileLoop(::testing::benchmark::State& state) {\n const int loop_iters = state.range(0);\n const int loop_vars = state.range(1);\n BM_WhileLoopHelper(state, loop_iters, loop_vars, false,\n false);\n}\nBENCHMARK(BM_FunctionalWhileLoop)\n ->ArgPair(0, 1)\n ->ArgPair(1, 1)\n ->ArgPair(10, 1)\n ->ArgPair(100, 1)\n ->ArgPair(1000, 1)\n ->ArgPair(0, 100)\n ->ArgPair(1, 100)\n ->ArgPair(10, 100)\n ->ArgPair(100, 100)\n ->ArgPair(1000, 100);\n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/executor.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/executor_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},
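The executor benchmarks in the row above report throughput by multiplying the number of graph nodes touched per iteration by the iteration count. The same pattern works with the open-source google/benchmark library, whose API the TensorFlow harness mirrors; the sketch below is a minimal, hypothetical example (ProcessNodes is a stand-in for the per-iteration work, not part of the TensorFlow sources):

#include <cstdint>
#include "benchmark/benchmark.h"

// Hypothetical stand-in for the per-iteration work; not from TensorFlow.
static void ProcessNodes(int width, int depth) {
  benchmark::DoNotOptimize(width * depth);
}

static void BM_Example(benchmark::State& state) {
  const int width = state.range(0);
  const int depth = state.range(1);
  for (auto _ : state) {
    ProcessNodes(width, depth);
  }
  // Items processed = nodes touched per iteration times iterations,
  // mirroring the SetItemsProcessed calls in the executor benchmarks.
  state.SetItemsProcessed(static_cast<int64_t>(width) * depth *
                          state.iterations());
}
BENCHMARK(BM_Example)->UseRealTime()->ArgPair(16, 1024);
BENCHMARK_MAIN();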
{"rowIdx":1185,"cells":{"ID":{"kind":"string","value":"0ef74f11-3523-4202-9791-2c1beb9227af"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"grpc_worker_cache"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/distributed_runtime/rpc/grpc_worker_cache_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.h\"\n#include \"tensorflow/core/distributed_runtime/rpc/coordination/grpc_coordination_client.h\"\n#include \"tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.h\"\n#include \"tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.h\"\n#include \"tensorflow/core/distributed_runtime/rpc/grpc_util.h\"\n#include \"tensorflow/core/distributed_runtime/worker_cache_logger.h\"\n#include \"tensorflow/core/distributed_runtime/worker_cache_partial.h\"\n#include \"tensorflow/core/distributed_runtime/worker_interface.h\"\n#include \"tensorflow/core/platform/cpu_info.h\"\n#include \"tensorflow/core/platform/mutex.h\"\n#include \"tensorflow/core/util/env_var.h\"\nnamespace tensorflow {\nnamespace {\nclass GrpcWorkerCache : public WorkerCachePartial {\n public:\n explicit GrpcWorkerCache(std::shared_ptr<GrpcChannelCache> channel_cache,\n WorkerInterface* local_worker,\n const string& local_target,\n GrpcWorkerEnv* worker_env)\n : local_target_(local_target),\n local_worker_(local_worker),\n channel_cache_(channel_cache),\n worker_env_(worker_env),\n next_round_robin_assignment_(0) {}\n void ListWorkers(std::vector<string>* workers) const override {\n channel_cache_->ListWorkers(workers);\n }\n void ListWorkersInJob(const string& job_name,\n std::vector<string>* workers) const override {\n channel_cache_->ListWorkersInJob(job_name, workers);\n }\n WorkerInterface* GetOrCreateWorker(const string& target) override {\n if (target == local_target_) {\n return local_worker_;\n } else {\n SharedGrpcChannelPtr channel = channel_cache_->FindWorkerChannel(target);\n if (!channel) {\n return nullptr;\n }\n size_t index = AssignWorkerToThread(target);\n return NewGrpcRemoteWorker(\n channel, worker_env_->GetCompletionQueue(index),\n worker_env_->GetThreadPool(), &logger_, target);\n }\n }\n void ReleaseWorker(const string& target, WorkerInterface* worker) override {\n if (target == local_target_) {\n CHECK_EQ(worker, local_worker_)\n << \"Releasing a worker that was not returned by this WorkerCache\";\n } else {\n WorkerCacheInterface::ReleaseWorker(target, worker);\n }\n }\n Status GetEagerClientCache(\n std::unique_ptr<eager::EagerClientCache>* eager_client_cache) override {\n eager_client_cache->reset(eager::NewGrpcEagerClientCache(channel_cache_));\n return absl::OkStatus();\n }\n Status GetCoordinationClientCache(std::unique_ptr<CoordinationClientCache>*\n coordination_client_cache) override {\n coordination_client_cache->reset(\n NewGrpcCoordinationClientCache(channel_cache_));\n return absl::OkStatus();\n }\n void SetLogging(bool v) override { logger_.SetLogging(v); }\n void ClearLogs() override { logger_.ClearLogs(); }\n bool RetrieveLogs(int64_t step_id, StepStats* ss) override {\n return logger_.RetrieveLogs(step_id, ss);\n }\n private:\n size_t AssignWorkerToThread(const string& target) {\n mutex_lock lock(assignment_mu_);\n auto it = target_assignments_.find(target);\n if (it == target_assignments_.end()) {\n it = target_assignments_\n .insert(std::make_pair(target,\n (next_round_robin_assignment_++) %\n worker_env_->CompletionQueueSize()))\n .first;\n }\n return it->second;\n }\n const string local_target_;\n WorkerInterface* const local_worker_; \n std::shared_ptr<GrpcChannelCache> channel_cache_;\n WorkerCacheLogger logger_;\n GrpcWorkerEnv* worker_env_; \n mutex assignment_mu_;\n std::unordered_map<string, size_t> target_assignments_\n TF_GUARDED_BY(assignment_mu_);\n size_t next_round_robin_assignment_ TF_GUARDED_BY(assignment_mu_);\n};\n} \nGrpcWorkerEnv::GrpcWorkerEnv(size_t num_completion_queues, size_t num_threads)\n : threadpool_(new thread::ThreadPool(\n Env::Default(), ThreadOptions(), \"GrpcWorkerEnvQueues\", num_threads,\n false, nullptr)),\n threads_(num_completion_queues) {}\nGrpcWorkerEnv::~GrpcWorkerEnv() { threads_.clear(); }\nGrpcWorkerEnv::GrpcWorkerCacheThread::GrpcWorkerCacheThread() {\n thread_.reset(Env::Default()->StartThread(\n ThreadOptions(), \"GrpcWorkerEnvPool\", [this]() {\n void* tag;\n bool ok;\n while (completion_queue_.Next(&tag, &ok)) {\n GrpcClientCQTag* callback_tag = static_cast<GrpcClientCQTag*>(tag);\n callback_tag->OnCompleted(ok);\n }\n }));\n}\nGrpcWorkerEnv::GrpcWorkerCacheThread::~GrpcWorkerCacheThread() {\n completion_queue_.Shutdown();\n thread_.reset();\n}\nGrpcWorkerEnv* CreateGrpcWorkerEnv() {\n int num_cpus = port::NumSchedulableCPUs();\n int64_t num_completion_queues;\n Status status = ReadInt64FromEnvVar(\"TF_GRPC_WORKER_CACHE_QUEUES\", 64,\n &num_completion_queues);\n if (!status.ok()) {\n LOG(ERROR) << \"Error parsing TF_GRPC_WORKER_CACHE_QUEUES: \" << status;\n }\n int64_t num_threads;\n status = ReadInt64FromEnvVar(\"TF_GRPC_WORKER_CACHE_THREADS\", num_cpus,\n &num_threads);\n if (!status.ok()) {\n LOG(ERROR) << \"Error parsing TF_GRPC_WORKER_CACHE_THREADS: \" << status;\n }\n return new GrpcWorkerEnv(num_completion_queues, num_threads);\n}\nWorkerCacheInterface* NewGrpcWorkerCache(std::shared_ptr<GrpcChannelCache> cc,\n GrpcWorkerEnv* worker_env) {\n return new GrpcWorkerCache(cc, nullptr, \"\",\n worker_env);\n}\nWorkerCacheInterface* NewGrpcWorkerCacheWithLocalWorker(\n std::shared_ptr<GrpcChannelCache> cc, GrpcWorkerEnv* worker_env,\n WorkerInterface* local_worker, const string& local_target) {\n return new GrpcWorkerCache(cc, local_worker, local_target, worker_env);\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.h\"\n#include \"tensorflow/c/tf_status.h\"\n#include \"tensorflow/core/distributed_runtime/rpc/grpc_channel.h\"\n#include \"tensorflow/core/distributed_runtime/test_utils.h\"\n#include \"tensorflow/core/lib/core/status_test_util.h\"\n#include \"tensorflow/core/platform/status.h\"\n#include \"tensorflow/core/platform/strcat.h\"\n#include \"tensorflow/core/platform/test.h\"\n#include \"tensorflow/core/platform/threadpool.h\"\nnamespace tensorflow {\nTEST(GrpcWorkerCacheTest, NewGrpcWorkerCache) {\n GrpcChannelSpec spec;\n TF_ASSERT_OK(\n spec.AddHostPortsJob(\"worker\", {{0, \"a:0\"}, {1, \"b:1\"}, {2, \"c:2\"}}));\n ChannelCreationFunction channel_func =\n ConvertToChannelCreationFunction(NewHostPortGrpcChannel);\n auto channel_cache = std::shared_ptr<GrpcChannelCache>(\n NewGrpcChannelCache(spec, channel_func));\n std::unique_ptr<GrpcWorkerEnv> grpc_worker_env(CreateGrpcWorkerEnv());\n std::unique_ptr<WorkerCacheInterface> worker_cache(\n NewGrpcWorkerCache(channel_cache, grpc_worker_env.get()));\n WorkerInterface* wi;\n wi = worker_cache->GetOrCreateWorker(\"/job:worker/replica:0/task:0\");\n EXPECT_NE(wi, nullptr);\n worker_cache->ReleaseWorker(\"/job:worker/replica:0/task:0\", wi);\n wi = worker_cache->GetOrCreateWorker(\"/job:worker/replica:0/task:1\");\n EXPECT_NE(wi, nullptr);\n worker_cache->ReleaseWorker(\"/job:worker/replica:0/task:1\", wi);\n wi = worker_cache->GetOrCreateWorker(\"/job:worker/replica:0/task:2\");\n EXPECT_NE(wi, nullptr);\n worker_cache->ReleaseWorker(\"/job:worker/replica:0/task:2\", wi);\n wi = worker_cache->GetOrCreateWorker(\"/job:worker/replica:0/task:3\");\n EXPECT_EQ(wi, nullptr);\n std::unique_ptr<TestWorkerInterface> local_wi;\n worker_cache.reset(NewGrpcWorkerCacheWithLocalWorker(\n channel_cache, grpc_worker_env.get(), local_wi.get(), \"local_target\"));\n wi = worker_cache->GetOrCreateWorker(\"local_target\");\n EXPECT_EQ(wi, local_wi.get());\n}\nTEST(GrpcWorkerCacheTest, DestructWorkerCacheInThreadPool) {\n GrpcChannelSpec spec;\n TF_ASSERT_OK(\n spec.AddHostPortsJob(\"worker\", {{0, \"a:0\"}, {1, \"b:1\"}, {2, \"c:2\"}}));\n ChannelCreationFunction channel_func =\n ConvertToChannelCreationFunction(NewHostPortGrpcChannel);\n auto channel_cache = std::shared_ptr<GrpcChannelCache>(\n NewGrpcChannelCache(spec, channel_func));\n std::unique_ptr<GrpcWorkerEnv> grpc_worker_env(CreateGrpcWorkerEnv());\n WorkerCacheInterface* worker_cache =\n NewGrpcWorkerCache(channel_cache, grpc_worker_env.get());\n thread::ThreadPool* tp = grpc_worker_env->GetThreadPool();\n Notification n;\n tp->Schedule([worker_cache, &n] {\n delete worker_cache;\n n.Notify();\n });\n n.WaitForNotification();\n}\n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/grpc_worker_cache_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},
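GrpcWorkerCache, in the row above, pins each remote target to one of a fixed pool of gRPC completion queues with a round-robin counter, so a given target always reuses the same queue. A minimal standalone sketch of that assignment scheme follows; the class and method names are illustrative, not the TensorFlow API:

#include <cstddef>
#include <mutex>
#include <string>
#include <unordered_map>

// Illustrative sketch of round-robin target-to-queue assignment.
class QueueAssigner {
 public:
  explicit QueueAssigner(size_t num_queues) : num_queues_(num_queues) {}

  // Returns a stable queue index for `target`, assigning new targets
  // round-robin so load spreads evenly across the queue pool.
  size_t Assign(const std::string& target) {
    std::lock_guard<std::mutex> lock(mu_);
    auto it = assignments_.find(target);
    if (it == assignments_.end()) {
      it = assignments_.emplace(target, next_++ % num_queues_).first;
    }
    return it->second;
  }

 private:
  const size_t num_queues_;
  std::mutex mu_;
  std::unordered_map<std::string, size_t> assignments_;
  size_t next_ = 0;
};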
{"rowIdx":1186,"cells":{"ID":{"kind":"string","value":"0ea8a6b6-b2a9-416e-9b86-777aa27c03fe"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/cel-cpp"},"File Name":{"kind":"string","value":"proto_time_encoding"},"File Path in Repository":{"kind":"string","value":"internal/proto_time_encoding.cc"},"File Path for Unit Test":{"kind":"string","value":"internal/proto_time_encoding_test.cc"},"Code":{"kind":"string","value":"#include \"internal/proto_time_encoding.h\"\n#include <string>\n#include \"google/protobuf/duration.pb.h\"\n#include \"google/protobuf/timestamp.pb.h\"\n#include \"google/protobuf/util/time_util.h\"\n#include \"absl/status/status.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/time/time.h\"\n#include \"internal/status_macros.h\"\n#include \"internal/time.h\"\nnamespace cel::internal {\nnamespace {\nabsl::Status Validate(absl::Time time) {\n if (time < cel::internal::MinTimestamp()) {\n return absl::InvalidArgumentError(\"time below min\");\n }\n if (time > cel::internal::MaxTimestamp()) {\n return absl::InvalidArgumentError(\"time above max\");\n }\n return absl::OkStatus();\n}\nabsl::Status CelValidateDuration(absl::Duration duration) {\n if (duration < cel::internal::MinDuration()) {\n return absl::InvalidArgumentError(\"duration below min\");\n }\n if (duration > cel::internal::MaxDuration()) {\n return absl::InvalidArgumentError(\"duration above max\");\n }\n return absl::OkStatus();\n}\n} \nabsl::Duration DecodeDuration(const google::protobuf::Duration& proto) {\n return absl::Seconds(proto.seconds()) + absl::Nanoseconds(proto.nanos());\n}\nabsl::Time DecodeTime(const google::protobuf::Timestamp& proto) {\n return absl::FromUnixSeconds(proto.seconds()) +\n absl::Nanoseconds(proto.nanos());\n}\nabsl::Status EncodeDuration(absl::Duration duration,\n google::protobuf::Duration* proto) {\n CEL_RETURN_IF_ERROR(CelValidateDuration(duration));\n const int64_t s = absl::IDivDuration(duration, absl::Seconds(1), &duration);\n const int64_t n =\n absl::IDivDuration(duration, absl::Nanoseconds(1), &duration);\n proto->set_seconds(s);\n proto->set_nanos(n);\n return absl::OkStatus();\n}\nabsl::StatusOr<std::string> EncodeDurationToString(absl::Duration duration) {\n google::protobuf::Duration d;\n auto status = EncodeDuration(duration, &d);\n if (!status.ok()) {\n return status;\n }\n return google::protobuf::util::TimeUtil::ToString(d);\n}\nabsl::Status EncodeTime(absl::Time time, google::protobuf::Timestamp* proto) {\n CEL_RETURN_IF_ERROR(Validate(time));\n const int64_t s = absl::ToUnixSeconds(time);\n proto->set_seconds(s);\n proto->set_nanos((time - absl::FromUnixSeconds(s)) / absl::Nanoseconds(1));\n return absl::OkStatus();\n}\nabsl::StatusOr<std::string> EncodeTimeToString(absl::Time time) {\n google::protobuf::Timestamp t;\n auto status = EncodeTime(time, &t);\n if (!status.ok()) {\n return status;\n }\n return google::protobuf::util::TimeUtil::ToString(t);\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"internal/proto_time_encoding.h\"\n#include \"google/protobuf/duration.pb.h\"\n#include \"google/protobuf/timestamp.pb.h\"\n#include \"absl/time/time.h\"\n#include \"internal/testing.h\"\n#include \"testutil/util.h\"\nnamespace cel::internal {\nnamespace {\nusing ::google::api::expr::testutil::EqualsProto;\nTEST(EncodeDuration, Basic) {\n google::protobuf::Duration proto_duration;\n ASSERT_OK(\n EncodeDuration(absl::Seconds(2) + absl::Nanoseconds(3), &proto_duration));\n EXPECT_THAT(proto_duration, EqualsProto(\"seconds: 2 nanos: 3\"));\n}\nTEST(EncodeDurationToString, Basic) {\n ASSERT_OK_AND_ASSIGN(\n std::string json,\n EncodeDurationToString(absl::Seconds(5) + absl::Nanoseconds(20)));\n EXPECT_EQ(json, \"5.000000020s\");\n}\nTEST(EncodeTime, Basic) {\n google::protobuf::Timestamp proto_timestamp;\n ASSERT_OK(EncodeTime(absl::FromUnixMillis(300000), &proto_timestamp));\n EXPECT_THAT(proto_timestamp, EqualsProto(\"seconds: 300\"));\n}\nTEST(EncodeTimeToString, Basic) {\n ASSERT_OK_AND_ASSIGN(std::string json,\n EncodeTimeToString(absl::FromUnixMillis(80030)));\n EXPECT_EQ(json, \"1970-01-01T00:01:20.030Z\");\n}\nTEST(DecodeDuration, Basic) {\n google::protobuf::Duration proto_duration;\n proto_duration.set_seconds(450);\n proto_duration.set_nanos(4);\n EXPECT_EQ(DecodeDuration(proto_duration),\n absl::Seconds(450) + absl::Nanoseconds(4));\n}\nTEST(DecodeTime, Basic) {\n google::protobuf::Timestamp proto_timestamp;\n proto_timestamp.set_seconds(450);\n EXPECT_EQ(DecodeTime(proto_timestamp), absl::FromUnixSeconds(450));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/proto_time_encoding.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/proto_time_encoding_test.cc"},"Commit Hash":{"kind":"string","value":"4552db5798fb0853b131b783d8875794334fae7f"}}},
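EncodeDuration above relies on absl::IDivDuration, which returns the integer quotient of two durations and leaves the remainder in its output parameter; dividing first by one second and then by one nanosecond yields exactly the two proto fields. A self-contained sketch of the same decomposition, using only the Abseil time API:

#include <cstdint>
#include <cstdio>
#include "absl/time/time.h"

int main() {
  absl::Duration d = absl::Seconds(2) + absl::Nanoseconds(3);
  // First division extracts whole seconds and leaves the sub-second
  // remainder in d; the second converts that remainder to nanoseconds.
  const int64_t secs = absl::IDivDuration(d, absl::Seconds(1), &d);
  const int64_t nanos = absl::IDivDuration(d, absl::Nanoseconds(1), &d);
  std::printf("seconds: %lld nanos: %lld\n",
              static_cast<long long>(secs), static_cast<long long>(nanos));
}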
{"rowIdx":1187,"cells":{"ID":{"kind":"string","value":"c8984e03-8b10-44e4-81f8-3b0019417a42"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"abseil/abseil-cpp"},"File Name":{"kind":"string","value":"str_replace"},"File Path in Repository":{"kind":"string","value":"absl/strings/str_replace.cc"},"File Path for Unit Test":{"kind":"string","value":"absl/strings/str_replace_test.cc"},"Code":{"kind":"string","value":"#include \"absl/strings/str_replace.h\"\n#include <cstddef>\n#include <initializer_list>\n#include <string>\n#include <utility>\n#include <vector>\n#include \"absl/base/config.h\"\n#include \"absl/base/nullability.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/string_view.h\"\nnamespace absl {\nABSL_NAMESPACE_BEGIN\nnamespace strings_internal {\nusing FixedMapping =\n std::initializer_list<std::pair<absl::string_view, absl::string_view>>;\nint ApplySubstitutions(\n absl::string_view s,\n absl::Nonnull<std::vector<ViableSubstitution>*> subs_ptr,\n absl::Nonnull<std::string*> result_ptr) {\n auto& subs = *subs_ptr;\n int substitutions = 0;\n size_t pos = 0;\n while (!subs.empty()) {\n auto& sub = subs.back();\n if (sub.offset >= pos) {\n if (pos <= s.size()) {\n StrAppend(result_ptr, s.substr(pos, sub.offset - pos), sub.replacement);\n }\n pos = sub.offset + sub.old.size();\n substitutions += 1;\n }\n sub.offset = s.find(sub.old, pos);\n if (sub.offset == s.npos) {\n subs.pop_back();\n } else {\n size_t index = subs.size();\n while (--index && subs[index - 1].OccursBefore(subs[index])) {\n std::swap(subs[index], subs[index - 1]);\n }\n }\n }\n result_ptr->append(s.data() + pos, s.size() - pos);\n return substitutions;\n}\n} \nstd::string StrReplaceAll(absl::string_view s,\n strings_internal::FixedMapping replacements) {\n return StrReplaceAll<strings_internal::FixedMapping>(s, replacements);\n}\nint StrReplaceAll(strings_internal::FixedMapping replacements,\n absl::Nonnull<std::string*> target) {\n return StrReplaceAll<strings_internal::FixedMapping>(replacements, target);\n}\nABSL_NAMESPACE_END\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"absl/strings/str_replace.h\"\n#include <list>\n#include <map>\n#include <string>\n#include <tuple>\n#include <utility>\n#include <vector>\n#include \"gtest/gtest.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_split.h\"\n#include \"absl/strings/string_view.h\"\nTEST(StrReplaceAll, OneReplacement) {\n std::string s;\n s = absl::StrReplaceAll(s, {{\"\", \"\"}});\n EXPECT_EQ(s, \"\");\n s = absl::StrReplaceAll(s, {{\"x\", \"\"}});\n EXPECT_EQ(s, \"\");\n s = absl::StrReplaceAll(s, {{\"\", \"y\"}});\n EXPECT_EQ(s, \"\");\n s = absl::StrReplaceAll(s, {{\"x\", \"y\"}});\n EXPECT_EQ(s, \"\");\n s = absl::StrReplaceAll(\"abc\", {{\"\", \"\"}});\n EXPECT_EQ(s, \"abc\");\n s = absl::StrReplaceAll(\"abc\", {{\"\", \"y\"}});\n EXPECT_EQ(s, \"abc\");\n s = absl::StrReplaceAll(\"abc\", {{\"x\", \"\"}});\n EXPECT_EQ(s, \"abc\");\n s = absl::StrReplaceAll(\"abc\", {{\"xyz\", \"123\"}});\n EXPECT_EQ(s, \"abc\");\n s = absl::StrReplaceAll(\"abc\", {{\"abc\", \"xyz\"}});\n EXPECT_EQ(s, \"xyz\");\n s = absl::StrReplaceAll(\"abc\", {{\"a\", \"x\"}});\n EXPECT_EQ(s, \"xbc\");\n s = absl::StrReplaceAll(\"abc\", {{\"b\", \"x\"}});\n EXPECT_EQ(s, \"axc\");\n s = absl::StrReplaceAll(\"abc\", {{\"c\", \"x\"}});\n EXPECT_EQ(s, \"abx\");\n s = absl::StrReplaceAll(\"ababa\", {{\"a\", \"xxx\"}});\n EXPECT_EQ(s, \"xxxbxxxbxxx\");\n s = absl::StrReplaceAll(\"ababa\", {{\"b\", \"xxx\"}});\n EXPECT_EQ(s, \"axxxaxxxa\");\n s = absl::StrReplaceAll(\"aaabaaabaaa\", {{\"aaa\", \"x\"}});\n EXPECT_EQ(s, \"xbxbx\");\n s = absl::StrReplaceAll(\"abbbabbba\", {{\"bbb\", \"x\"}});\n EXPECT_EQ(s, \"axaxa\");\n s = absl::StrReplaceAll(\"aaa\", {{\"aa\", \"x\"}});\n EXPECT_EQ(s, \"xa\");\n s = absl::StrReplaceAll(\"aaa\", {{\"aa\", \"a\"}});\n EXPECT_EQ(s, \"aa\");\n}\nTEST(StrReplaceAll, ManyReplacements) {\n std::string s;\n s = absl::StrReplaceAll(\"\", {{\"\", \"\"}, {\"x\", \"\"}, {\"\", \"y\"}, {\"x\", \"y\"}});\n EXPECT_EQ(s, \"\");\n s = absl::StrReplaceAll(\"abc\", {{\"\", \"\"}, {\"\", \"y\"}, {\"x\", \"\"}});\n EXPECT_EQ(s, \"abc\");\n s = absl::StrReplaceAll(\"abc\", {{\"a\", \"x\"}, {\"b\", \"y\"}, {\"c\", \"z\"}});\n EXPECT_EQ(s, \"xyz\");\n s = absl::StrReplaceAll(\"zxy\", {{\"z\", \"x\"}, {\"x\", \"y\"}, {\"y\", \"z\"}});\n EXPECT_EQ(s, \"xyz\");\n s = absl::StrReplaceAll(\"abc\", {{\"a\", \"x\"}, {\"ab\", \"xy\"}, {\"abc\", \"xyz\"}});\n EXPECT_EQ(s, \"xyz\");\n s = absl::StrReplaceAll(\n \"Abc!\", {{\"a\", \"x\"}, {\"ab\", \"xy\"}, {\"b\", \"y\"}, {\"bc\", \"yz\"}, {\"c\", \"z\"}});\n EXPECT_EQ(s, \"Ayz!\");\n s = absl::StrReplaceAll(\n \"Abc!\",\n {{\"a\", \"x\"}, {\"ab\", \"xy\"}, {\"b\", \"y\"}, {\"bc!\", \"yz?\"}, {\"c!\", \"z;\"}});\n EXPECT_EQ(s, \"Ayz?\");\n s = absl::StrReplaceAll(\"ababa\", {{\"a\", \"xxx\"}, {\"b\", \"XXXX\"}});\n EXPECT_EQ(s, \"xxxXXXXxxxXXXXxxx\");\n s = absl::StrReplaceAll(\"aaa\", {{\"aa\", \"x\"}, {\"a\", \"X\"}});\n EXPECT_EQ(s, \"xX\");\n s = absl::StrReplaceAll(\"aaa\", {{\"a\", \"X\"}, {\"aa\", \"x\"}});\n EXPECT_EQ(s, \"xX\");\n s = absl::StrReplaceAll(\"the quick brown fox jumped over the lazy dogs\",\n {\n {\"brown\", \"box\"},\n {\"dogs\", \"jugs\"},\n {\"fox\", \"with\"},\n {\"jumped\", \"five\"},\n {\"over\", \"dozen\"},\n {\"quick\", \"my\"},\n {\"the\", \"pack\"},\n {\"the lazy\", \"liquor\"},\n });\n EXPECT_EQ(s, \"pack my box with five dozen liquor jugs\");\n}\nTEST(StrReplaceAll, ManyReplacementsInMap) {\n std::map<const char*, const char*> replacements;\n replacements[\"$who\"] = \"Bob\";\n replacements[\"$count\"] = \"5\";\n replacements[\"#Noun\"] = \"Apples\";\n std::string s = absl::StrReplaceAll(\"$who bought $count #Noun. Thanks $who!\",\n replacements);\n EXPECT_EQ(\"Bob bought 5 Apples. Thanks Bob!\", s);\n}\nTEST(StrReplaceAll, ReplacementsInPlace) {\n std::string s = std::string(\"$who bought $count #Noun. Thanks $who!\");\n int count;\n count = absl::StrReplaceAll({{\"$count\", absl::StrCat(5)},\n {\"$who\", \"Bob\"},\n {\"#Noun\", \"Apples\"}}, &s);\n EXPECT_EQ(count, 4);\n EXPECT_EQ(\"Bob bought 5 Apples. Thanks Bob!\", s);\n}\nTEST(StrReplaceAll, ReplacementsInPlaceInMap) {\n std::string s = std::string(\"$who bought $count #Noun. Thanks $who!\");\n std::map<std::string, std::string> replacements;\n replacements[\"$who\"] = \"Bob\";\n replacements[\"$count\"] = \"5\";\n replacements[\"#Noun\"] = \"Apples\";\n int count;\n count = absl::StrReplaceAll(replacements, &s);\n EXPECT_EQ(count, 4);\n EXPECT_EQ(\"Bob bought 5 Apples. Thanks Bob!\", s);\n}\nstruct Cont {\n Cont() = default;\n explicit Cont(absl::string_view src) : data(src) {}\n absl::string_view data;\n};\ntemplate <int index>\nabsl::string_view get(const Cont& c) {\n auto splitter = absl::StrSplit(c.data, ':');\n auto it = splitter.begin();\n for (int i = 0; i < index; ++i) ++it;\n return *it;\n}\nTEST(StrReplaceAll, VariableNumber) {\n std::string s;\n {\n std::vector<std::pair<const char*, const char*>> replacements;\n s = \"abc\";\n EXPECT_EQ(0, absl::StrReplaceAll(replacements, &s));\n EXPECT_EQ(\"abc\", s);\n s = \"abc\";\n replacements.push_back({\"a\", \"A\"});\n EXPECT_EQ(1, absl::StrReplaceAll(replacements, &s));\n EXPECT_EQ(\"Abc\", s);\n s = \"abc\";\n replacements.push_back({\"b\", \"B\"});\n EXPECT_EQ(2, absl::StrReplaceAll(replacements, &s));\n EXPECT_EQ(\"ABc\", s);\n s = \"abc\";\n replacements.push_back({\"d\", \"D\"});\n EXPECT_EQ(2, absl::StrReplaceAll(replacements, &s));\n EXPECT_EQ(\"ABc\", s);\n EXPECT_EQ(\"ABcABc\", absl::StrReplaceAll(\"abcabc\", replacements));\n }\n {\n std::map<std::string, std::string> replacements;\n replacements[\"aa\"] = \"x\";\n replacements[\"a\"] = \"X\";\n s = \"aaa\";\n EXPECT_EQ(2, absl::StrReplaceAll(replacements, &s));\n EXPECT_EQ(\"xX\", s);\n EXPECT_EQ(\"xxX\", absl::StrReplaceAll(\"aaaaa\", replacements));\n }\n {\n std::list<std::pair<const char*, const char*>> replacements = {\n {\"a\", \"x\"}, {\"b\", \"y\"}, {\"c\", \"z\"}};\n std::string s = absl::StrReplaceAll(\"abc\", replacements);\n EXPECT_EQ(s, \"xyz\");\n }\n {\n using X = std::tuple<absl::string_view, std::string, int>;\n std::vector<X> replacements(3);\n replacements[0] = X{\"a\", \"x\", 1};\n replacements[1] = X{\"b\", \"y\", 0};\n replacements[2] = X{\"c\", \"z\", -1};\n std::string s = absl::StrReplaceAll(\"abc\", replacements);\n EXPECT_EQ(s, \"xyz\");\n }\n {\n std::vector<Cont> replacements(3);\n replacements[0] = Cont{\"a:x\"};\n replacements[1] = Cont{\"b:y\"};\n replacements[2] = Cont{\"c:z\"};\n std::string s = absl::StrReplaceAll(\"abc\", replacements);\n EXPECT_EQ(s, \"xyz\");\n }\n}\nTEST(StrReplaceAll, Inplace) {\n std::string s;\n int reps;\n s = \"\";\n reps = absl::StrReplaceAll({{\"\", \"\"}, {\"x\", \"\"}, {\"\", \"y\"}, {\"x\", \"y\"}}, &s);\n EXPECT_EQ(reps, 0);\n EXPECT_EQ(s, \"\");\n s = \"abc\";\n reps = absl::StrReplaceAll({{\"\", \"\"}, {\"\", \"y\"}, {\"x\", \"\"}}, &s);\n EXPECT_EQ(reps, 0);\n EXPECT_EQ(s, \"abc\");\n s = \"abc\";\n reps = absl::StrReplaceAll({{\"a\", \"x\"}, {\"b\", \"y\"}, {\"c\", \"z\"}}, &s);\n EXPECT_EQ(reps, 3);\n EXPECT_EQ(s, \"xyz\");\n s = \"zxy\";\n reps = absl::StrReplaceAll({{\"z\", \"x\"}, {\"x\", \"y\"}, {\"y\", \"z\"}}, &s);\n EXPECT_EQ(reps, 3);\n EXPECT_EQ(s, \"xyz\");\n s = \"abc\";\n reps = absl::StrReplaceAll({{\"a\", \"x\"}, {\"ab\", \"xy\"}, {\"abc\", \"xyz\"}}, &s);\n EXPECT_EQ(reps, 1);\n EXPECT_EQ(s, \"xyz\");\n s = \"Abc!\";\n reps = absl::StrReplaceAll(\n {{\"a\", \"x\"}, {\"ab\", \"xy\"}, {\"b\", \"y\"}, {\"bc\", \"yz\"}, {\"c\", \"z\"}}, &s);\n EXPECT_EQ(reps, 1);\n EXPECT_EQ(s, \"Ayz!\");\n s = \"Abc!\";\n reps = absl::StrReplaceAll(\n {{\"a\", \"x\"}, {\"ab\", \"xy\"}, {\"b\", \"y\"}, {\"bc!\", \"yz?\"}, {\"c!\", \"z;\"}}, &s);\n EXPECT_EQ(reps, 1);\n EXPECT_EQ(s, \"Ayz?\");\n s = \"ababa\";\n reps = absl::StrReplaceAll({{\"a\", \"xxx\"}, {\"b\", \"XXXX\"}}, &s);\n EXPECT_EQ(reps, 5);\n EXPECT_EQ(s, \"xxxXXXXxxxXXXXxxx\");\n s = \"aaa\";\n reps = absl::StrReplaceAll({{\"aa\", \"x\"}, {\"a\", \"X\"}}, &s);\n EXPECT_EQ(reps, 2);\n EXPECT_EQ(s, \"xX\");\n s = \"aaa\";\n reps = absl::StrReplaceAll({{\"a\", \"X\"}, {\"aa\", \"x\"}}, &s);\n EXPECT_EQ(reps, 2);\n EXPECT_EQ(s, \"xX\");\n s = \"the quick brown fox jumped over the lazy dogs\";\n reps = absl::StrReplaceAll(\n {\n {\"brown\", \"box\"},\n {\"dogs\", \"jugs\"},\n {\"fox\", \"with\"},\n {\"jumped\", \"five\"},\n {\"over\", \"dozen\"},\n {\"quick\", \"my\"},\n {\"the\", \"pack\"},\n {\"the lazy\", \"liquor\"},\n },\n &s);\n EXPECT_EQ(reps, 8);\n EXPECT_EQ(s, \"pack my box with five dozen liquor jugs\");\n}"},"Code Url":{"kind":"string","value":"https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/str_replace.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/str_replace_test.cc"},"Commit Hash":{"kind":"string","value":"03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4"}}},
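ApplySubstitutions above keeps its candidate substitutions ordered by the position of the next occurrence of each pattern, consuming the back of the vector and re-inserting it after re-searching; that ordering is what gives StrReplaceAll its leftmost-longest, single-pass semantics. A simplified standalone sketch of the same idea, written against std::string rather than the Abseil internals (it re-scans every pattern at each step, which is slower but easier to follow):

#include <string>
#include <utility>
#include <vector>

// Simplified sketch of leftmost-longest multi-replacement.
std::string ReplaceAll(
    const std::string& s,
    const std::vector<std::pair<std::string, std::string>>& subs) {
  std::string out;
  size_t pos = 0;
  while (pos < s.size()) {
    size_t best_at = std::string::npos, best = 0;
    for (size_t i = 0; i < subs.size(); ++i) {
      const std::string& old = subs[i].first;
      if (old.empty()) continue;
      size_t at = s.find(old, pos);
      // Prefer the earliest match; on ties, the longest pattern wins.
      if (at != std::string::npos &&
          (at < best_at ||
           (at == best_at && old.size() > subs[best].first.size()))) {
        best_at = at;
        best = i;
      }
    }
    if (best_at == std::string::npos) break;
    out.append(s, pos, best_at - pos);
    out.append(subs[best].second);
    pos = best_at + subs[best].first.size();
  }
  out.append(s, pos, std::string::npos);
  return out;
}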
{"rowIdx":1188,"cells":{"ID":{"kind":"string","value":"914cb876-e268-4338-974a-bdc42eb9ee1a"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"triton_tiling_propagation"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/triton_tiling_propagation.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/triton_tiling_propagation_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/triton_tiling_propagation.h\"\n#include <algorithm>\n#include <cstddef>\n#include <cstdint>\n#include <iterator>\n#include <list>\n#include <optional>\n#include <string>\n#include <utility>\n#include <variant>\n#include <vector>\n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/container/inlined_vector.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_join.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_query.h\"\n#include \"xla/layout.h\"\n#include \"xla/permutation_util.h\"\n#include \"xla/service/gpu/fusions/triton/triton_support.h\"\n#include \"xla/service/gpu/fusions/triton/triton_support_legacy.h\"\n#include \"xla/service/instruction_fusion.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/stream_executor/device_description.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nabsl::flat_hash_map<int, TensorIterationSpec::DimIterationSpec>\nFilterTrivialDims(\n const absl::flat_hash_map<int, TensorIterationSpec::DimIterationSpec>&\n dim_iter_specs) {\n absl::flat_hash_map<int, TensorIterationSpec::DimIterationSpec>\n non_trivial_dim_iteration_specs;\n for (const auto& [dim, dim_spec] : dim_iter_specs) {\n if (dim_spec.size() == 1 && dim_spec[0].count == 1) {\n continue;\n }\n non_trivial_dim_iteration_specs[dim] = dim_spec;\n }\n return non_trivial_dim_iteration_specs;\n}\n} \nconst TensorIterationSpec::DimIterationSpec* TensorIterationSpec::Find(\n const int dimension) const {\n if (auto it = dim_iteration_specs_.find(dimension);\n it != dim_iteration_specs_.end()) {\n return &it->second;\n }\n return nullptr;\n}\nstd::vector<int> TensorIterationSpec::GetDimensions() const {\n std::vector<int> result;\n result.reserve(dim_iteration_specs_.size());\n for (const auto& [dim, _] : dim_iteration_specs_) {\n result.push_back(dim);\n }\n return result;\n}\nbool TensorIterationSpec::IsPhysicallyEquivalent(\n const TensorIterationSpec& other) const {\n const absl::flat_hash_map<int, TensorIterationSpec::DimIterationSpec>\n non_trivial_dim_iteration_specs = FilterTrivialDims(dim_iteration_specs_);\n const absl::flat_hash_map<int, TensorIterationSpec::DimIterationSpec>\n other_non_trivial_dim_iteration_specs =\n FilterTrivialDims(other.dim_iteration_specs_);\n if (non_trivial_dim_iteration_specs.size() !=\n other_non_trivial_dim_iteration_specs.size()) {\n return false;\n }\n for (const auto& pair : non_trivial_dim_iteration_specs) {\n int dimension = pair.first;\n const DimIterationSpec& dim_iter_spec = pair.second;\n auto other_it = other_non_trivial_dim_iteration_specs.find(dimension);\n if (other_it == other_non_trivial_dim_iteration_specs.end()) {\n return false;\n }\n const DimIterationSpec& other_dim_iter_spec = other_it->second;\n if (dim_iter_spec.size() != other_dim_iter_spec.size()) {\n return false;\n }\n for (size_t i = 0; i < dim_iter_spec.size(); i++) {\n if (!dim_iter_spec[i].IsPhysicallyEquivalent(other_dim_iter_spec[i])) {\n return false;\n }\n }\n }\n return true;\n}\nstd::string TensorIterationSpec::IterationSpecFragment::ToString() const {\n return absl::StrCat(\"{stride=\", stride, \", count=\", count,\n \", slice_start=\", slice_start,\n \", sliced_count=\", sliced_count, \", subfragments=[\",\n absl::StrJoin(subfragments, \", \"), \"]}\");\n}\nstd::string TensorIterationSpec::ToString() const {\n return absl::StrCat(\n \"{\",\n absl::StrJoin(dim_iteration_specs_, \", \",\n [&](std::string* s, const auto& kv) {\n absl::StrAppend(\n s, kv.first, \": \", \"[\",\n absl::StrJoin(kv.second, \", \",\n [&](std::string* ss, const auto& v) {\n absl::StrAppend(ss, v.ToString());\n }),\n \"]\");\n }),\n \"}\");\n}\nnamespace triton_fusion {\nusing Fragment = DimensionOrder::Fragment;\nusing Fragments = DimensionOrder::Fragments;\nusing FragmentOrders = DimensionOrder::FragmentOrders;\n DimensionOrder DimensionOrder::FromDotOperandOrOutput(\n const HloInstruction& hlo, const int split_k_dimension_index) {\n DimensionOrder dim_order;\n dim_order.tensor_fragments_order_.reserve(hlo.shape().rank());\n for (const int i : hlo.shape().layout().minor_to_major()) {\n int target_dim_number = i;\n if (i == split_k_dimension_index) {\n CHECK(!dim_order.tensor_fragments_order_.empty())\n << \"The split-K batch dimension has be preceded by the contracting \"\n \"dimension it originates from by construction.\";\n target_dim_number =\n dim_order.tensor_fragments_order_.back().dst_dim_number();\n }\n dim_order.dim_fragments_orders_[target_dim_number].push_back(\n dim_order.tensor_fragments_order_.size());\n dim_order.tensor_fragments_order_.push_back(\n Fragment{target_dim_number, hlo.shape().dimensions(i)});\n }\n return dim_order;\n}\nstd::string DimensionOrder::Fragment::ToString() const {\n return absl::StrCat(dst_dim_number_, \":\", count_, \":\", slice_start_, \"-\",\n sliced_count_);\n}\nstd::string DimensionOrder::ToString() const {\n std::string ret = absl::StrJoin(tensor_fragments_order_, \" - \",\n [](std::string* out, const Fragment& f) {\n absl::StrAppend(out, f.ToString(), \" \");\n });\n absl::StrAppend(&ret, \"|\");\n for (const auto& [dim, fragments] : dim_fragments_orders_) {\n absl::StrAppend(&ret, dim, \":\", absl::StrJoin(fragments, \",\"), \" \");\n }\n return ret;\n}\nTensorIterationSpec DimensionOrder::ToTensorIterationSpec() const {\n const Fragments& dim_fragments = TensorFragmentsOrder();\n TensorIterationSpec tensor_spec;\n int64_t accumulated_stride = 1;\n int last_dim = -1;\n for (int dim_order_index = 0; dim_order_index < dim_fragments.size();\n ++dim_order_index) {\n const DimensionOrder::Fragment& fragment = dim_fragments[dim_order_index];\n VLOG(6) << fragment.ToString();\n TensorIterationSpec::DimIterationSpec& dim_spec =\n tensor_spec[fragment.dst_dim_number()];\n if (last_dim == fragment.dst_dim_number()) {\n if (!dim_spec.empty() && !dim_spec.back().subfragments.empty() &&\n dim_spec.back().subfragments.back() == 1) {\n dim_spec.back().subfragments.pop_back();\n }\n if (fragment.full_count() > 1) {\n CHECK(!dim_spec.empty());\n CHECK(!dim_spec.back().is_sliced())\n << \"Only the major-most fragment can have an offset.\";\n dim_spec.back().slice_start =\n fragment.slice_start() * dim_spec.back().count;\n dim_spec.back().sliced_count =\n fragment.sliced_count() * dim_spec.back().count;\n dim_spec.back().count *= fragment.full_count();\n dim_spec.back().subfragments.push_back(fragment.sliced_count());\n }\n } else {\n dim_spec.push_back(TensorIterationSpec::IterationSpecFragment{\n accumulated_stride,\n fragment.full_count(),\n fragment.slice_start(),\n fragment.sliced_count(),\n {fragment.sliced_count()}});\n }\n accumulated_stride *= fragment.full_count();\n last_dim = fragment.dst_dim_number();\n }\n for (int dim_idx : tensor_spec.GetDimensions()) {\n TensorIterationSpec::DimIterationSpec& dim_spec = tensor_spec[dim_idx];\n if (dim_spec.size() <= 1) continue;\n TensorIterationSpec::DimIterationSpec filtered_dim_spec;\n absl::c_copy_if(dim_spec, std::back_inserter(filtered_dim_spec),\n [](const TensorIterationSpec::IterationSpecFragment& f) {\n return f.count != 1;\n });\n tensor_spec[dim_idx] = filtered_dim_spec;\n }\n tensor_spec.RemoveEmptyDimensions();\n return tensor_spec;\n}\nnamespace {\nstd::optional<int> LogicalIndexOfLabeledDimension(\n const Shape& shape, const DimensionOrder& dim_order, const int label) {\n auto fragment_it = dim_order.TensorFragmentsOrder().cbegin();\n for (int dim : shape.layout().minor_to_major()) {\n const int64_t dim_size = shape.dimensions()[dim];\n int64_t fragments_size = 1;\n while (fragments_size < dim_size) {\n fragments_size *= fragment_it->full_count();\n if (fragment_it->dst_dim_number() == label) {\n return dim;\n }\n ++fragment_it;\n }\n }\n return std::nullopt;\n}\nusing Int64OrError = std::variant<int64_t, FusionDecision>;\nInt64OrError CombineSplitDimMajorPartSizeReqs(int64_t a, int64_t b) {\n if (a == b || b == kNoSplitRequirement) {\n return a;\n }\n if (a == kNoSplitRequirement) {\n return b;\n }\n return FusionDecision::Forbid(\"Conflicting splits of splittable dimension\");\n}\n} \nDotRequirementsOrError CombineDotRequirements(\n DotRequirements a, DotRequirementsOrError b_or_error) {\n if (std::holds_alternative<FusionDecision>(b_or_error)) {\n return b_or_error;\n }\n const DotRequirements& b = std::get<DotRequirements>(b_or_error);\n Int64OrError combined_size_req =\n CombineSplitDimMajorPartSizeReqs(a.splittable_dimension_major_part_size,\n b.splittable_dimension_major_part_size);\n if (std::holds_alternative<FusionDecision>(combined_size_req)) {\n return std::get<FusionDecision>(combined_size_req);\n }\n return DotRequirements(std::get<int64_t>(combined_size_req));\n}\nnamespace {\nDotRequirementsOrError GetRequirementsIfSupportedOrder(\n const DimensionOrder& order, const DotProperties& properties) {\n VLOG(8) << order.ToString();\n int64_t split_dim_major_part = kNoSplitRequirement;\n const Fragments& tensor_dim_fragments = order.TensorFragmentsOrder();\n for (const auto& [dim_index, dim_fragments] : order.DimFragmentsOrders()) {\n CHECK(!dim_fragments.empty());\n for (int i = 0; i < dim_fragments.size() - 1; ++i) {\n if (tensor_dim_fragments[dim_fragments[i]].is_sliced()) {\n return FusionDecision::Forbid(\"Sliced non-major-most fragment.\");\n }\n }\n int group_counter = 0;\n int last_seen_group_last_fragment_index = -1;\n auto fragment_it = dim_fragments.cbegin();\n while (true) {\n if (fragment_it == dim_fragments.cend()) {\n break;\n }\n int64_t grouped_size = tensor_dim_fragments[*fragment_it].full_count();\n while ((fragment_it + 1) != dim_fragments.cend() &&\n *(fragment_it + 1) == *fragment_it + 1) {\n ++fragment_it;\n grouped_size *= tensor_dim_fragments[*fragment_it].full_count();\n }\n if (grouped_size == 1) {\n ++fragment_it;\n continue;\n }\n if (last_seen_group_last_fragment_index > *fragment_it) {\n return FusionDecision::Forbid(\"Transpose within a dimension.\");\n }\n ++group_counter;\n if (group_counter > 1) {\n const int splittable_dimension_index =\n properties.splittable_dimension_index;\n if (dim_index == splittable_dimension_index) {\n if (group_counter == 2) {\n if (split_dim_major_part != kNoSplitRequirement &&\n split_dim_major_part != grouped_size) {\n return FusionDecision::Forbid(\n \"Conflicting splits of splittable dimension\");\n }\n split_dim_major_part = grouped_size;\n } else if (group_counter > 2) {\n return FusionDecision::Forbid(\n \"2nd split of a splittable dimension.\");\n }\n } else {\n return FusionDecision::Forbid(\"Unsupported split of a dimension.\");\n }\n }\n last_seen_group_last_fragment_index = *fragment_it;\n ++fragment_it;\n }\n }\n return DotRequirements(split_dim_major_part);\n}\nDotRequirementsOrError GetRequirementsIfSupportedOrders(\n const HloInstruction& hlo, const DimOrderMap& dim_orders,\n const DotProperties& properties) {\n const DotRequirements empty_requirements(kNoSplitRequirement);\n auto get_requirements =\n [&](const HloInstruction& instr) -> DotRequirementsOrError {\n if (auto it = dim_orders.find(&instr); it != dim_orders.end()) {\n return GetRequirementsIfSupportedOrder(it->second, properties);\n }\n return empty_requirements;\n };\n DotRequirements requirements = empty_requirements;\n for (const HloInstruction* operand : hlo.operands()) {\n DotRequirementsOrError requirements_or_error =\n CombineDotRequirements(requirements, get_requirements(*operand));\n if (std::holds_alternative<FusionDecision>(requirements_or_error)) {\n return requirements_or_error;\n }\n requirements = std::get<DotRequirements>(requirements_or_error);\n }\n return CombineDotRequirements(requirements, get_requirements(hlo));\n}\nDimOrderMap GetPropagatedDimOrdersForElementwise(\n const HloInstruction& hlo, TransformDirection direction,\n const DimensionOrder& src_dim_order) {\n if (direction == TransformDirection::kOutputToInput) {\n DimOrderMap map;\n for (const HloInstruction* operand : hlo.operands()) {\n map.insert({operand, src_dim_order});\n }\n return map;\n }\n return {{&hlo, src_dim_order}};\n}\nconst HloInstruction& GetSourceHlo(const HloInstruction& hlo,\n TransformDirection direction) {\n CHECK_GE(hlo.operand_count(), 1);\n if (direction == TransformDirection::kOutputToInput) {\n return hlo;\n }\n return *hlo.operand(0);\n}\nusing ConstInstructionVector = absl::InlinedVector<const HloInstruction*, 2>;\nConstInstructionVector GetDestHlos(const HloInstruction& hlo,\n TransformDirection direction) {\n if (direction == TransformDirection::kInputToOutput) {\n return {&hlo};\n }\n ConstInstructionVector hlos;\n hlos.reserve(hlo.operands().size());\n for (const HloInstruction* operand : hlo.operands()) {\n hlos.push_back(operand);\n }\n return hlos;\n}\nconst HloInstruction& GetDestHlo(const HloInstruction& hlo,\n TransformDirection direction) {\n CHECK_EQ(hlo.operand_count(), 1);\n if (direction == TransformDirection::kInputToOutput) {\n return hlo;\n }\n return *hlo.operand(0);\n}\nDimOrderMapOrError GetPropagatedDimOrdersForBitcast(\n const HloInstruction& hlo, const TransformDirection direction,\n const DimensionOrder& src_dim_order, const DotProperties& properties) {\n const HloInstruction& dst = GetDestHlo(hlo, direction);\n const Shape& dst_shape = dst.shape();\n const Fragments& src_fragments_order = src_dim_order.TensorFragmentsOrder();\n DimOrderMap dst_dim_orders;\n DimensionOrder& dst_dim_order =\n dst_dim_orders.insert({&dst, DimensionOrder()}).first->second;\n Fragments& dst_fragments_order = dst_dim_order.TensorFragmentsOrder();\n int64_t dst_remaining_size = 1;\n absl::flat_hash_map<const Fragment*, std::vector<int>> src_to_dst;\n auto dst_dim_it = dst_shape.layout().minor_to_major().cbegin();\n const auto dst_dim_end = dst_shape.layout().minor_to_major().cend();\n for (auto src_dim = src_fragments_order.cbegin();\n src_dim != src_fragments_order.cend(); ++src_dim) {\n auto add_new_fragment = [&](const Fragment& fragment) {\n dst_fragments_order.push_back(fragment);\n src_to_dst[&*src_dim].push_back(dst_fragments_order.size() - 1);\n };\n if (dst_remaining_size >= src_dim->full_count()) {\n if (dst_remaining_size % src_dim->full_count()) {\n return FusionDecision::Forbid(\"Unsupported bitcast\");\n }\n add_new_fragment(*src_dim);\n dst_remaining_size /= src_dim->full_count();\n } else {\n int64_t src_remaining_size = src_dim->full_count();\n if (dst_remaining_size > 1) {\n if (src_remaining_size % dst_remaining_size || (src_dim->is_sliced())) {\n return FusionDecision::Forbid(\"Unsupported bitcast\");\n }\n add_new_fragment(\n Fragment{src_dim->dst_dim_number(), dst_remaining_size});\n src_remaining_size /= dst_remaining_size;\n dst_remaining_size = 1;\n }\n while (src_remaining_size > 1) {\n CHECK(dst_dim_it != dst_dim_end);\n int64_t dst_dim_size = dst_shape.dimensions(*dst_dim_it);\n int64_t new_fragment_size = dst_dim_size;\n if (dst_dim_size > src_remaining_size) {\n if (dst_dim_size % src_remaining_size) {\n return FusionDecision::Forbid(\"Unsupported bitcast\");\n }\n dst_remaining_size = dst_dim_size / src_remaining_size;\n new_fragment_size = src_remaining_size;\n }\n if (src_dim->is_sliced()) {\n return FusionDecision::Forbid(\"Unsupported bitcast\");\n }\n add_new_fragment(\n Fragment{src_dim->dst_dim_number(), new_fragment_size});\n src_remaining_size /= new_fragment_size;\n ++dst_dim_it;\n }\n }\n }\n CHECK_EQ(dst_remaining_size, 1);\n while (dst_dim_it != dst_dim_end) {\n if (dst_shape.dimensions(*dst_dim_it) != 1) {\n return FusionDecision::Forbid(\"Unsupported bitcast\");\n }\n if (!dst_fragments_order.empty()) {\n dst_fragments_order.push_back(\n Fragment{dst_fragments_order.back().dst_dim_number(), 1});\n src_to_dst[&src_fragments_order.back()].push_back(\n dst_fragments_order.size() - 1);\n }\n ++dst_dim_it;\n }\n FragmentOrders& dst_dim_fragment_orders = dst_dim_order.DimFragmentsOrders();\n for (const auto& [dim_index, dim_sequence] :\n src_dim_order.DimFragmentsOrders()) {\n std::vector<int>& dst = dst_dim_fragment_orders[dim_index];\n dst.reserve(dim_sequence.size());\n for (const int src : dim_sequence) {\n std::copy(src_to_dst[&src_fragments_order[src]].cbegin(),\n src_to_dst[&src_fragments_order[src]].cend(),\n std::back_inserter(dst));\n }\n }\n return dst_dim_orders;\n}\nDimOrderMapOrError GetPropagatedDimOrdersForDimAlteringOp(\n const HloInstruction& hlo, const TransformDirection direction,\n const DimensionOrder& src_dim_order, const DotProperties& properties) {\n std::list<Fragment> new_fragments;\n const HloInstruction& src = GetSourceHlo(hlo, direction);\n Fragments src_fragments_order = src_dim_order.TensorFragmentsOrder();\n if (hlo.opcode() == HloOpcode::kSlice &&\n ShapeUtil::IsEffectiveScalar(hlo.shape())) {\n return FusionDecision::Forbid(\"Slice to scalar is not implemented yet.\");\n }\n std::vector<std::vector<Fragment*>> src_physical;\n src_physical.reserve(src.shape().rank());\n if (src_fragments_order.size() < src.shape().rank()) {\n return FusionDecision::Forbid(\n \"Cannot propagate further from trivial sized tensor\");\n }\n auto src_fragment_it = src_fragments_order.begin();\n for (int64_t dim_index : src.shape().layout().minor_to_major()) {\n const int64_t dim_size = src.shape().dimensions(dim_index);\n int64_t subdim_size_accumulator = 1;\n std::vector<Fragment*> subdim_group;\n do {\n CHECK(src_fragment_it != src_fragments_order.end());\n subdim_size_accumulator *= src_fragment_it->full_count();\n subdim_group.push_back(&*src_fragment_it);\n ++src_fragment_it;\n } while (subdim_size_accumulator < dim_size);\n CHECK_EQ(subdim_size_accumulator, dim_size);\n src_physical.push_back(subdim_group);\n }\n std::vector<std::vector<Fragment*>> src_logical;\n src_logical.resize(src_physical.size());\n for (int i = 0; i < src_physical.size(); ++i) {\n src_logical[src.shape().layout().minor_to_major(i)] = src_physical[i];\n }\n DimOrderMap dst_dim_orders;\n int64_t concat_accumulated_size = 0;\n for (const HloInstruction* dst : GetDestHlos(hlo, direction)) {\n DimensionOrder& dst_dim_order =\n dst_dim_orders.insert({dst, DimensionOrder()}).first->second;\n std::vector<std::vector<Fragment*>> dst_logical;\n if (hlo.opcode() == HloOpcode::kTranspose) {\n const auto* transpose = Cast<HloTransposeInstruction>(&hlo);\n std::vector<int64_t> permutation(transpose->dimensions().cbegin(),\n transpose->dimensions().cend());\n if (direction == TransformDirection::kInputToOutput) {\n permutation = InversePermutation(permutation);\n }\n dst_logical.resize(permutation.size());\n for (int i = 0; i < permutation.size(); ++i) {\n dst_logical[permutation[i]] = src_logical[i];\n }\n } else if (hlo.opcode() == HloOpcode::kBroadcast) {\n const auto* broadcast = Cast<HloBroadcastInstruction>(&hlo);\n dst_logical.resize(broadcast->dimensions().size());\n for (int i = 0; i < broadcast->dimensions().size(); ++i) {\n dst_logical[i] = src_logical[broadcast->dimensions()[i]];\n }\n } else if (hlo.opcode() == HloOpcode::kReduce) {\n if (dst != &hlo && hlo.operand_index(dst) == 1) {\n continue;\n }\n const auto* reduce = Cast<HloReduceInstruction>(&hlo);\n dst_logical.resize(src_logical.size() + reduce->dimensions().size());\n if (reduce->dimensions().size() != 1) {\n return FusionDecision::Forbid(\"Unsupported reduction.\");\n } else if (reduce->dimensions().front() !=\n reduce->operand(0)->shape().rank() - 1) {\n return FusionDecision::Forbid(\"Only row reductions are supported.\");\n }\n } else if (hlo.opcode() == HloOpcode::kConcatenate) {\n dst_logical.resize(src_logical.size());\n for (int i = 0; i < src_logical.size(); ++i) {\n if (i == hlo.concatenate_dimension()) {\n if (src_logical[i].size() != 1 || src_logical[i][0]->is_sliced()) {\n return FusionDecision::Forbid(\"Unsupported concatenation.\");\n }\n const Fragment& src_fragment = *src_logical[i][0];\n Fragment& dst_fragment = new_fragments.emplace_back(\n src_fragment.dst_dim_number(), dst->shape().dimensions(i));\n dst_fragment.set_slice(-concat_accumulated_size,\n dst->shape().dimensions(i));\n concat_accumulated_size += dst->shape().dimensions(i);\n dst_logical[i].push_back(&dst_fragment);\n } else {\n dst_logical[i] = src_logical[i];\n }\n }\n } else if (hlo.opcode() == HloOpcode::kCopy) {\n CHECK(ShapeUtil::SameDimensions(src.shape(), dst->shape()));\n dst_logical = src_logical;\n } else if (hlo.opcode() == HloOpcode::kPad) {\n if (dst != &hlo && hlo.operand_index(dst) == 1) {\n continue;\n }\n const auto* pad = Cast<HloPadInstruction>(&hlo);\n dst_logical.resize(src_logical.size());\n for (int i = 0; i < src_logical.size(); ++i) {\n const int padding =\n pad->padding_config().dimensions(i).edge_padding_high();\n CHECK_EQ(pad->padding_config().dimensions(i).edge_padding_low(), 0);\n CHECK_EQ(pad->padding_config().dimensions(i).interior_padding(), 0);\n if (padding == 0) {\n dst_logical[i] = src_logical[i];\n } else {\n const std::vector<Fragment*>& fragments = src_logical[i];\n CHECK_GE(fragments.size(), 2);\n CHECK(absl::c_all_of(fragments, [&](const Fragment* fragment) {\n return fragment->dst_dim_number() ==\n fragments.front()->dst_dim_number();\n }));\n std::vector<Fragment*> non_trivial_fragments;\n absl::c_copy_if(fragments, std::back_inserter(non_trivial_fragments),\n [](const Fragment* fragment) {\n return fragment->full_count() > 1;\n });\n CHECK_EQ(non_trivial_fragments.size(), 2);\n new_fragments.emplace_back(\n non_trivial_fragments[0]->dst_dim_number(),\n non_trivial_fragments[0]->full_count() *\n non_trivial_fragments[1]->full_count() -\n padding);\n dst_logical[i] = {&new_fragments.back()};\n }\n }\n } else if (hlo.opcode() == HloOpcode::kSlice) {\n const auto slice = Cast<HloSliceInstruction>(&hlo);\n dst_logical.resize(src_logical.size());\n for (int dim = 0; dim < src_logical.size(); ++dim) {\n dst_logical[dim] = src_logical[dim];\n if (slice->slice_limits(dim) - slice->slice_starts(dim) !=\n dst->shape().dimensions(dim)) {\n if (dst_logical[dim].size() > 1) {\n return FusionDecision::Forbid(\"Slicing of fragmented dimension.\");\n }\n auto fragment = dst_logical[dim].front();\n fragment->set_count(dst->shape().dimensions(dim));\n fragment->set_slice(\n fragment->slice_start() + slice->slice_starts(dim),\n fragment->sliced_count());\n }\n }\n } else if (hlo.opcode() == HloOpcode::kDynamicSlice) {\n if (dst != &hlo && hlo.operand_index(dst) >= 1) {\n continue;\n }\n const auto dynamic_slice = Cast<HloDynamicSliceInstruction>(&hlo);\n dst_logical.resize(src_logical.size());\n for (int dim = 0; dim < src_logical.size(); ++dim) {\n dst_logical[dim] = src_logical[dim];\n if (dynamic_slice->slice_sizes(dim) != dst->shape().dimensions(dim)) {\n if (dst_logical[dim].size() > 1) {\n return FusionDecision::Forbid(\"Slicing of fragmented dimension.\");\n }\n auto fragment = dst_logical[dim].front();\n fragment->set_count(dst->shape().dimensions(dim));\n fragment->set_slice(fragment->slice_start(),\n dst->shape().dimensions(dim));\n }\n }\n } else {\n return FusionDecision::Forbid(\"Function called on a wrong instruction.\");\n }\n absl::flat_hash_map<const Fragment*, int> src_to_dst;\n Fragments& dst_fragments_order = dst_dim_order.TensorFragmentsOrder();\n FragmentOrders& dst_dim_fragments_order =\n dst_dim_order.DimFragmentsOrders();\n absl::flat_hash_set<int> dim_numbers_present_in_dst;\n for (const int64_t dim_idx : dst->shape().layout().minor_to_major()) {\n for (const Fragment* subdim : dst_logical[dim_idx]) {\n dst_fragments_order.push_back(*subdim);\n src_to_dst[subdim] = dst_fragments_order.size() - 1;\n dim_numbers_present_in_dst.insert(subdim->dst_dim_number());\n }\n }\n for (const auto& [dim_index, dim_sequence] :\n src_dim_order.DimFragmentsOrders()) {\n for (const int fragment_number : dim_sequence) {\n const auto it = src_to_dst.find(&src_fragments_order[fragment_number]);\n if (it == src_to_dst.cend()) {\n if (hlo.opcode() == HloOpcode::kBroadcast &&\n src_fragments_order[fragment_number].full_count() > 1 &&\n dim_numbers_present_in_dst.contains(dim_index)) {\n return FusionDecision::Forbid(\"Unsupported broadcast\");\n }\n continue;\n }\n dst_dim_fragments_order[dim_index].push_back(it->second);\n }\n }\n }\n return dst_dim_orders;\n}\nDimOrderMapOrError GetPropagatedDimOrders(const HloInstruction& hlo,\n const TransformDirection direction,\n const DimensionOrder& src_dim_order,\n const DotProperties& properties) {\n VLOG(7) << \"Analyzing \" << hlo.ToString();\n if (hlo.opcode() != HloOpcode::kParameter &&\n direction == TransformDirection::kOutputToInput &&\n absl::c_any_of(hlo.users(), [](const HloInstruction* user) {\n return (user->opcode() == HloOpcode::kConcatenate ||\n user->opcode() == HloOpcode::kDynamicSlice);\n })) {\n return FusionDecision::Forbid(\n \"No fusion into concatenations or dynamic slice.\");\n }\n if (hlo.opcode() == HloOpcode::kParameter ||\n hlo_query::IsScalarConstant(&hlo)) {\n CHECK(direction == TransformDirection::kOutputToInput);\n return DimOrderMap{};\n } else if (hlo.opcode() == HloOpcode::kTranspose ||\n hlo.opcode() == HloOpcode::kCopy) {\n return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,\n properties);\n } else if (hlo.opcode() == HloOpcode::kBroadcast) {\n if (direction != TransformDirection::kOutputToInput) {\n return FusionDecision::Forbid(\"Unsupported broadcast direction.\");\n }\n return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,\n properties);\n } else if (hlo.opcode() == HloOpcode::kPad) {\n if (direction != TransformDirection::kOutputToInput) {\n return FusionDecision::Forbid(\"Unsupported pad direction.\");\n }\n return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,\n properties);\n } else if (hlo.operand_count() > 0 &&\n legacy_triton::IsTritonSupportedElementwiseUpToFloatNormalization(\n hlo.opcode(), hlo.operand(0)->shape().element_type())) {\n return GetPropagatedDimOrdersForElementwise(hlo, direction, src_dim_order);\n } else if (hlo.opcode() == HloOpcode::kBitcast) {\n return GetPropagatedDimOrdersForBitcast(hlo, direction, src_dim_order,\n properties);\n } else if (hlo.opcode() == HloOpcode::kSlice) {\n if (direction != TransformDirection::kOutputToInput) {\n return FusionDecision::Forbid(\"Unsupported slice direction.\");\n }\n return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,\n properties);\n } else if (hlo.opcode() == HloOpcode::kDynamicSlice &&\n direction == TransformDirection::kOutputToInput) {\n if (CodegenDecision decision = legacy_triton::IsTritonSupportedDynamicSlice(\n *Cast<HloDynamicSliceInstruction>(&hlo));\n !decision.CanFuse()) {\n return decision;\n }\n return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,\n properties);\n } else if (hlo.opcode() == HloOpcode::kReshape) {\n if (!ShapeUtil::ReshapeIsBitcast(hlo.operand(0)->shape(), hlo.shape())) {\n return FusionDecision::Forbid(\"Non-bitcast reshape.\");\n }\n return GetPropagatedDimOrdersForBitcast(hlo, direction, src_dim_order,\n properties);\n } else if (hlo.opcode() == HloOpcode::kConcatenate &&\n direction == TransformDirection::kOutputToInput) {\n int64_t noncontracting_dim_label = properties.noncontracting_dimension;\n const FragmentOrders& src_dim_fragments_orders =\n src_dim_order.DimFragmentsOrders();\n auto noncontracting_dim_fragment_order_it =\n src_dim_fragments_orders.find(noncontracting_dim_label);\n if (noncontracting_dim_fragment_order_it !=\n src_dim_fragments_orders.end()) {\n if (noncontracting_dim_fragment_order_it->second.size() > 1) {\n return FusionDecision::Forbid(\n \"Concatenations on split non-contracting dimensions are \"\n \"unsupported.\");\n }\n }\n auto dim = LogicalIndexOfLabeledDimension(hlo.shape(), src_dim_order,\n noncontracting_dim_label);\n if (!dim.has_value() || dim.value() != hlo.concatenate_dimension()) {\n return FusionDecision::Forbid(\"Unsupported concatenation.\");\n }\n if (absl::c_any_of(hlo.operands(), [&hlo](const HloInstruction* operand) {\n constexpr int kMinConcatFragmentSize = 64;\n return operand->shape().dimensions(hlo.concatenate_dimension()) %\n kMinConcatFragmentSize !=\n 0;\n })) {\n return FusionDecision::Forbid(\n \"At least one operand of concatenation can not be perfectly tiled.\");\n }\n return GetPropagatedDimOrdersForDimAlteringOp(hlo, direction, src_dim_order,\n properties);\n }\n return FusionDecision::Forbid(\"Unimplemented instruction.\");\n}\nint64_t InputMinusOutputBytes(const HloInstruction& hlo) {\n CHECK(!hlo.shape().IsTuple());\n int64_t input_size = 0;\n for (const HloInstruction* operand : hlo.operands()) {\n CHECK(!operand->shape().IsTuple());\n input_size += ShapeUtil::ByteSizeOf(operand->shape());\n }\n return input_size - ShapeUtil::ByteSizeOf(hlo.shape());\n}\nbool CanNotBeFusedIntoAUser(const HloInstruction& hlo) {\n return hlo.IsRoot() || (hlo.user_count() == 1 && hlo.users()[0]->IsRoot() &&\n hlo.users()[0]->opcode() == HloOpcode::kTuple);\n}\nconstexpr int kIoToleranceBytes = 1024;\nbool IsInputWorthFusing(const HloInstruction& hlo) {\n if (InputMinusOutputBytes(hlo) <= kIoToleranceBytes) {\n return true;\n }\n if (hlo.user_count() > 1) {\n return false;\n }\n if (hlo.opcode() == HloOpcode::kSlice &&\n hlo_query::AllOperandsAreParametersOrConstants(hlo)) {\n return true;\n }\n return hlo_query::AllOperandsAreParametersOrConstantsWithSingleUser(hlo);\n}\nbool IsOutputWorthFusing(const HloInstruction& hlo) {\n return CanNotBeFusedIntoAUser(hlo) ||\n InputMinusOutputBytes(hlo) >= -kIoToleranceBytes;\n}\nFusionDecision IsConversionWorthFusing(const HloInstruction& input,\n se::GpuComputeCapability gpu_version) {\n if (ShapeUtil::ByteSizeOf(input.operand(0)->shape()) >\n ShapeUtil::ByteSizeOf(input.shape())) {\n return FusionDecision::Forbid(\"Narrowing conversion.\");\n }\n return FusionDecision::Allow();\n}\n} \nDimOrdersAndReqsOrError GetPropagatedDimOrdersAndRequirements(\n const HloInstruction& hlo, const DimensionOrder& src_dim_order,\n TransformDirection direction, const DotProperties& properties) {\n DimOrderMapOrError propagated_dim_orders_or_error =\n GetPropagatedDimOrders(hlo, direction, src_dim_order, properties);\n if (std::holds_alternative<FusionDecision>(propagated_dim_orders_or_error)) {\n return std::get<FusionDecision>(propagated_dim_orders_or_error);\n }\n DimOrderMap propagated_dim_orders =\n std::move(std::get<DimOrderMap>(propagated_dim_orders_or_error));\n DotRequirementsOrError requirements_or_error =\n GetRequirementsIfSupportedOrders(hlo, propagated_dim_orders, properties);\n if (std::holds_alternative<FusionDecision>(requirements_or_error)) {\n return std::get<FusionDecision>(requirements_or_error);\n }\n return DimOrdersAndReqs{propagated_dim_orders,\n std::get<DotRequirements>(requirements_or_error)};\n}\nDimOrdersAndReqsOrError\nGetPropagatedDimOrdersAndRequirementsIfProfitablyFusible(\n const HloInstruction& hlo, TransformDirection transform_direction,\n const std::optional<int>& src_operand_index,\n const DimensionOrder& src_dim_order,\n const se::GpuComputeCapability& gpu_version,\n const DotProperties& properties) {\n CHECK_EQ(transform_direction == TransformDirection::kInputToOutput,\n src_operand_index.has_value());\n if (hlo.opcode() == HloOpcode::kTuple ||\n hlo.opcode() == HloOpcode::kGetTupleElement) {\n return FusionDecision::Forbid(\"Unsupported instruction.\");\n }\n if (hlo.opcode() == HloOpcode::kReduce ||\n hlo.opcode() == HloOpcode::kAllReduce ||\n hlo.opcode() == HloOpcode::kAllReduceStart ||\n hlo.opcode() == HloOpcode::kAllReduceDone) {\n return FusionDecision::Forbid(\"Reductions are not fused yet.\");\n }\n if (hlo.opcode() == HloOpcode::kPad) {\n return FusionDecision::Forbid(\"Pads are not fused yet.\");\n }\n if (auto decision =\n legacy_triton::IsTritonSupportedInstruction(hlo, gpu_version);\n !decision.CanFuse()) {\n return decision;\n }\n DimOrdersAndReqsOrError result_or_error =\n GetPropagatedDimOrdersAndRequirements(hlo, src_dim_order,\n transform_direction, properties);\n if (std::holds_alternative<FusionDecision>(result_or_error)) {\n VLOG(5) << \"Not fusing \" << hlo.ToString()\n << \" to the output due to the decision: \"\n << std::get<FusionDecision>(result_or_error).Explain();\n return result_or_error;\n }\n DimOrdersAndReqs dim_orders_and_requirements =\n std::move(std::get<DimOrdersAndReqs>(result_or_error));\n int fusion_level =\n hlo.GetModule()->config().debug_options().xla_gpu_triton_fusion_level();\n if (transform_direction == TransformDirection::kOutputToInput) {\n if (fusion_level < 2) {\n if (hlo.opcode() == HloOpcode::kConvert) {\n if (FusionDecision decision = IsConversionWorthFusing(hlo, gpu_version);\n !decision) {\n return decision;\n }\n } else if (hlo.IsElementwise() && hlo.opcode() != HloOpcode::kCopy) {\n return FusionDecision::Forbid(\"Ignored elementwise operation\");\n }\n } else {\n bool accepted = false;\n if (hlo.IsElementwise() && hlo.operand_count() == 2) {\n for (const HloInstruction* operand : hlo.operands()) {\n if (operand->opcode() == HloOpcode::kBroadcast &&\n (operand->operand(0)->opcode() == HloOpcode::kParameter ||\n operand->operand(0)->opcode() == HloOpcode::kConstant) &&\n std::holds_alternative<DimOrdersAndReqs>(\n GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible(\n *operand, TransformDirection::kOutputToInput,\n std::nullopt,\n dim_orders_and_requirements.dim_orders.at(operand),\n gpu_version, properties))) {\n accepted = true;\n break;\n }\n }\n }\n if (!accepted && !IsInputWorthFusing(hlo)) {\n return FusionDecision::Forbid(\n \"Not obviously profitable to fuse as input.\");\n }\n }\n } else {\n if (fusion_level < 2) {\n return FusionDecision::Forbid(\n \"Skipping fusing outputs at low fusion levels.\");\n }\n for (int i = 0; i < hlo.operand_count(); ++i) {\n const HloInstruction* operand = hlo.operand(i);\n if (i == *src_operand_index) {\n continue;\n }\n if ((operand->opcode() == HloOpcode::kBroadcast &&\n ShapeUtil::IsScalar(operand->operand(0)->shape())) ||\n operand->opcode() == HloOpcode::kParameter) {\n continue;\n }\n return FusionDecision::Forbid(\n \"Has multiple inputs - not properly analyzed yet.\");\n }\n if (!IsOutputWorthFusing(hlo)) {\n return FusionDecision::Forbid(\n \"Not obviously profitable to fuse as output.\");\n }\n }\n return dim_orders_and_requirements;\n}\n} \n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/triton_tiling_propagation.h\"\n#include <vector>\n#include <gtest/gtest.h>\n#include \"xla/tests/hlo_test_base.h\"\nnamespace xla::gpu {\nnamespace {\nusing TritonTilingPropagationTest = HloTestBase;\nusing triton_fusion::DimensionOrder;\nDimensionOrder FromFragments(DimensionOrder::Fragments fragments) {\n DimensionOrder dim_order;\n DimensionOrder::Fragments& tensor_fragments_order =\n dim_order.TensorFragmentsOrder();\n DimensionOrder::FragmentOrders& dim_fragments_orders =\n dim_order.DimFragmentsOrders();\n for (const 
DimensionOrder::Fragment& fragment : fragments) {\n tensor_fragments_order.push_back(fragment);\n dim_fragments_orders[fragment.dst_dim_number()].push_back(\n tensor_fragments_order.size());\n }\n return dim_order;\n}\nTEST_F(\n TritonTilingPropagationTest,\n DimensionOrdersRemainPhysicallyEquivalentAfterInsertingTrivialDimensions) {\n DimensionOrder::Fragment fragment_1(0, 97);\n DimensionOrder::Fragment fragment_2(0, 1);\n DimensionOrder dimension_order_1 = FromFragments({fragment_1, fragment_2});\n DimensionOrder::Fragment fragment_3(0, 97);\n DimensionOrder::Fragment fragment_4(1, 1);\n DimensionOrder dimension_order_2 = FromFragments({fragment_3, fragment_4});\n EXPECT_TRUE(dimension_order_1.IsPhysicallyEquivalent(dimension_order_2));\n}\nTEST_F(\n TritonTilingPropagationTest,\n IterationSpecsRemainPhysicallyEquivalentAfterInsertingTrivialDimensions) {\n TensorIterationSpec::IterationSpecFragment fragment_1 = {\n 1, 97, 0, 97,\n {97}};\n TensorIterationSpec spec_1;\n spec_1[0].push_back(fragment_1);\n TensorIterationSpec::IterationSpecFragment fragment_2 = {\n 1, 97, 0, 97,\n {97}};\n TensorIterationSpec::IterationSpecFragment fragment_3 = {\n 97, 1, 0, 1,\n {1}};\n TensorIterationSpec spec_2;\n spec_2[0].push_back(fragment_2);\n spec_2[1].push_back(fragment_3);\n EXPECT_TRUE(spec_1.IsPhysicallyEquivalent(spec_2));\n}\nTEST_F(TritonTilingPropagationTest,\n DimensionsShouldNotBeRemovedByToTensorIterationSpec) {\n DimensionOrder::Fragment fragment_0(0, 97);\n DimensionOrder::Fragment fragment_1(1, 1);\n DimensionOrder dimension_order = FromFragments({fragment_0, fragment_1});\n TensorIterationSpec spec = dimension_order.ToTensorIterationSpec();\n const TensorIterationSpec::DimIterationSpec* dim_spec_0 = spec.Find(0);\n EXPECT_NE(dim_spec_0, nullptr);\n EXPECT_EQ(dim_spec_0->size(), 1);\n EXPECT_EQ(dim_spec_0->at(0).count, 97);\n const TensorIterationSpec::DimIterationSpec* dim_spec_1 = spec.Find(1);\n EXPECT_NE(dim_spec_1, nullptr);\n EXPECT_EQ(dim_spec_1->size(), 1);\n EXPECT_EQ(dim_spec_1->at(0).count, 1);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/triton_tiling_propagation.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/triton_tiling_propagation_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1189,"cells":{"ID":{"kind":"string","value":"94fd6b98-724b-4a98-b0f9-05dcbf12c062"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"async_wrapper"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/transforms/async_wrapper.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/transforms/async_wrapper_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/transforms/async_wrapper.h\"\n#include \n#include \n#include \n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/util.h\"\n#include 
\"tsl/platform/errors.h\"\nnamespace xla::gpu {\nabsl::StatusOr AsyncWrapper::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n bool changed = false;\n XLA_VLOG_LINES(\n 1, absl::StrCat(\"AsyncWrapper will process the following module:\\n\",\n module->ToString()));\n std::deque computations;\n computations.push_back(module->entry_computation());\n while (!computations.empty()) {\n HloComputation* computation = computations.front();\n computations.pop_front();\n for (HloInstruction* instruction :\n computation->MakeInstructionPostOrder()) {\n if (predicate_(instruction)) {\n XLA_VLOG_LINES(\n 1, absl::StrCat(\n \"AsyncWrapper will make the following instruction async:\\n\",\n instruction->ToString()));\n TF_RETURN_IF_ERROR(\n computation\n ->CreateAsyncInstructions(instruction,\n {ShapeUtil::MakeScalarShape(U32)})\n .status());\n changed = true;\n continue;\n }\n if (instruction->opcode() == HloOpcode::kCall) {\n std::copy(instruction->called_computations().begin(),\n instruction->called_computations().end(),\n std::back_inserter(computations));\n }\n }\n }\n XLA_VLOG_LINES(\n 1,\n absl::StrCat(\"AsyncWrapper finished processing the following module:\\n\",\n module->ToString()));\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/transforms/async_wrapper.h\"\n#include \n#include \n#include \n#include \n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/pass/hlo_pass_interface.h\"\n#include \"xla/literal.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tests/literal_test_util.h\"\n#include \"xla/tests/verified_hlo_module.h\"\n#include \"tsl/platform/status_matchers.h\"\nnamespace xla::gpu {\nnamespace {\nusing ::tsl::testing::IsOkAndHolds;\nclass AsyncWrapperTest : public HloTestBase {};\nint CountAsyncInstructions(HloComputation* computation) {\n int count = 0;\n for (const HloInstruction* instruction : computation->instructions()) {\n if (instruction->IsAsynchronous()) ++count;\n }\n return count;\n}\nTEST_F(AsyncWrapperTest, BasicFusion) {\n const char* hlo_text = R\"(\n HloModule m\n double1 {\n p0 = f32[1] parameter(0)\n ROOT add = f32[1] add(p0, p0)\n }\n double2 {\n p0 = f32[1] parameter(0)\n ROOT add = f32[1] add(p0, p0)\n }\n ENTRY main {\n p0 = f32[1] parameter(0)\n agg1 = f32[1] fusion(p0), kind=kLoop, calls=double1\n agg2 = f32[1] fusion(p0), kind=kLoop, calls=double2\n ROOT done = f32[1] add(agg1, agg2)\n })\";\n std::unique_ptr module =\n ParseAndReturnVerifiedModule(hlo_text).value();\n AsyncWrapper wrapper([](const HloInstruction* instruction) {\n return instruction->opcode() == HloOpcode::kFusion;\n });\n EXPECT_THAT(wrapper.HloModulePass::Run(module.get()), IsOkAndHolds(true));\n EXPECT_EQ(CountAsyncInstructions(module->entry_computation()), 4);\n Literal argument = LiteralUtil::CreateR1({1.0});\n Literal expected = LiteralUtil::CreateR1({4.0});\n Literal result = ExecuteNoHloPasses(std::move(module), {&argument});\n EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/async_wrapper.cc"},"Test Code 
Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/async_wrapper_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1190,"cells":{"ID":{"kind":"string","value":"72eef4bf-eeb4-4ef8-b020-011e9d46a778"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"broadcast_to_op"},"File Path in Repository":{"kind":"string","value":"tensorflow/compiler/tf2xla/kernels/broadcast_to_op.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/kernels/broadcast_to_op_test.cc"},"Code":{"kind":"string","value":"#include \n#include \"tensorflow/compiler/tf2xla/lib/broadcast.h\"\n#include \"tensorflow/compiler/tf2xla/xla_op_kernel.h\"\n#include \"tensorflow/compiler/tf2xla/xla_op_registry.h\"\n#include \"xla/hlo/builder/xla_builder.h\"\n#include \"tensorflow/core/platform/macros.h\"\n#include \"tensorflow/core/platform/types.h\"\nnamespace tensorflow {\nnamespace {\nclass BroadcastToOp : public XlaOpKernel {\n public:\n explicit BroadcastToOp(OpKernelConstruction* context)\n : XlaOpKernel(context) {}\n void Compile(XlaOpKernelContext* context) override {\n TensorShape output_shape;\n OP_REQUIRES_OK(context,\n context->ConstantInputAsShape(\n 1, &output_shape, xla::ValueInferenceMode::kUpperBound));\n auto output_status_or =\n BroadcastTo(context->Input(0), output_shape.dim_sizes());\n OP_REQUIRES_OK(context, output_status_or.status());\n auto output = output_status_or.value();\n std::vector dynamic_dims;\n OP_REQUIRES_OK(\n context, context->ResolveInputDynamismIntoPredVector(1, &dynamic_dims));\n for (int64_t dim = 0; dim < dynamic_dims.size(); ++dim) {\n if (dynamic_dims[dim]) {\n output = xla::SetDimensionSize(\n output,\n xla::Reshape(xla::Slice(context->Input(1), {dim}, {dim + 1}, {1}),\n {}),\n dim);\n }\n }\n context->SetOutput(0, output);\n }\n};\nREGISTER_XLA_OP(Name(\"BroadcastTo\").CompileTimeConstantInput(\"shape\"),\n BroadcastToOp);\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/common_runtime/kernel_benchmark_testlib.h\"\n#include \"tensorflow/core/framework/node_def_builder.h\"\n#include \"tensorflow/core/framework/tensor.h\"\n#include \"tensorflow/core/graph/node_builder.h\"\n#include \"tensorflow/core/kernels/ops_testutil.h\"\n#include \"tensorflow/core/platform/test.h\"\n#include \"tensorflow/core/platform/test_benchmark.h\"\nnamespace tensorflow {\ntemplate \nstatic Graph* BroadcastTo(int dim0, int dim1, InputShape input_shape) {\n Graph* g = new Graph(OpRegistry::Global());\n Tensor input(DT_FLOAT, input_shape(dim0, dim1));\n input.flat() = input.flat().setRandom();\n Tensor shape(DT_INT32, TensorShape({2}));\n shape.flat()(0) = dim0;\n shape.flat()(1) = dim1;\n Node* node;\n TF_CHECK_OK(NodeBuilder(g->NewName(\"n\"), \"BroadcastTo\")\n .Input(test::graph::Constant(g, input))\n .Input(test::graph::Constant(g, shape))\n .Attr(\"T\", DT_FLOAT)\n .Attr(\"Tidx\", DT_INT32)\n .Finalize(g, &node));\n return g;\n}\n#define BM_BroadcastTo_InnerDim(DIM0, DIM1, type) \\\n static void BM_BroadcastTo_Inner##_##type##_##DIM0##_##DIM1( \\\n ::testing::benchmark::State& state) { \\\n test::Benchmark(#type, \\\n BroadcastTo(DIM0, DIM1, \\\n [](int dim0, int dim1) { \\\n return TensorShape({dim0, 1}); \\\n }), \\\n false) \\\n .Run(state); \\\n 
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * DIM0 * \\\n DIM1); \\\n } \\\n BENCHMARK(BM_BroadcastTo_Inner##_##type##_##DIM0##_##DIM1)->UseRealTime();\n
#define BM_BroadcastTo_OuterDim(DIM0, DIM1, type) \\\n static void BM_BroadcastTo_Outer##_##type##_##DIM0##_##DIM1( \\\n ::testing::benchmark::State& state) { \\\n test::Benchmark(#type, \\\n BroadcastTo(DIM0, DIM1, \\\n [](int dim0, int dim1) { \\\n return TensorShape({1, dim1}); \\\n }), \\\n false) \\\n .Run(state); \\\n state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * DIM0 * \\\n DIM1); \\\n } \\\n BENCHMARK(BM_BroadcastTo_Outer##_##type##_##DIM0##_##DIM1)->UseRealTime();\n
BM_BroadcastTo_InnerDim(64, 64, cpu);\nBM_BroadcastTo_InnerDim(128, 128, cpu);\nBM_BroadcastTo_InnerDim(256, 256, cpu);\nBM_BroadcastTo_InnerDim(512, 512, cpu);\nBM_BroadcastTo_InnerDim(1024, 1024, cpu);\nBM_BroadcastTo_InnerDim(500, 20000, cpu);\n
BM_BroadcastTo_OuterDim(64, 64, cpu);\nBM_BroadcastTo_OuterDim(128, 128, cpu);\nBM_BroadcastTo_OuterDim(256, 256, cpu);\nBM_BroadcastTo_OuterDim(512, 512, cpu);\nBM_BroadcastTo_OuterDim(1024, 1024, cpu);\nBM_BroadcastTo_OuterDim(500, 20000, cpu);\n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/broadcast_to_op.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/broadcast_to_op_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1191,"cells":{"ID":{"kind":"string","value":"3c74241c-37bf-4b8c-8147-6df223966067"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/tensorstore"},"File Name":{"kind":"string","value":"stringify"},"File Path in Repository":{"kind":"string","value":"tensorstore/internal/preprocessor/stringify.h"},"File Path for Unit Test":{"kind":"string","value":"tensorstore/internal/preprocessor/stringify_test.cc"},"Code":{"kind":"string","value":"#ifndef TENSORSTORE_INTERNAL_PREPROCESSOR_STRINGIFY_H_\n#define TENSORSTORE_INTERNAL_PREPROCESSOR_STRINGIFY_H_\n#define TENSORSTORE_PP_STRINGIFY(...) TENSORSTORE_PP_STRINGIFY_IMPL(__VA_ARGS__)\n#define TENSORSTORE_PP_STRINGIFY_IMPL(...)
#__VA_ARGS__\n#endif "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorstore/internal/preprocessor/stringify.h\"\n#include \nnamespace {\ninline constexpr bool Equal(std::string_view a, std::string_view b) {\n return a == b;\n}\n#define X abc\n#define Y abc, def\nstatic_assert(Equal(TENSORSTORE_PP_STRINGIFY(X), \"abc\"));\nstatic_assert(Equal(TENSORSTORE_PP_STRINGIFY(Y), \"abc, def\"));\n} "},"Code Url":{"kind":"string","value":"https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/preprocessor/stringify.h"},"Test Code Url":{"kind":"string","value":"https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/preprocessor/stringify_test.cc"},"Commit Hash":{"kind":"string","value":"4f887a6430414cd6088e1743555015b10f116d50"}}},{"rowIdx":1192,"cells":{"ID":{"kind":"string","value":"7221b1d8-3e12-4d64-a79a-9293f0400d31"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/quiche"},"File Name":{"kind":"string","value":"test_utils"},"File Path in Repository":{"kind":"string","value":"quiche/http2/adapter/test_utils.cc"},"File Path for Unit Test":{"kind":"string","value":"quiche/http2/adapter/test_utils_test.cc"},"Code":{"kind":"string","value":"#include \"quiche/http2/adapter/test_utils.h\"\n#include \n#include \n#include \n#include \n#include \"absl/strings/str_format.h\"\n#include \"quiche/http2/adapter/http2_visitor_interface.h\"\n#include \"quiche/http2/core/spdy_protocol.h\"\n#include \"quiche/http2/hpack/hpack_encoder.h\"\n#include \"quiche/common/quiche_data_reader.h\"\nnamespace http2 {\nnamespace adapter {\nnamespace test {\nnamespace {\nusing ConnectionError = Http2VisitorInterface::ConnectionError;\nstd::string EncodeHeaders(const quiche::HttpHeaderBlock& entries) {\n spdy::HpackEncoder encoder;\n encoder.DisableCompression();\n return encoder.EncodeHeaderBlock(entries);\n}\n} \nTestVisitor::DataFrameHeaderInfo TestVisitor::OnReadyToSendDataForStream(\n Http2StreamId stream_id, size_t max_length) {\n auto it = data_map_.find(stream_id);\n if (it == data_map_.end()) {\n QUICHE_DVLOG(1) << \"Source not in map; returning blocked.\";\n return {0, false, false};\n }\n DataPayload& payload = it->second;\n if (payload.return_error) {\n QUICHE_DVLOG(1) << \"Simulating error response for stream \" << stream_id;\n return {DataFrameSource::kError, false, false};\n }\n const absl::string_view prefix = payload.data.GetPrefix();\n const size_t frame_length = std::min(max_length, prefix.size());\n const bool is_final_fragment = payload.data.Read().size() <= 1;\n const bool end_data =\n payload.end_data && is_final_fragment && frame_length == prefix.size();\n const bool end_stream = payload.end_stream && end_data;\n return {static_cast(frame_length), end_data, end_stream};\n}\nbool TestVisitor::SendDataFrame(Http2StreamId stream_id,\n absl::string_view frame_header,\n size_t payload_bytes) {\n const int64_t frame_result = OnReadyToSend(frame_header);\n if (frame_result < 0 ||\n static_cast(frame_result) != frame_header.size()) {\n return false;\n }\n auto it = data_map_.find(stream_id);\n if (it == data_map_.end()) {\n if (payload_bytes > 0) {\n return false;\n } else {\n return true;\n }\n }\n DataPayload& payload = it->second;\n absl::string_view frame_payload = payload.data.GetPrefix();\n if (frame_payload.size() < payload_bytes) {\n return false;\n }\n frame_payload = frame_payload.substr(0, payload_bytes);\n const int64_t payload_result 
= OnReadyToSend(frame_payload);\n if (payload_result < 0 ||\n static_cast<size_t>(payload_result) != frame_payload.size()) {\n return false;\n }\n
payload.data.RemovePrefix(payload_bytes);\n return true;\n}\n
void TestVisitor::AppendPayloadForStream(Http2StreamId stream_id,\n absl::string_view payload) {\n auto char_data = std::unique_ptr<char[]>(new char[payload.size()]);\n std::copy(payload.begin(), payload.end(), char_data.get());\n data_map_[stream_id].data.Append(std::move(char_data), payload.size());\n}\n
void TestVisitor::SetEndData(Http2StreamId stream_id, bool end_stream) {\n DataPayload& payload = data_map_[stream_id];\n payload.end_data = true;\n payload.end_stream = end_stream;\n}\n
void TestVisitor::SimulateError(Http2StreamId stream_id) {\n DataPayload& payload = data_map_[stream_id];\n payload.return_error = true;\n}\n
std::pair<int64_t, bool> TestVisitor::PackMetadataForStream(\n Http2StreamId stream_id, uint8_t* dest, size_t dest_len) {\n auto it = outbound_metadata_map_.find(stream_id);\n if (it == outbound_metadata_map_.end()) {\n return {-1, false};\n }\n
const size_t to_copy = std::min(it->second.size(), dest_len);\n auto* src = reinterpret_cast<const uint8_t*>(it->second.data());\n std::copy(src, src + to_copy, dest);\n it->second = it->second.substr(to_copy);\n
if (it->second.empty()) {\n outbound_metadata_map_.erase(it);\n return {to_copy, true};\n }\n return {to_copy, false};\n}\n
void TestVisitor::AppendMetadataForStream(\n Http2StreamId stream_id, const quiche::HttpHeaderBlock& payload) {\n outbound_metadata_map_.insert({stream_id, EncodeHeaders(payload)});\n}\n
VisitorDataSource::VisitorDataSource(Http2VisitorInterface& visitor,\n Http2StreamId stream_id)\n : visitor_(visitor), stream_id_(stream_id) {}\n
bool VisitorDataSource::send_fin() const { return has_fin_; }\n
std::pair<int64_t, bool> VisitorDataSource::SelectPayloadLength(\n size_t max_length) {\n auto [payload_length, end_data, end_stream] =\n visitor_.OnReadyToSendDataForStream(stream_id_, max_length);\n has_fin_ = end_stream;\n return {payload_length, end_data};\n}\n
bool VisitorDataSource::Send(absl::string_view frame_header,\n size_t payload_length) {\n return visitor_.SendDataFrame(stream_id_, frame_header, payload_length);\n}\n
TestMetadataSource::TestMetadataSource(const quiche::HttpHeaderBlock& entries)\n : encoded_entries_(EncodeHeaders(entries)) {\n remaining_ = encoded_entries_;\n}\n
std::pair<int64_t, bool> TestMetadataSource::Pack(uint8_t* dest,\n size_t dest_len) {\n if (fail_when_packing_) {\n return {-1, false};\n }\n
const size_t copied = std::min(dest_len, remaining_.size());\n std::memcpy(dest, remaining_.data(), copied);\n remaining_.remove_prefix(copied);\n return std::make_pair(copied, remaining_.empty());\n}\n
namespace {\nusing TypeAndOptionalLength =\n std::pair<spdy::SpdyFrameType, std::optional<size_t>>;\n
std::ostream& operator<<(\n std::ostream& os,\n const std::vector<TypeAndOptionalLength>& types_and_lengths) {\n for (const auto& type_and_length : types_and_lengths) {\n os << \"(\" << spdy::FrameTypeToString(type_and_length.first) << \", \"\n << (type_and_length.second ?
absl::StrCat(type_and_length.second.value())\n : \"\")\n << \") \";\n }\n return os;\n}\nstd::string FrameTypeToString(uint8_t frame_type) {\n if (spdy::IsDefinedFrameType(frame_type)) {\n return spdy::FrameTypeToString(spdy::ParseFrameType(frame_type));\n } else {\n return absl::StrFormat(\"0x%x\", static_cast(frame_type));\n }\n}\nclass SpdyControlFrameMatcher\n : public testing::MatcherInterface {\n public:\n explicit SpdyControlFrameMatcher(\n std::vector types_and_lengths)\n : expected_types_and_lengths_(std::move(types_and_lengths)) {}\n bool MatchAndExplain(absl::string_view s,\n testing::MatchResultListener* listener) const override {\n quiche::QuicheDataReader reader(s.data(), s.size());\n for (TypeAndOptionalLength expected : expected_types_and_lengths_) {\n if (!MatchAndExplainOneFrame(expected.first, expected.second, &reader,\n listener)) {\n return false;\n }\n }\n if (!reader.IsDoneReading()) {\n *listener << \"; \" << reader.BytesRemaining() << \" bytes left to read!\";\n return false;\n }\n return true;\n }\n bool MatchAndExplainOneFrame(spdy::SpdyFrameType expected_type,\n std::optional expected_length,\n quiche::QuicheDataReader* reader,\n testing::MatchResultListener* listener) const {\n uint32_t payload_length;\n if (!reader->ReadUInt24(&payload_length)) {\n *listener << \"; unable to read length field for expected_type \"\n << FrameTypeToString(expected_type) << \". data too short!\";\n return false;\n }\n if (expected_length && payload_length != expected_length.value()) {\n *listener << \"; actual length: \" << payload_length\n << \" but expected length: \" << expected_length.value();\n return false;\n }\n uint8_t raw_type;\n if (!reader->ReadUInt8(&raw_type)) {\n *listener << \"; unable to read type field for expected_type \"\n << FrameTypeToString(expected_type) << \". 
data too short!\";\n return false;\n }\n if (raw_type != static_cast(expected_type)) {\n *listener << \"; actual type: \" << FrameTypeToString(raw_type)\n << \" but expected type: \" << FrameTypeToString(expected_type);\n return false;\n }\n reader->Seek(5 + payload_length);\n return true;\n }\n void DescribeTo(std::ostream* os) const override {\n *os << \"Data contains frames of types in sequence \"\n << expected_types_and_lengths_;\n }\n void DescribeNegationTo(std::ostream* os) const override {\n *os << \"Data does not contain frames of types in sequence \"\n << expected_types_and_lengths_;\n }\n private:\n const std::vector expected_types_and_lengths_;\n};\n} \ntesting::Matcher EqualsFrames(\n std::vector>>\n types_and_lengths) {\n return MakeMatcher(new SpdyControlFrameMatcher(std::move(types_and_lengths)));\n}\ntesting::Matcher EqualsFrames(\n std::vector types) {\n std::vector>>\n types_and_lengths;\n types_and_lengths.reserve(types.size());\n for (spdy::SpdyFrameType type : types) {\n types_and_lengths.push_back({type, std::nullopt});\n }\n return MakeMatcher(new SpdyControlFrameMatcher(std::move(types_and_lengths)));\n}\n} \n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"quiche/http2/adapter/test_utils.h\"\n#include \n#include \n#include \n#include \"quiche/http2/core/spdy_framer.h\"\n#include \"quiche/common/platform/api/quiche_test.h\"\nnamespace http2 {\nnamespace adapter {\nnamespace test {\nnamespace {\nusing spdy::SpdyFramer;\nTEST(EqualsFrames, Empty) {\n EXPECT_THAT(\"\", EqualsFrames(std::vector{}));\n}\nTEST(EqualsFrames, SingleFrameWithLength) {\n SpdyFramer framer{SpdyFramer::ENABLE_COMPRESSION};\n spdy::SpdyPingIR ping{511};\n EXPECT_THAT(framer.SerializeFrame(ping),\n EqualsFrames({{spdy::SpdyFrameType::PING, 8}}));\n spdy::SpdyWindowUpdateIR window_update{1, 101};\n EXPECT_THAT(framer.SerializeFrame(window_update),\n EqualsFrames({{spdy::SpdyFrameType::WINDOW_UPDATE, 4}}));\n spdy::SpdyDataIR data{3, \"Some example data, ha ha!\"};\n EXPECT_THAT(framer.SerializeFrame(data),\n EqualsFrames({{spdy::SpdyFrameType::DATA, 25}}));\n}\nTEST(EqualsFrames, SingleFrameWithoutLength) {\n SpdyFramer framer{SpdyFramer::ENABLE_COMPRESSION};\n spdy::SpdyRstStreamIR rst_stream{7, spdy::ERROR_CODE_REFUSED_STREAM};\n EXPECT_THAT(framer.SerializeFrame(rst_stream),\n EqualsFrames({{spdy::SpdyFrameType::RST_STREAM, std::nullopt}}));\n spdy::SpdyGoAwayIR goaway{13, spdy::ERROR_CODE_ENHANCE_YOUR_CALM,\n \"Consider taking some deep breaths.\"};\n EXPECT_THAT(framer.SerializeFrame(goaway),\n EqualsFrames({{spdy::SpdyFrameType::GOAWAY, std::nullopt}}));\n quiche::HttpHeaderBlock block;\n block[\":method\"] = \"GET\";\n block[\":path\"] = \"/example\";\n block[\":authority\"] = \"example.com\";\n spdy::SpdyHeadersIR headers{17, std::move(block)};\n EXPECT_THAT(framer.SerializeFrame(headers),\n EqualsFrames({{spdy::SpdyFrameType::HEADERS, std::nullopt}}));\n}\nTEST(EqualsFrames, MultipleFrames) {\n SpdyFramer framer{SpdyFramer::ENABLE_COMPRESSION};\n spdy::SpdyPingIR ping{511};\n spdy::SpdyWindowUpdateIR window_update{1, 101};\n spdy::SpdyDataIR data{3, \"Some example data, ha ha!\"};\n spdy::SpdyRstStreamIR rst_stream{7, spdy::ERROR_CODE_REFUSED_STREAM};\n spdy::SpdyGoAwayIR goaway{13, spdy::ERROR_CODE_ENHANCE_YOUR_CALM,\n \"Consider taking some deep breaths.\"};\n quiche::HttpHeaderBlock block;\n block[\":method\"] = \"GET\";\n block[\":path\"] = \"/example\";\n block[\":authority\"] = \"example.com\";\n spdy::SpdyHeadersIR headers{17, std::move(block)};\n const 
std::string frame_sequence =\n absl::StrCat(absl::string_view(framer.SerializeFrame(ping)),\n absl::string_view(framer.SerializeFrame(window_update)),\n absl::string_view(framer.SerializeFrame(data)),\n absl::string_view(framer.SerializeFrame(rst_stream)),\n absl::string_view(framer.SerializeFrame(goaway)),\n absl::string_view(framer.SerializeFrame(headers)));\n absl::string_view frame_sequence_view = frame_sequence;\n EXPECT_THAT(frame_sequence,\n EqualsFrames({{spdy::SpdyFrameType::PING, std::nullopt},\n {spdy::SpdyFrameType::WINDOW_UPDATE, std::nullopt},\n {spdy::SpdyFrameType::DATA, 25},\n {spdy::SpdyFrameType::RST_STREAM, std::nullopt},\n {spdy::SpdyFrameType::GOAWAY, 42},\n {spdy::SpdyFrameType::HEADERS, 19}}));\n EXPECT_THAT(frame_sequence_view,\n EqualsFrames({{spdy::SpdyFrameType::PING, std::nullopt},\n {spdy::SpdyFrameType::WINDOW_UPDATE, std::nullopt},\n {spdy::SpdyFrameType::DATA, 25},\n {spdy::SpdyFrameType::RST_STREAM, std::nullopt},\n {spdy::SpdyFrameType::GOAWAY, 42},\n {spdy::SpdyFrameType::HEADERS, 19}}));\n EXPECT_THAT(\n frame_sequence,\n EqualsFrames(\n {spdy::SpdyFrameType::PING, spdy::SpdyFrameType::WINDOW_UPDATE,\n spdy::SpdyFrameType::DATA, spdy::SpdyFrameType::RST_STREAM,\n spdy::SpdyFrameType::GOAWAY, spdy::SpdyFrameType::HEADERS}));\n EXPECT_THAT(\n frame_sequence_view,\n EqualsFrames(\n {spdy::SpdyFrameType::PING, spdy::SpdyFrameType::WINDOW_UPDATE,\n spdy::SpdyFrameType::DATA, spdy::SpdyFrameType::RST_STREAM,\n spdy::SpdyFrameType::GOAWAY, spdy::SpdyFrameType::HEADERS}));\n EXPECT_THAT(\n frame_sequence,\n testing::Not(EqualsFrames(\n {spdy::SpdyFrameType::PING, spdy::SpdyFrameType::WINDOW_UPDATE,\n spdy::SpdyFrameType::DATA, spdy::SpdyFrameType::RST_STREAM,\n spdy::SpdyFrameType::GOAWAY})));\n EXPECT_THAT(\n frame_sequence_view,\n testing::Not(EqualsFrames(\n {spdy::SpdyFrameType::PING, spdy::SpdyFrameType::WINDOW_UPDATE,\n spdy::SpdyFrameType::DATA, spdy::SpdyFrameType::RST_STREAM,\n spdy::SpdyFrameType::GOAWAY})));\n}\n} \n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/test_utils.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/test_utils_test.cc"},"Commit Hash":{"kind":"string","value":"6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6"}}},{"rowIdx":1193,"cells":{"ID":{"kind":"string","value":"732356cd-3b81-4dfa-8545-9e40ed39711b"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"stablehlo_type_utils"},"File Path in Repository":{"kind":"string","value":"tensorflow/compiler/mlir/quantization/stablehlo/utils/stablehlo_type_utils.h"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/compiler/mlir/quantization/stablehlo/utils/stablehlo_type_utils_test.cc"},"Code":{"kind":"string","value":"#ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_UTILS_STABLEHLO_TYPE_UTILS_H_\n#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_UTILS_STABLEHLO_TYPE_UTILS_H_\n#include \"llvm/ADT/StringRef.h\"\n#include \"mlir/IR/BuiltinAttributes.h\" \n#include \"mlir/Transforms/DialectConversion.h\" \n#include \"stablehlo/dialect/StablehloOps.h\" \nnamespace mlir::quant::stablehlo {\ninline bool IsStablehloOp(Operation* op) {\n return op->getDialect()->getNamespace() ==\n mlir::stablehlo::StablehloDialect::getDialectNamespace();\n}\n} \n#endif "},"Unit Test - (Ground 
Truth)":{"kind":"string","value":"#include \"tensorflow/compiler/mlir/quantization/stablehlo/utils/stablehlo_type_utils.h\"\n#include \n#include \"mlir/Dialect/Arith/IR/Arith.h\" \n#include \"mlir/Dialect/Func/IR/FuncOps.h\" \n#include \"mlir/IR/Builders.h\" \n#include \"mlir/IR/MLIRContext.h\" \n#include \"mlir/IR/OwningOpRef.h\" \n#include \"stablehlo/dialect/StablehloOps.h\" \nnamespace mlir::quant::stablehlo {\nnamespace {\nusing ::testing::Test;\nclass StablehloTypeUtilsTest : public Test {\n protected:\n StablehloTypeUtilsTest() {\n ctx_.loadDialect();\n }\n MLIRContext ctx_;\n OpBuilder builder_{&ctx_};\n};\nTEST_F(StablehloTypeUtilsTest, IsStablehloOpSucceedsWithStablehloOp) {\n const OwningOpRef constant_op =\n builder_.create(\n builder_.getUnknownLoc(), builder_.getI32IntegerAttr(0));\n EXPECT_TRUE(IsStablehloOp(*constant_op));\n}\nTEST_F(StablehloTypeUtilsTest, IsStablehloOpFailsWithArithOp) {\n const OwningOpRef constant_op =\n builder_.create(builder_.getUnknownLoc(),\n builder_.getI32IntegerAttr(0));\n EXPECT_FALSE(IsStablehloOp(*constant_op));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/utils/stablehlo_type_utils.h"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/utils/stablehlo_type_utils_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":1194,"cells":{"ID":{"kind":"string","value":"d5254719-24c1-4a2c-931b-bfce7b9085fb"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/tensorstore"},"File Name":{"kind":"string","value":"virtual_chunked"},"File Path in Repository":{"kind":"string","value":"tensorstore/driver/virtual_chunked/virtual_chunked.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorstore/driver/virtual_chunked/virtual_chunked_test.cc"},"Code":{"kind":"string","value":"#include \"tensorstore/virtual_chunked.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/base/optimization.h\"\n#include \"absl/status/status.h\"\n#include \"absl/time/clock.h\"\n#include \"absl/time/time.h\"\n#include \"tensorstore/array.h\"\n#include \"tensorstore/box.h\"\n#include \"tensorstore/chunk_layout.h\"\n#include \"tensorstore/codec_spec.h\"\n#include \"tensorstore/context.h\"\n#include \"tensorstore/contiguous_layout.h\"\n#include \"tensorstore/data_type.h\"\n#include \"tensorstore/driver/chunk_cache_driver.h\"\n#include \"tensorstore/driver/driver.h\"\n#include \"tensorstore/driver/driver_handle.h\"\n#include \"tensorstore/driver/driver_spec.h\"\n#include \"tensorstore/driver/registry.h\"\n#include \"tensorstore/index.h\"\n#include \"tensorstore/index_interval.h\"\n#include \"tensorstore/index_space/dimension_units.h\"\n#include \"tensorstore/index_space/index_domain.h\"\n#include \"tensorstore/index_space/index_domain_builder.h\"\n#include \"tensorstore/index_space/index_transform.h\"\n#include \"tensorstore/index_space/index_transform_builder.h\"\n#include \"tensorstore/internal/async_write_array.h\"\n#include \"tensorstore/internal/cache/async_cache.h\"\n#include \"tensorstore/internal/cache/cache.h\"\n#include \"tensorstore/internal/cache/cache_pool_resource.h\"\n#include \"tensorstore/internal/cache/chunk_cache.h\"\n#include 
\"tensorstore/internal/chunk_grid_specification.h\"\n#include \"tensorstore/internal/data_copy_concurrency_resource.h\"\n#include \"tensorstore/internal/integer_overflow.h\"\n#include \"tensorstore/internal/memory.h\"\n#include \"tensorstore/kvstore/generation.h\"\n#include \"tensorstore/open_mode.h\"\n#include \"tensorstore/open_options.h\"\n#include \"tensorstore/rank.h\"\n#include \"tensorstore/serialization/absl_time.h\" \n#include \"tensorstore/serialization/std_optional.h\" \n#include \"tensorstore/staleness_bound.h\"\n#include \"tensorstore/strided_layout.h\"\n#include \"tensorstore/transaction.h\"\n#include \"tensorstore/util/byte_strided_pointer.h\"\n#include \"tensorstore/util/element_pointer.h\"\n#include \"tensorstore/util/future.h\"\n#include \"tensorstore/util/garbage_collection/garbage_collection.h\"\n#include \"tensorstore/util/garbage_collection/std_optional.h\" \n#include \"tensorstore/util/result.h\"\n#include \"tensorstore/util/span.h\"\n#include \"tensorstore/util/status.h\"\n#include \"tensorstore/util/str_cat.h\"\nnamespace tensorstore {\nnamespace virtual_chunked {\nnamespace {\nclass VirtualChunkedCache : public internal::ConcreteChunkCache {\n using Base = internal::ConcreteChunkCache;\n public:\n using Base::Base;\n template \n void DoRead(EntryOrNode& node, AsyncCacheReadRequest request);\n class Entry : public internal::ChunkCache::Entry {\n public:\n using OwningCache = VirtualChunkedCache;\n using internal::ChunkCache::Entry::Entry;\n void DoRead(AsyncCacheReadRequest request) override {\n GetOwningCache(*this).DoRead(*this, std::move(request));\n }\n };\n class TransactionNode : public internal::ChunkCache::TransactionNode {\n public:\n using OwningCache = VirtualChunkedCache;\n using internal::ChunkCache::TransactionNode::TransactionNode;\n std::atomic marked_as_terminal_{false};\n absl::Status DoInitialize(\n internal::OpenTransactionPtr& transaction) override {\n SetReadsCommitted();\n return internal::ChunkCache::TransactionNode::DoInitialize(transaction);\n }\n absl::Status OnModified() override {\n if (!marked_as_terminal_.exchange(true, std::memory_order_acq_rel)) {\n return this->MarkAsTerminal();\n }\n return absl::OkStatus();\n }\n std::string Describe() override;\n void DoRead(AsyncCacheReadRequest request) override {\n GetOwningCache(*this).DoRead(*this, std::move(request));\n }\n void Commit() override;\n void InitiateWriteback(absl::Time staleness_bound);\n };\n Entry* DoAllocateEntry() final { return new Entry; }\n size_t DoGetSizeofEntry() final { return sizeof(Entry); }\n TransactionNode* DoAllocateTransactionNode(\n internal::AsyncCache::Entry& entry) final {\n return new TransactionNode(static_cast(entry));\n }\n std::vector grid_origin_for_read_function_;\n DimensionUnitsVector dimension_units_;\n std::vector inner_order_;\n ReadFunction read_function_;\n WriteFunction write_function_;\n Context::Resource\n data_copy_concurrency_;\n Context::Resource cache_pool_;\n};\nbool GetPermutedPartialArray(\n VirtualChunkedCache::Entry& entry, ArrayView full_array,\n Array& partial_array) {\n auto& cache = static_cast(GetOwningCache(entry));\n const auto& component_spec = cache.grid().components.front();\n const DimensionIndex rank = component_spec.rank();\n span cell_shape = component_spec.shape();\n span cell_indices = entry.cell_indices();\n span inner_order = cache.inner_order_;\n span grid_origin_for_read_function =\n cache.grid_origin_for_read_function_;\n BoxView<> domain_bounds = component_spec.array_spec.valid_data_bounds;\n 
partial_array.layout().set_rank(rank);\n ByteStridedPointer data = full_array.byte_strided_pointer();\n for (DimensionIndex component_dim = 0; component_dim < rank;\n ++component_dim) {\n const DimensionIndex external_dim = inner_order[component_dim];\n const Index byte_stride = full_array.byte_strides()[component_dim];\n partial_array.byte_strides()[external_dim] = byte_stride;\n Index grid_origin_value = grid_origin_for_read_function[external_dim];\n Index chunk_start = cell_indices[component_dim] * cell_shape[component_dim];\n Index chunk_end = chunk_start + cell_shape[component_dim];\n Index request_start =\n std::max(chunk_start, domain_bounds.origin()[component_dim]);\n Index request_end =\n std::min(chunk_end, domain_bounds[component_dim].exclusive_max());\n if (request_start >= request_end) {\n return false;\n }\n partial_array.origin()[external_dim] = request_start + grid_origin_value;\n partial_array.shape()[external_dim] = request_end - request_start;\n data -= internal::wrap_on_overflow::Multiply(\n byte_stride, chunk_start + grid_origin_value);\n }\n partial_array.element_pointer() =\n ElementPointer(data, full_array.dtype());\n return true;\n}\ntemplate \nvoid VirtualChunkedCache::DoRead(EntryOrNode& node,\n AsyncCacheReadRequest request) {\n auto& cache = GetOwningCache(node);\n if (!cache.read_function_) {\n node.ReadError(absl::InvalidArgumentError(\n \"Write-only virtual chunked view requires chunk-aligned writes\"));\n return;\n }\n auto& executor = cache.executor();\n executor([&node, staleness_bound = request.staleness_bound] {\n auto& entry = GetOwningEntry(node);\n auto& cache = GetOwningCache(entry);\n const auto& component_spec = cache.grid().components.front();\n span cell_shape = component_spec.shape();\n auto full_array = AllocateArray(cell_shape, c_order, default_init,\n component_spec.dtype());\n Array partial_array;\n auto read_data =\n tensorstore::internal::make_shared_for_overwrite(1);\n if (!GetPermutedPartialArray(entry, full_array, partial_array)) {\n node.ReadSuccess(\n {std::move(read_data),\n {StorageGeneration::NoValue(), absl::InfiniteFuture()}});\n return;\n }\n read_data.get()[0] = full_array;\n ReadParameters read_params;\n read_params.executor_ = cache.executor();\n {\n ReadLock lock{node};\n read_params.if_not_equal_ = lock.stamp().generation;\n }\n read_params.staleness_bound_ = staleness_bound;\n auto read_future =\n cache.read_function_(ConstDataTypeCast(std::move(partial_array)),\n std::move(read_params));\n read_future.Force();\n read_future.ExecuteWhenReady(\n [&node, read_data = std::move(read_data)](\n ReadyFuture future) mutable {\n auto& r = future.result();\n if (!r.ok()) {\n node.ReadError(std::move(r).status());\n return;\n }\n if (StorageGeneration::IsUnknown(r->generation)) {\n ReadState read_state;\n {\n ReadLock lock{node};\n read_state = lock.read_state();\n }\n read_state.stamp.time = r->time;\n node.ReadSuccess(std::move(read_state));\n return;\n }\n node.ReadSuccess({std::move(read_data), std::move(*r)});\n return;\n });\n });\n}\nstd::string VirtualChunkedCache::TransactionNode::Describe() {\n auto& entry = GetOwningEntry(*this);\n auto& cache = GetOwningCache(entry);\n auto domain = cache.grid().GetValidCellDomain(0, entry.cell_indices());\n if (domain.is_empty()) return {};\n return tensorstore::StrCat(\"write to virtual chunk \", domain);\n}\nvoid VirtualChunkedCache::TransactionNode::Commit() {\n if (!GetOwningCache(*this).write_function_) {\n SetError(absl::InternalError(\n \"No write function specified to 
virtual_chunked driver\"));\n this->WritebackError();\n return;\n }\n InitiateWriteback(absl::InfinitePast());\n internal::ChunkCache::TransactionNode::Commit();\n}\nvoid VirtualChunkedCache::TransactionNode::InitiateWriteback(\n absl::Time staleness_bound) {\n struct ApplyReceiver {\n TransactionNode& self;\n void set_value(AsyncCache::ReadState update) {\n GetOwningCache(self).executor()(\n [node = &self, update = std::move(update)] {\n auto* read_data = static_cast(update.data.get());\n SharedArray full_array;\n auto& entry = GetOwningEntry(*node);\n auto& cache = GetOwningCache(*node);\n if (read_data && read_data[0].valid()) {\n full_array = read_data[0];\n } else {\n full_array =\n node->component_specs()[0].array_spec.GetFillValueForDomain(\n cache.grid().GetCellDomain(0, entry.cell_indices()));\n }\n Array partial_array;\n if (!GetPermutedPartialArray(entry, full_array, partial_array)) {\n node->WritebackSuccess(\n {std::move(update.data),\n {StorageGeneration::NoValue(), absl::InfiniteFuture()}});\n return;\n }\n WriteParameters write_params;\n write_params.if_equal_ =\n StorageGeneration::Clean(update.stamp.generation);\n write_params.executor_ = cache.executor();\n auto write_future = cache.write_function_(std::move(partial_array),\n std::move(write_params));\n write_future.Force();\n write_future.ExecuteWhenReady(\n [node = node, update = std::move(update),\n full_array = std::move(full_array)](\n ReadyFuture future) mutable {\n auto& r = future.result();\n if (!r.ok()) {\n node->SetError(std::move(r).status());\n node->WritebackError();\n return;\n }\n if (StorageGeneration::IsUnknown(r->generation)) {\n node->InitiateWriteback(r->time);\n return;\n }\n update.stamp = std::move(*r);\n node->WritebackSuccess(std::move(update));\n });\n });\n }\n void set_error(absl::Status error) {\n self.SetError(std::move(error));\n self.WritebackError();\n }\n void set_cancel() { ABSL_UNREACHABLE(); } \n };\n AsyncCache::TransactionNode::ApplyOptions apply_options;\n apply_options.staleness_bound = staleness_bound;\n this->DoApply(std::move(apply_options), ApplyReceiver{*this});\n}\nclass VirtualChunkedDriverSpec\n : public internal::RegisteredDriverSpec {\n public:\n constexpr static const char id[] = \"virtual_chunked\";\n std::optional read_function;\n std::optional write_function;\n Context::Resource\n data_copy_concurrency;\n Context::Resource cache_pool;\n StalenessBound data_staleness;\n constexpr static auto ApplyMembers = [](auto&& x, auto f) {\n return f(internal::BaseCast(x), x.read_function,\n x.write_function, x.data_copy_concurrency, x.cache_pool,\n x.data_staleness);\n };\n OpenMode open_mode() const override {\n return OpenMode::open;\n }\n Future Open(\n internal::DriverOpenRequest request) const override;\n absl::Status ApplyOptions(SpecOptions&& options) override {\n if (options.kvstore.valid()) {\n return absl::InvalidArgumentError(\n \"virtual_chunked driver does not support a kvstore\");\n }\n if (options.recheck_cached_data.specified()) {\n data_staleness = StalenessBound(options.recheck_cached_data);\n }\n if (options.recheck_cached_metadata.specified()) {\n return absl::InvalidArgumentError(\n \"virtual_chunked driver does not support recheck_cached_metadata\");\n }\n return schema.Set(static_cast(options));\n }\n};\nclass VirtualChunkedDriver;\nusing VirtualChunkedDriverBase = internal::RegisteredDriver<\n VirtualChunkedDriver,\n internal::ChunkGridSpecificationDriver<\n VirtualChunkedCache, internal::ChunkCacheReadWriteDriverMixin<\n VirtualChunkedDriver, 
internal::Driver>>>;\nclass VirtualChunkedDriver : public VirtualChunkedDriverBase {\n using Base = VirtualChunkedDriverBase;\n public:\n using Base::Base;\n Result GetBoundSpec(\n internal::OpenTransactionPtr transaction,\n IndexTransformView<> transform) override;\n static Result OpenFromSpecData(\n Transaction transaction, const VirtualChunkedDriverSpec& spec,\n ReadWriteMode read_write_mode = ReadWriteMode::dynamic);\n Result GetCodec() override { return CodecSpec{}; }\n Result GetDimensionUnits() override {\n return cache()->dimension_units_;\n }\n Result> GetFillValue(\n IndexTransformView<> transform) override {\n return {std::in_place};\n }\n Result GetChunkLayout(IndexTransformView<> transform) override {\n return internal::GetChunkLayoutFromGrid(cache()->grid().components[0]) |\n transform;\n }\n};\nResult VirtualChunkedDriver::GetBoundSpec(\n internal::OpenTransactionPtr transaction, IndexTransformView<> transform) {\n auto driver_spec = internal::DriverSpec::Make();\n driver_spec->context_binding_state_ = ContextBindingState::bound;\n auto& cache = *this->cache();\n if (cache.read_function_) {\n driver_spec->read_function = cache.read_function_;\n }\n if (cache.write_function_) {\n driver_spec->write_function = cache.write_function_;\n }\n driver_spec->data_copy_concurrency = cache.data_copy_concurrency_;\n driver_spec->cache_pool = cache.cache_pool_;\n driver_spec->data_staleness = this->data_staleness_bound();\n const DimensionIndex rank = this->rank();\n TENSORSTORE_RETURN_IF_ERROR(driver_spec->schema.Set(RankConstraint{rank}));\n TENSORSTORE_RETURN_IF_ERROR(driver_spec->schema.Set(dtype()));\n TENSORSTORE_RETURN_IF_ERROR(\n driver_spec->schema.Set(Schema::DimensionUnits(cache.dimension_units_)));\n TENSORSTORE_RETURN_IF_ERROR(\n driver_spec->schema.Set(ChunkLayout::InnerOrder(cache.inner_order_)));\n TENSORSTORE_RETURN_IF_ERROR(driver_spec->schema.Set(\n ChunkLayout::GridOrigin(cache.grid_origin_for_read_function_)));\n span inner_order = cache.inner_order_;\n span grid_origin_for_read_function =\n cache.grid_origin_for_read_function_;\n const auto& component_spec = cache.grid().components[component_index()];\n IndexTransformBuilder external_to_output_transform_builder(rank, rank);\n IndexDomainBuilder external_domain_builder(rank);\n Index chunk_shape[kMaxRank];\n for (DimensionIndex component_dim = 0; component_dim < rank;\n ++component_dim) {\n const DimensionIndex external_dim = inner_order[component_dim];\n const Index offset = grid_origin_for_read_function[external_dim];\n chunk_shape[external_dim] = component_spec.shape()[component_dim];\n external_to_output_transform_builder.output_single_input_dimension(\n external_dim, offset, 1, component_dim);\n TENSORSTORE_ASSIGN_OR_RETURN(\n external_domain_builder.bounds()[external_dim],\n ShiftInterval(\n component_spec.array_spec.valid_data_bounds[component_dim],\n offset));\n }\n TENSORSTORE_ASSIGN_OR_RETURN(auto external_to_output_transform,\n external_to_output_transform_builder.Finalize());\n TENSORSTORE_ASSIGN_OR_RETURN(auto external_domain,\n external_domain_builder.Finalize());\n TENSORSTORE_RETURN_IF_ERROR(driver_spec->schema.Set(\n ChunkLayout::ChunkShape(span(&chunk_shape[0], rank))));\n TENSORSTORE_RETURN_IF_ERROR(\n driver_spec->schema.Set(std::move(external_domain)));\n internal::TransformedDriverSpec spec;\n TENSORSTORE_ASSIGN_OR_RETURN(\n spec.transform,\n ComposeTransforms(external_to_output_transform, transform));\n spec.driver_spec = std::move(driver_spec);\n return spec;\n}\nResult 
VirtualChunkedDriver::OpenFromSpecData(\n Transaction transaction, const VirtualChunkedDriverSpec& spec,\n ReadWriteMode read_write_mode) {\n if ((read_write_mode & ReadWriteMode::read) == ReadWriteMode::read &&\n !spec.read_function) {\n return absl::InvalidArgumentError(\"Reading not supported\");\n }\n if ((read_write_mode & ReadWriteMode::write) == ReadWriteMode::write &&\n !spec.write_function) {\n return absl::InvalidArgumentError(\"Writing not supported\");\n }\n if (read_write_mode == ReadWriteMode::dynamic) {\n read_write_mode =\n (spec.read_function ? ReadWriteMode::read : ReadWriteMode{}) |\n (spec.write_function ? ReadWriteMode::write : ReadWriteMode{});\n }\n const DimensionIndex rank = spec.schema.rank();\n if (rank == dynamic_rank) {\n return absl::InvalidArgumentError(\"rank must be specified\");\n }\n DataType dtype = spec.schema.dtype();\n if (!dtype.valid()) {\n return absl::InvalidArgumentError(\"dtype must be specified\");\n }\n IndexDomain<> domain = spec.schema.domain();\n if (!domain.valid()) {\n domain = IndexDomain<>(rank);\n }\n domain = WithImplicitDimensions(std::move(domain),\n false,\n false);\n Box<> chunk_template(rank);\n std::vector inner_order(rank);\n {\n ChunkLayout chunk_layout = spec.schema.chunk_layout();\n if (chunk_layout.codec_chunk_shape().hard_constraint) {\n return absl::InvalidArgumentError(\"codec_chunk_shape not supported\");\n }\n if (spec.schema.fill_value().valid()) {\n return absl::InvalidArgumentError(\"fill_value not supported\");\n }\n TENSORSTORE_RETURN_IF_ERROR(\n internal::ChooseReadWriteChunkGrid(chunk_layout, domain.box(),\n chunk_template),\n tensorstore::MaybeAnnotateStatus(_, \"Failed to compute chunk grid\"));\n if (auto requested_inner_order = chunk_layout.inner_order();\n requested_inner_order.valid()) {\n std::copy_n(requested_inner_order.begin(), rank, inner_order.begin());\n } else {\n std::iota(inner_order.begin(), inner_order.end(), DimensionIndex(0));\n }\n }\n auto external_dimension_units = spec.schema.dimension_units();\n Box<> adjusted_component_domain(rank);\n DimensionUnitsVector component_units(rank);\n for (DimensionIndex component_dim = 0; component_dim < rank;\n ++component_dim) {\n const DimensionIndex external_dim = inner_order[component_dim];\n TENSORSTORE_ASSIGN_OR_RETURN(\n adjusted_component_domain[component_dim],\n ShiftIntervalBackward(domain[external_dim],\n chunk_template.origin()[external_dim]));\n if (external_dimension_units.valid()) {\n component_units[component_dim] = external_dimension_units[external_dim];\n }\n }\n internal::Driver::Handle handle;\n handle.transaction = std::move(transaction);\n {\n IndexTransformBuilder transform_builder(rank, rank);\n transform_builder.input_domain(domain);\n for (DimensionIndex component_dim = 0; component_dim < rank;\n ++component_dim) {\n const DimensionIndex external_dim = inner_order[component_dim];\n transform_builder.output_single_input_dimension(\n component_dim, -chunk_template.origin()[external_dim], 1,\n external_dim);\n }\n TENSORSTORE_ASSIGN_OR_RETURN(handle.transform,\n transform_builder.Finalize());\n }\n auto cache =\n internal::GetCache(spec.cache_pool->get(), \"\", [&] {\n auto fill_value =\n BroadcastArray(AllocateArray(span{}, c_order,\n value_init, spec.schema.dtype()),\n BoxView<>(rank))\n .value();\n std::vector chunk_shape(rank);\n for (DimensionIndex component_dim = 0; component_dim < rank;\n ++component_dim) {\n const DimensionIndex external_dim = inner_order[component_dim];\n chunk_shape[component_dim] = 
chunk_template.shape()[external_dim];\n }\n internal::ChunkGridSpecification::ComponentList components;\n components.emplace_back(\n internal::AsyncWriteArray::Spec{\n std::move(fill_value), std::move(adjusted_component_domain)},\n std::move(chunk_shape));\n auto cache = std::make_unique(\n internal::ChunkGridSpecification(std::move(components)),\n spec.data_copy_concurrency->executor);\n cache->dimension_units_ = std::move(component_units);\n if (spec.read_function) {\n cache->read_function_ = *spec.read_function;\n }\n if (spec.write_function) {\n cache->write_function_ = *spec.write_function;\n }\n cache->inner_order_ = std::move(inner_order);\n cache->grid_origin_for_read_function_.assign(\n chunk_template.origin().begin(), chunk_template.origin().end());\n cache->cache_pool_ = spec.cache_pool;\n cache->data_copy_concurrency_ = spec.data_copy_concurrency;\n return cache;\n });\n handle.driver = internal::MakeReadWritePtr(\n read_write_mode, VirtualChunkedDriver::Initializer{\n std::move(cache), 0,\n spec.data_staleness.BoundAtOpen(absl::Now())});\n return handle;\n}\nFuture VirtualChunkedDriverSpec::Open(\n internal::DriverOpenRequest request) const {\n return VirtualChunkedDriver::OpenFromSpecData(\n internal::TransactionState::ToTransaction(std::move(request.transaction)),\n *this, request.read_write_mode);\n}\n} \nnamespace internal_virtual_chunked {\nResult MakeDriver(\n virtual_chunked::ReadFunction read_function,\n virtual_chunked::WriteFunction write_function, OpenOptions&& options) {\n VirtualChunkedDriverSpec spec;\n if (read_function) {\n spec.read_function = std::move(read_function);\n }\n if (write_function) {\n spec.write_function = std::move(write_function);\n }\n spec.schema = static_cast(options);\n if (!options.context) {\n options.context = Context::Default();\n }\n TENSORSTORE_ASSIGN_OR_RETURN(\n spec.cache_pool,\n options.context.GetResource());\n TENSORSTORE_ASSIGN_OR_RETURN(\n spec.data_copy_concurrency,\n options.context.GetResource());\n if (options.recheck_cached_data.specified()) {\n spec.data_staleness = StalenessBound(options.recheck_cached_data);\n }\n return VirtualChunkedDriver::OpenFromSpecData(std::move(options.transaction),\n spec);\n}\n} \n} \nnamespace garbage_collection {\ntemplate <>\nstruct GarbageCollection {\n static void Visit(GarbageCollectionVisitor& visitor,\n const virtual_chunked::VirtualChunkedDriver& value) {\n garbage_collection::GarbageCollectionVisit(visitor,\n value.cache()->read_function_);\n garbage_collection::GarbageCollectionVisit(visitor,\n value.cache()->write_function_);\n }\n};\n} \n} \nnamespace {\nconst tensorstore::internal::SerializationOnlyDriverRegistration<\n tensorstore::virtual_chunked::VirtualChunkedDriverSpec>\n driver_registration;\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorstore/virtual_chunked.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/status/status.h\"\n#include \"absl/synchronization/mutex.h\"\n#include \"absl/time/clock.h\"\n#include \"absl/time/time.h\"\n#include \"tensorstore/array.h\"\n#include \"tensorstore/chunk_layout.h\"\n#include \"tensorstore/context.h\"\n#include \"tensorstore/data_type.h\"\n#include \"tensorstore/index.h\"\n#include \"tensorstore/index_space/dim_expression.h\"\n#include \"tensorstore/internal/queue_testutil.h\"\n#include \"tensorstore/kvstore/generation.h\"\n#include \"tensorstore/kvstore/test_util.h\"\n#include \"tensorstore/open_mode.h\"\n#include \"tensorstore/rank.h\"\n#include 
\"tensorstore/schema.h\"\n#include \"tensorstore/serialization/function.h\"\n#include \"tensorstore/serialization/serialization.h\"\n#include \"tensorstore/serialization/test_util.h\"\n#include \"tensorstore/staleness_bound.h\"\n#include \"tensorstore/strided_layout.h\"\n#include \"tensorstore/tensorstore.h\"\n#include \"tensorstore/transaction.h\"\n#include \"tensorstore/util/future.h\"\n#include \"tensorstore/util/iterate_over_index_range.h\"\n#include \"tensorstore/util/span.h\"\n#include \"tensorstore/util/status_testutil.h\"\nnamespace {\nusing ::tensorstore::DimensionIndex;\nusing ::tensorstore::dynamic_rank;\nusing ::tensorstore::Future;\nusing ::tensorstore::Index;\nusing ::tensorstore::MatchesStatus;\nusing ::tensorstore::Promise;\nusing ::tensorstore::Result;\nusing ::tensorstore::span;\nusing ::tensorstore::StorageGeneration;\nusing ::tensorstore::TimestampedStorageGeneration;\nusing ::tensorstore::internal::ConcurrentQueue;\nusing ::tensorstore::internal::UniqueNow;\nusing ::tensorstore::serialization::SerializationRoundTrip;\ntemplate \nResult>\nCoordinatesView(DimensionIndex dim, Option&&... option) {\n return tensorstore::VirtualChunked(\n tensorstore::NonSerializable{[dim](auto output, auto read_params)\n -> Future {\n tensorstore::IterateOverIndexRange(\n output.domain(),\n [&](span indices) { output(indices) = indices[dim]; });\n return TimestampedStorageGeneration{StorageGeneration::FromString(\"\"),\n absl::Now()};\n }},\n std::forward