)wrapper\",\n {{\"$DOTS\", dot_graphs_compressed},\n {\"$FRAMES\", frames},\n {\"$TITLE\", graph_title}});\n}\nstatic std::string GraphTitle(const HloComputation& computation) {\n return absl::StrCat(computation.parent()->name(), \"_\", computation.name());\n}\nabsl::StatusOr<std::string> WrapFusionExplorer(\n const HloComputation& computation) {\n absl::MutexLock lock(&fusion_visualizer_state_mu);\n const FusionVisualizerProgress& visualizer_progress =\n fusion_visualizer_states[FusionVisualizerStateKey(computation)];\n return WrapFusionExplorer(visualizer_progress, GraphTitle(computation));\n}\nstatic absl::StatusOr<std::string> WrapDotInHtml(absl::string_view dot,\n absl::string_view title) {\n FusionVisualizerProgress progress;\n progress.AddState(dot, title, std::nullopt);\n return WrapFusionExplorer(progress, title);\n}\nstatic absl::StatusOr<std::string> WrapDotInFormat(\n const HloComputation& computation, absl::string_view dot,\n RenderedGraphFormat format) ABSL_EXCLUSIVE_LOCKS_REQUIRED(url_renderer_mu) {\n switch (format) {\n case RenderedGraphFormat::kUrl:\n CHECK(url_renderer != nullptr)\n << \"Should have checked url_renderer != null before calling.\";\n return (*url_renderer)(dot);\n case RenderedGraphFormat::kHtml:\n return WrapDotInHtml(dot, GraphTitle(computation));\n case RenderedGraphFormat::kDot:\n return std::string(dot);\n }\n}\nvoid RegisterGraphToURLRenderer(\n std::function<absl::StatusOr<std::string>(absl::string_view)> renderer) {\n absl::MutexLock lock(&url_renderer_mu);\n if (url_renderer != nullptr) {\n LOG(WARNING) << \"Multiple calls to RegisterGraphToURLRenderer. Last call \"\n \"wins, but because order of initialization in C++ is \"\n \"nondeterministic, this may not be what you want.\";\n }\n delete url_renderer;\n url_renderer =\n new std::function<absl::StatusOr<std::string>(absl::string_view)>(\n std::move(renderer));\n}\nvoid RegisterFusionState(const HloComputation& computation,\n absl::string_view label,\n const HloInstruction& consumer,\n const HloInstruction* producer) {\n absl::MutexLock lock(&fusion_visualizer_state_mu);\n FusionVisualizerProgress& fusion_progress =\n fusion_visualizer_states[FusionVisualizerStateKey(computation)];\n static constexpr int kRenderRadius = 4;\n absl::flat_hash_set<const HloInstruction*> render_boundary;\n for (const HloInstruction* user : consumer.users()) {\n render_boundary.insert(user);\n }\n HloDotDumper dumper(\n consumer.parent(),\n StrCat(\"Rendering of \", kRenderRadius, \" nodes around fusion consumer\"),\n consumer.GetModule()->config().debug_options(), {},\n MakeNodeRadiusAroundFilter(&consumer, kRenderRadius, render_boundary));\n std::string dot_txt = dumper.Dump();\n std::optional<std::string> producer_to_highlight;\n if (producer) {\n producer_to_highlight = dumper.CssIdForInstruction(*producer);\n }\n fusion_progress.AddState(dot_txt, label, producer_to_highlight);\n}\nabsl::StatusOr<std::string> RenderGraph(\n const HloComputation& computation, absl::string_view label,\n const DebugOptions& debug_options, RenderedGraphFormat format,\n HloRenderOptions hlo_render_options,\n std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>>\n color_map) {\n absl::MutexLock lock(&url_renderer_mu);\n if (format == RenderedGraphFormat::kUrl && url_renderer == nullptr) {\n return Unavailable(\"Can't render as URL; no URL renderer was registered.\");\n }\n std::string rendered_dot =\n HloDotDumper(&computation, label, debug_options, hlo_render_options,\n NodeFilter(), color_map)\n 
.Dump();\n return WrapDotInFormat(computation, rendered_dot, format);\n}\nabsl::StatusOr<std::string> RenderAllComputationsToHtml(\n const HloModule& module) {\n FusionVisualizerProgress progress;\n std::vector<HloInstruction*> instrs =\n module.entry_computation()->MakeInstructionPostOrder();\n absl::c_reverse(instrs);\n for (const HloInstruction* instr : instrs) {\n if (absl::c_linear_search(\n std::vector<HloOpcode>{HloOpcode::kConstant,\n HloOpcode::kGetTupleElement},\n instr->opcode())) {\n continue;\n }\n HloRenderOptions opts;\n opts.show_fusion_subcomputations = true;\n opts.show_backend_config = true;\n opts.show_while_subcomputations = instr->opcode() == HloOpcode::kWhile;\n static constexpr int64_t max_nodes_to_render = 100;\n absl::flat_hash_set<const HloInstruction*> render_boundary;\n NodeFilter filter = MakeNodeRadiusAroundFilter(instr, 2, render_boundary);\n if (filter.GetNumRendered().value_or(1) > max_nodes_to_render) {\n filter = MakeNodeRadiusAroundFilter(instr, 1, render_boundary);\n }\n std::string dot =\n HloDotDumper(module.entry_computation(), instr->name(),\n module.config().debug_options(), opts, filter)\n .Dump();\n progress.AddState(dot, instr->name(), std::nullopt);\n }\n return WrapFusionExplorer(progress, module.name());\n}\nabsl::StatusOr<std::string> RenderNeighborhoodAround(\n const HloInstruction& node, int radius, RenderedGraphFormat format,\n HloRenderOptions hlo_render_options,\n const absl::flat_hash_set<const HloInstruction*>& boundary,\n std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>>\n color_map) {\n absl::MutexLock lock(&url_renderer_mu);\n if (format == RenderedGraphFormat::kUrl && url_renderer == nullptr) {\n return FailedPrecondition(\n \"Can't render as URL; no URL renderer was registered.\");\n }\n std::string label =\n StrCat(\"Neighborhood of \", radius, \" nodes around \", node.name());\n std::string rendered_dot =\n HloDotDumper(\n node.parent(), label, node.GetModule()->config().debug_options(),\n hlo_render_options,\n MakeNodeRadiusAroundFilter(&node, radius, boundary), color_map)\n .Dump();\n return WrapDotInFormat(*node.parent(), rendered_dot, format);\n}\nabsl::StatusOr<std::string> RenderAllPathsFromTo(\n const HloInstruction& from, const HloInstruction& to, int64_t max_nodes,\n RenderedGraphFormat format, HloRenderOptions hlo_render_options) {\n absl::MutexLock lock(&url_renderer_mu);\n if (format == RenderedGraphFormat::kUrl && url_renderer == nullptr) {\n return FailedPrecondition(\n \"Can't render as URL; no URL renderer was registered.\");\n }\n CHECK_EQ(from.parent(), to.parent()) << \"Nodes must be in same computation!\";\n auto debug_options = from.GetModule()->config().debug_options();\n bool hit_limit = false;\n NodeFilter filter = MakeNodeFromToFilter(&from, &to, max_nodes, &hit_limit);\n std::string label;\n if (!hit_limit) {\n label = StrCat(\"All paths from \", from.name(), \" to \", to.name());\n } else {\n label = StrCat(max_nodes, \" nodes on the shortest paths from \", from.name(),\n \" to \", to.name(),\n \"<br/><br/>***SHOWING ONLY A SUBSET OF ALL PATHS BETWEEN \"\n \"NODES***<br/><br/>
\");\n }\n std::string rendered_dot = HloDotDumper(from.parent(), label, debug_options,\n hlo_render_options, filter)\n .Dump();\n return WrapDotInFormat(*from.parent(), rendered_dot, format);\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/hlo_graph_dumper.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_format.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tests/test_utils.h\"\n#include \"xla/xla.pb.h\"\nnamespace xla {\nnamespace {\nusing absl::StrCat;\nusing ::testing::HasSubstr;\nusing HloGraphDumperTest = HloTestBase;\nstd::string TestName() {\n return ::testing::UnitTest::GetInstance()->current_test_info()->name();\n}\nTEST_F(HloGraphDumperTest, NestedFusion) {\n HloComputation::Builder b(\"b\");\n auto shape = ShapeUtil::MakeShape(F32, {10, 100});\n std::vector params;\n for (int i = 0; i <= 4; ++i) {\n params.push_back(b.AddInstruction(\n HloInstruction::CreateParameter(i, shape, StrCat(\"param\", i))));\n }\n std::vector sums;\n sums.push_back(b.AddInstruction(HloInstruction::CreateBinary(\n shape, HloOpcode::kAdd, params[0], params[1])));\n for (int i = 0; i <= 2; ++i) {\n sums.push_back(b.AddInstruction(HloInstruction::CreateBinary(\n shape, HloOpcode::kAdd, sums[i], params[i + 2])));\n }\n HloModuleConfig config;\n HloModule m(TestName(), config);\n m.AddEntryComputation(b.Build());\n HloComputation* root_computation = m.entry_computation();\n auto* outer_fusion = root_computation->CreateFusionInstruction(\n {sums[3], sums[2], sums[1], sums[0]}, HloInstruction::FusionKind::kLoop);\n std::vector fused_sums;\n for (auto* instr : outer_fusion->fused_instructions_computation()\n ->MakeInstructionPostOrder()) {\n if (instr->opcode() == HloOpcode::kAdd) {\n fused_sums.push_back(instr);\n }\n }\n auto* inner_fusion =\n outer_fusion->fused_instructions_computation()->CreateFusionInstruction(\n {fused_sums[1], fused_sums[0]}, HloInstruction::FusionKind::kLoop);\n TF_ASSERT_OK_AND_ASSIGN(\n std::string graph,\n RenderGraph(*root_computation, \"\", DebugOptions(),\n RenderedGraphFormat::kDot));\n for (const HloComputation* computation :\n {root_computation, \n inner_fusion->fused_instructions_computation(),\n outer_fusion->fused_instructions_computation()}) {\n for (const HloInstruction* instruction : computation->instructions()) {\n EXPECT_THAT(graph, HasSubstr(instruction->name()));\n }\n }\n const HloInstruction* inner_sum = nullptr;\n for (const HloInstruction* instruction :\n inner_fusion->fused_instructions_computation()->instructions()) {\n if (instruction->opcode() == HloOpcode::kAdd) {\n inner_sum = instruction;\n break;\n }\n }\n ASSERT_NE(inner_sum, nullptr);\n TF_ASSERT_OK_AND_ASSIGN(std::string neighborhood_graph,\n RenderNeighborhoodAround(*inner_sum, 1,\n RenderedGraphFormat::kDot));\n EXPECT_THAT(neighborhood_graph, HasSubstr(inner_sum->name()));\n}\nTEST_F(HloGraphDumperTest, Constant) {\n HloComputation::Builder b(\"b\");\n auto instruction = b.AddInstruction(\n HloInstruction::CreateConstant(LiteralUtil::CreateR0(-42)));\n instruction->SetAndSanitizeName(\"i_am_a_constant_root_instruction\");\n HloModuleConfig config;\n HloModule m(TestName(), config);\n HloComputation* root_computation = m.AddEntryComputation(b.Build());\n 
TF_ASSERT_OK_AND_ASSIGN(\n std::string graph,\n RenderGraph(*root_computation, \"an_empty_graph\", DebugOptions(),\n RenderedGraphFormat::kDot));\n EXPECT_THAT(graph, HasSubstr(\"an_empty_graph\"));\n}\nTEST_F(HloGraphDumperTest, TupleConstant) {\n Shape tuple_shape = ShapeUtil::MakeTupleShape(\n {ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(S32, {4, 5})});\n HloComputation::Builder b(\"b\");\n auto constant = b.AddInstruction(\n HloInstruction::CreateConstant(Literal::CreateFromShape(tuple_shape)));\n auto gte = b.AddInstruction(HloInstruction::CreateGetTupleElement(\n ShapeUtil::MakeShape(F32, {3, 2}), constant, 0));\n HloModuleConfig config;\n HloModule m(TestName(), config);\n HloComputation* root_computation = m.AddEntryComputation(b.Build(gte));\n TF_ASSERT_OK_AND_ASSIGN(\n std::string graph,\n RenderGraph(*root_computation, \"tuple_constant\", DebugOptions(),\n RenderedGraphFormat::kDot));\n EXPECT_THAT(graph, HasSubstr(\"tuple_constant\"));\n EXPECT_THAT(graph, HasSubstr(\"constant (f32[3,2], s32[4,5])\"));\n}\nTEST_F(HloGraphDumperTest, Compare) {\n const char* hlo_string = R\"(\n HloModule comp\n ENTRY comp {\n param.0 = f32[10] parameter(0)\n param.1 = f32[10] parameter(1)\n ROOT lt = pred[10] compare(param.0, param.1), direction=LT\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n std::string graph,\n RenderGraph(*module->entry_computation(), \"tuple_constant\",\n DebugOptions(), RenderedGraphFormat::kDot));\n EXPECT_THAT(graph, HasSubstr(\"direction=LT\"));\n}\nTEST_F(HloGraphDumperTest, HasStatisticsViz) {\n const char* hlo_string = R\"(\n HloModule comp\n ENTRY comp {\n param.0 = f32[10] parameter(0), statistics={visualizing_index=0,stat-0=0.5}\n param.1 = f32[10] parameter(1), statistics={visualizing_index=1,stat-0=55.5,stat-1=44.4}\n ROOT lt = pred[10] compare(param.0, param.1), direction=LT\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n std::string graph,\n RenderGraph(*module->entry_computation(), \"tuple_constant\",\n DebugOptions(), RenderedGraphFormat::kDot));\n}\nTEST_F(HloGraphDumperTest, RootIsConstant) {\n const char* hlo_string = R\"(\nHloModule indexed_conditional\n%then_branch (empty: ()) -> f32[] {\n %empty = () parameter(0)\n ROOT %then = f32[] constant(1)\n}\n%else_branch (empty.1: ()) -> f32[] {\n %empty.1 = () parameter(0)\n ROOT %else = f32[] constant(2)\n}\nENTRY %conditional_select (constant: pred[]) -> (f32[]) {\n %constant = pred[] parameter(0)\n %emptytuple = () tuple()\n %conditional = f32[] conditional(pred[] %constant, () %emptytuple, () %emptytuple), true_computation=%then_branch, false_computation=%else_branch\n ROOT %t = (f32[]) tuple(f32[] %conditional)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n std::string graph,\n RenderGraph(*module->entry_computation(), \"tuple_constant\",\n DebugOptions(), RenderedGraphFormat::kDot));\n}\nTEST_F(HloGraphDumperTest, OverrideColors) {\n const char* hlo_string = R\"(\n HloModule comp\n ENTRY comp {\n param.0 = f32[10] parameter(0)\n param.1 = f32[10] parameter(1)\n ROOT lt = pred[10] compare(param.0, param.1), direction=LT\n })\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n absl::flat_hash_map color_map;\n ColorStats color_stats_1;\n color_stats_1.color = \"#A9C343\";\n color_stats_1.stats = absl::StrFormat(\"%.3f\", 1.11);\n ColorStats 
color_stats_2;\n color_stats_2.color = \"#BC8A3F\";\n color_stats_2.stats = absl::StrFormat(\"%.3f\", 2.22);\n color_map[module->entry_computation()->GetInstructionWithName(\"param.0\")] =\n color_stats_1;\n color_map[module->entry_computation()->GetInstructionWithName(\"param.1\")] =\n color_stats_2;\n HloRenderOptions hlo_render_options;\n hlo_render_options.override_node_colors = true;\n TF_ASSERT_OK_AND_ASSIGN(\n std::string graph,\n RenderGraph(*module->entry_computation(), \"tuple_constant\",\n DebugOptions(), RenderedGraphFormat::kDot, hlo_render_options,\n color_map));\n EXPECT_THAT(graph, HasSubstr(\"#A9C343\"));\n EXPECT_THAT(graph, HasSubstr(\"1.110\"));\n EXPECT_THAT(graph, HasSubstr(\"#BC8A3F\"));\n EXPECT_THAT(graph, HasSubstr(\"2.220\"));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_graph_dumper.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_graph_dumper_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":214,"cells":{"ID":{"kind":"string","value":"71217517-bd22-4e54-9891-4344e5585a1c"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"uniform_quantized_types"},"File Path in Repository":{"kind":"string","value":"tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h\"\n#include \n#include \"llvm/ADT/STLExtras.h\"\n#include \"llvm/Support/Debug.h\"\n#include \"llvm/Support/MathExtras.h\"\n#include \"mlir/Dialect/Quant/IR/QuantTypes.h\" \n#include \"mlir/IR/BuiltinTypes.h\" \n#include \"mlir/IR/Location.h\" \n#include \"mlir/IR/MLIRContext.h\" \n#include \"mlir/IR/Operation.h\" \n#include \"mlir/IR/Types.h\" \n#include \"mlir/Support/LLVM.h\" \n#define DEBUG_TYPE \"uniform-quantized-types\"\nnamespace mlir {\nnamespace quant {\nUniformQuantizedType CreateI8F32UniformQuantizedType(const Location loc,\n MLIRContext& context,\n const double scale,\n const int64_t zero_point,\n const bool narrow_range) {\n return UniformQuantizedType::getChecked(\n loc, QuantizationFlags::Signed,\n IntegerType::get(&context, 8),\n FloatType::getF32(&context), scale, zero_point,\n llvm::minIntN(8) + (narrow_range ? 
1 : 0),\n llvm::maxIntN(8));\n}\nUniformQuantizedType CreateI32F32UniformQuantizedType(\n const Location loc, MLIRContext& context, const double scale,\n const int64_t zero_point) {\n return UniformQuantizedType::getChecked(\n loc, QuantizationFlags::Signed,\n IntegerType::get(&context, 32),\n FloatType::getF32(&context), scale, zero_point,\n llvm::minIntN(32),\n llvm::maxIntN(32));\n}\nUniformQuantizedPerAxisType CreateI8F32UniformQuantizedPerAxisType(\n const Location loc, MLIRContext& context, const ArrayRef<double> scales,\n const ArrayRef<int64_t> zero_points, const int quantization_dimension,\n const bool narrow_range) {\n return UniformQuantizedPerAxisType::getChecked(\n loc, QuantizationFlags::Signed,\n IntegerType::get(&context, 8),\n FloatType::getF32(&context),\n SmallVector<double>(scales), SmallVector<int64_t>(zero_points),\n quantization_dimension,\n llvm::minIntN(8) + (narrow_range ? 1 : 0),\n llvm::maxIntN(8));\n}\nUniformQuantizedPerAxisType CreateI32F32UniformQuantizedPerAxisType(\n const Location loc, MLIRContext& context, const ArrayRef<double> scales,\n const ArrayRef<int64_t> zero_points, const int quantization_dimension) {\n return UniformQuantizedPerAxisType::getChecked(\n loc, QuantizationFlags::Signed,\n IntegerType::get(&context, 32),\n FloatType::getF32(&context),\n SmallVector<double>(scales), SmallVector<int64_t>(zero_points),\n quantization_dimension, llvm::minIntN(32),\n llvm::maxIntN(32));\n}\nbool IsStorageTypeI8(const QuantizedType quantized_type) {\n const Type storage_type = quantized_type.getStorageType();\n return storage_type.isInteger(8);\n}\nbool IsStorageTypeI32(const QuantizedType quantized_type) {\n const Type storage_type = quantized_type.getStorageType();\n return storage_type.isInteger(32);\n}\nbool IsExpressedTypeF32(const QuantizedType quantized_type) {\n const Type expressed_type = quantized_type.getExpressedType();\n return mlir::isa<Float32Type>(expressed_type);\n}\nbool IsI8F32UniformQuantizedType(const Type type) {\n const UniformQuantizedType quantized_type =\n mlir::dyn_cast_or_null<UniformQuantizedType>(type);\n if (!quantized_type) {\n LLVM_DEBUG(llvm::dbgs()\n << \"Expected a uniform quantized type. Got: \" << type << \".\\n\");\n return false;\n }\n if (!IsStorageTypeI8(quantized_type)) {\n LLVM_DEBUG(llvm::dbgs() << \"Expected an i8 storage type. Got: \"\n << quantized_type << \".\\n\");\n return false;\n }\n if (!IsExpressedTypeF32(quantized_type)) {\n LLVM_DEBUG(llvm::dbgs() << \"Expected an f32 expressed type. Got: \"\n << quantized_type << \".\\n\");\n return false;\n }\n return true;\n}\nbool IsI8F32UniformQuantizedPerAxisType(const Type type) {\n const UniformQuantizedPerAxisType quantized_per_axis_type =\n mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(type);\n if (!quantized_per_axis_type) {\n LLVM_DEBUG(llvm::dbgs()\n << \"Expected a uniform quantized type. Got: \" << type << \".\\n\");\n return false;\n }\n if (!IsStorageTypeI8(quantized_per_axis_type)) {\n LLVM_DEBUG(llvm::dbgs() << \"Expected an i8 storage type. Got: \"\n << quantized_per_axis_type << \".\\n\");\n return false;\n }\n if (!IsExpressedTypeF32(quantized_per_axis_type)) {\n LLVM_DEBUG(llvm::dbgs() << \"Expected an f32 expressed type. Got: \"\n << quantized_per_axis_type << \".\\n\");\n return false;\n }\n return true;\n}\nbool IsI32F32UniformQuantizedType(const Type type) {\n const UniformQuantizedType quantized_type =\n mlir::dyn_cast_or_null<UniformQuantizedType>(type);\n if (!quantized_type) {\n LLVM_DEBUG(llvm::dbgs()\n << \"Expected a uniform quantized type. 
Got: \" << type << \".\\n\");\n return false;\n }\n if (!IsStorageTypeI32(quantized_type)) {\n LLVM_DEBUG(llvm::dbgs() << \"Expected an i32 storage type. Got: \"\n << quantized_type << \".\\n\");\n return false;\n }\n if (!IsExpressedTypeF32(quantized_type)) {\n LLVM_DEBUG(llvm::dbgs() << \"Expected an f32 expressed type. Got: \"\n << quantized_type << \".\\n\");\n return false;\n }\n return true;\n}\nbool IsI32F32UniformQuantizedPerAxisType(const Type type) {\n const UniformQuantizedPerAxisType quantized_per_axis_type =\n mlir::dyn_cast_or_null(type);\n if (!quantized_per_axis_type) {\n LLVM_DEBUG(llvm::dbgs()\n << \"Expected a uniform quantized type. Got: \" << type << \".\\n\");\n return false;\n }\n if (!IsStorageTypeI32(quantized_per_axis_type)) {\n LLVM_DEBUG(llvm::dbgs() << \"Expected an i32 storage type. Got: \"\n << quantized_per_axis_type << \".\\n\");\n return false;\n }\n if (!IsExpressedTypeF32(quantized_per_axis_type)) {\n LLVM_DEBUG(llvm::dbgs() << \"Expected an f32 expressed type. Got: \"\n << quantized_per_axis_type << \".\\n\");\n return false;\n }\n return true;\n}\nbool IsSupportedByTfliteQuantizeOrDequantizeOps(IntegerType storage_type) {\n if (storage_type.getWidth() == 8 ||\n (storage_type.isSigned() && storage_type.getWidth() == 16)) {\n return true;\n }\n LLVM_DEBUG(llvm::dbgs()\n << \"Uniform quantize / dequantize op only supports ui8, i8 or \"\n \"i16 for the storage type of uniform quantized type. Got: \"\n << storage_type << \".\\n\");\n return false;\n}\nbool IsQuantizedTensorType(Type type) {\n if (!mlir::isa(type)) {\n return false;\n }\n Type element_type = mlir::cast(type).getElementType();\n return mlir::isa(element_type);\n}\nbool IsOpFullyQuantized(Operation* op) {\n return llvm::all_of(op->getOperandTypes(), IsQuantizedTensorType) &&\n llvm::all_of(op->getResultTypes(), IsQuantizedTensorType);\n}\nbool IsOpNotQuantized(Operation* op) {\n return !llvm::any_of(op->getOperandTypes(), IsQuantizedTensorType) &&\n !llvm::any_of(op->getResultTypes(), IsQuantizedTensorType);\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h\"\n#include \n#include \n#include \n#include \n#include \"absl/strings/string_view.h\"\n#include \"mlir/Dialect/Func/IR/FuncOps.h\" \n#include \"mlir/Dialect/Quant/IR/Quant.h\" \n#include \"mlir/Dialect/Quant/IR/QuantTypes.h\" \n#include \"mlir/IR/Builders.h\" \n#include \"mlir/IR/BuiltinOps.h\" \n#include \"mlir/IR/Location.h\" \n#include \"mlir/IR/MLIRContext.h\" \n#include \"mlir/IR/OwningOpRef.h\" \n#include \"mlir/IR/Value.h\" \n#include \"mlir/Support/LLVM.h\" \n#include \"stablehlo/dialect/StablehloOps.h\" \n#include \"tensorflow/compiler/mlir/quantization/common/test_base.h\"\nnamespace mlir {\nnamespace quant {\nnamespace {\nusing ::testing::ElementsAreArray;\nusing ::testing::IsNull;\nusing ::testing::Ne;\nusing ::testing::NotNull;\nusing ::testing::Test;\nclass CreateI8F32UniformQuantizedTypeTest : public Test {\n protected:\n CreateI8F32UniformQuantizedTypeTest() : ctx_() {\n ctx_.loadDialect();\n }\n MLIRContext ctx_;\n};\nTEST_F(CreateI8F32UniformQuantizedTypeTest, I8StorageTypeSucceeds) {\n const UniformQuantizedType quantized_type =\n CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,\n 1.0, 0);\n EXPECT_TRUE(quantized_type.getStorageType().isSignlessInteger(8));\n}\nTEST_F(CreateI8F32UniformQuantizedTypeTest, F32ExpressedTypeSucceeds) {\n const UniformQuantizedType quantized_type =\n 
CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,\n 1.0, 0);\n EXPECT_TRUE(quantized_type.getExpressedType().isF32());\n}\nTEST_F(CreateI8F32UniformQuantizedTypeTest, SignedQuantizedTypeSucceeds) {\n const UniformQuantizedType quantized_type =\n CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,\n 1.0, 0);\n EXPECT_TRUE(quantized_type.isSigned());\n}\nTEST_F(CreateI8F32UniformQuantizedTypeTest, StorageTypeMinMaxEqualToI8MinMax) {\n const UniformQuantizedType quantized_type =\n CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,\n 1.0, 0);\n EXPECT_EQ(quantized_type.getStorageTypeMin(), -128);\n EXPECT_EQ(quantized_type.getStorageTypeMax(), 127);\n}\nTEST_F(CreateI8F32UniformQuantizedTypeTest, StorageTypeMinMaxNarrowRange) {\n const UniformQuantizedType quantized_type = CreateI8F32UniformQuantizedType(\n UnknownLoc::get(&ctx_), ctx_,\n 1.0, 0, true);\n EXPECT_EQ(quantized_type.getStorageTypeMin(), -127);\n EXPECT_EQ(quantized_type.getStorageTypeMax(), 127);\n}\nTEST_F(CreateI8F32UniformQuantizedTypeTest, HasScaleAndZeroPointProperlySet) {\n const UniformQuantizedType quantized_type =\n CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,\n 8.0, 99);\n EXPECT_EQ(quantized_type.getScale(), 8.0);\n EXPECT_EQ(quantized_type.getZeroPoint(), 99);\n}\nclass CreateI32F32UniformQuantizedTypeTest : public Test {\n protected:\n CreateI32F32UniformQuantizedTypeTest() : ctx_() {\n ctx_.loadDialect();\n }\n MLIRContext ctx_;\n};\nTEST_F(CreateI32F32UniformQuantizedTypeTest, I32StorageTypeSucceeds) {\n const UniformQuantizedType quantized_type =\n CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,\n 1.0, 0);\n EXPECT_TRUE(quantized_type.getStorageType().isSignlessInteger(32));\n}\nTEST_F(CreateI32F32UniformQuantizedTypeTest, F32ExpressedTypeSucceeds) {\n const UniformQuantizedType quantized_type =\n CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,\n 1.0, 0);\n EXPECT_TRUE(quantized_type.getExpressedType().isF32());\n}\nTEST_F(CreateI32F32UniformQuantizedTypeTest, SignedQuantizedTypeSucceeds) {\n const UniformQuantizedType quantized_type =\n CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,\n 1.0, 0);\n EXPECT_TRUE(quantized_type.isSigned());\n}\nTEST_F(CreateI32F32UniformQuantizedTypeTest,\n StorageTypeMinMaxEqualToI32MinMax) {\n const UniformQuantizedType quantized_type =\n CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,\n 1.0, 0);\n EXPECT_EQ(quantized_type.getStorageTypeMin(),\n std::numeric_limits::min());\n EXPECT_EQ(quantized_type.getStorageTypeMax(),\n std::numeric_limits::max());\n}\nTEST_F(CreateI32F32UniformQuantizedTypeTest, HasScaleAndZeroPointProperlySet) {\n const UniformQuantizedType quantized_type =\n CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,\n 8.0, 1111);\n EXPECT_EQ(quantized_type.getScale(), 8.0);\n EXPECT_EQ(quantized_type.getZeroPoint(), 1111);\n}\nclass CreateI8F32UniformQuantizedPerAxisTypeTest : public Test {\n protected:\n CreateI8F32UniformQuantizedPerAxisTypeTest() : ctx_() {\n ctx_.loadDialect();\n }\n MLIRContext ctx_;\n};\nTEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest, I8StorageTypeSucceeds) {\n const UniformQuantizedPerAxisType quantized_type =\n CreateI8F32UniformQuantizedPerAxisType(\n UnknownLoc::get(&ctx_), ctx_,\n SmallVector{1.0, 1.0},\n SmallVector{0, 0},\n 0);\n EXPECT_TRUE(quantized_type.getStorageType().isSignlessInteger(8));\n}\nTEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest, F32ExpressedTypeSucceeds) {\n const UniformQuantizedPerAxisType 
quantized_type =\n CreateI8F32UniformQuantizedPerAxisType(\n UnknownLoc::get(&ctx_), ctx_,\n SmallVector{1.0, 1.0},\n SmallVector{0, 0},\n 0);\n EXPECT_TRUE(quantized_type.getExpressedType().isF32());\n}\nTEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest,\n SignedQuantizedTypeSucceeds) {\n const UniformQuantizedPerAxisType quantized_type =\n CreateI8F32UniformQuantizedPerAxisType(\n UnknownLoc::get(&ctx_), ctx_,\n SmallVector{1.0, 1.0},\n SmallVector{0, 0},\n 0);\n EXPECT_TRUE(quantized_type.isSigned());\n}\nTEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest,\n StorageTypeMinMaxEqualToI8MinMax) {\n const UniformQuantizedPerAxisType quantized_type =\n CreateI8F32UniformQuantizedPerAxisType(\n UnknownLoc::get(&ctx_), ctx_,\n SmallVector{1.0, 1.0},\n SmallVector{0, 0},\n 0);\n EXPECT_EQ(quantized_type.getStorageTypeMin(), -128);\n EXPECT_EQ(quantized_type.getStorageTypeMax(), 127);\n}\nTEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest,\n StorageTypeMinMaxNarrowRange) {\n const UniformQuantizedPerAxisType quantized_type =\n CreateI8F32UniformQuantizedPerAxisType(\n UnknownLoc::get(&ctx_), ctx_,\n SmallVector{1.0, 1.0},\n SmallVector{0, 0},\n 0, true);\n EXPECT_EQ(quantized_type.getStorageTypeMin(), -127);\n EXPECT_EQ(quantized_type.getStorageTypeMax(), 127);\n}\nTEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest,\n HasQuantizationDimensionProperlySet) {\n const UniformQuantizedPerAxisType quantized_type =\n CreateI8F32UniformQuantizedPerAxisType(\n UnknownLoc::get(&ctx_), ctx_,\n SmallVector{1.0, 1.0},\n SmallVector{0, 0},\n 3);\n EXPECT_EQ(quantized_type.getQuantizedDimension(), 3);\n}\nTEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest,\n HasScaleAndZeroPointProperlySet) {\n const UniformQuantizedPerAxisType quantized_type =\n CreateI8F32UniformQuantizedPerAxisType(\n UnknownLoc::get(&ctx_), ctx_,\n SmallVector{8.0, 9.0},\n SmallVector{98, 99},\n 0);\n EXPECT_THAT(quantized_type.getScales(), ElementsAreArray({8.0, 9.0}));\n EXPECT_THAT(quantized_type.getZeroPoints(), ElementsAreArray({98, 99}));\n}\nclass CreateI32F32UniformQuantizedPerAxisTypeTest : public Test {\n protected:\n CreateI32F32UniformQuantizedPerAxisTypeTest() : ctx_() {\n ctx_.loadDialect();\n }\n MLIRContext ctx_;\n};\nTEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest, I32StorageTypeSucceeds) {\n const UniformQuantizedPerAxisType quantized_type =\n CreateI32F32UniformQuantizedPerAxisType(\n UnknownLoc::get(&ctx_), ctx_,\n SmallVector{1.0, 1.0},\n SmallVector{0, 0},\n 0);\n EXPECT_TRUE(quantized_type.getStorageType().isSignlessInteger(32));\n}\nTEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest, F32ExpressedTypeSucceeds) {\n const UniformQuantizedPerAxisType quantized_type =\n CreateI32F32UniformQuantizedPerAxisType(\n UnknownLoc::get(&ctx_), ctx_,\n SmallVector{1.0, 1.0},\n SmallVector{0, 0},\n 0);\n EXPECT_TRUE(quantized_type.getExpressedType().isF32());\n}\nTEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest,\n StorageTypeMinMaxEqualToI32MinMax) {\n const UniformQuantizedPerAxisType quantized_type =\n CreateI32F32UniformQuantizedPerAxisType(\n UnknownLoc::get(&ctx_), ctx_,\n SmallVector{1.0, 1.0},\n SmallVector{0, 0},\n 0);\n EXPECT_EQ(quantized_type.getStorageTypeMin(),\n std::numeric_limits::min());\n EXPECT_EQ(quantized_type.getStorageTypeMax(),\n std::numeric_limits::max());\n}\nTEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest,\n HasQuantizationDimensionProperlySet) {\n const UniformQuantizedPerAxisType quantized_type =\n CreateI32F32UniformQuantizedPerAxisType(\n UnknownLoc::get(&ctx_), ctx_,\n SmallVector{1.0, 1.0},\n 
SmallVector{0, 0},\n 3);\n EXPECT_EQ(quantized_type.getQuantizedDimension(), 3);\n}\nTEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest,\n HasScaleAndZeroPointProperlySet) {\n const UniformQuantizedPerAxisType quantized_type =\n CreateI32F32UniformQuantizedPerAxisType(\n UnknownLoc::get(&ctx_), ctx_,\n SmallVector{8.0, 9.0},\n SmallVector{98, 99},\n 0);\n EXPECT_THAT(quantized_type.getScales(), ElementsAreArray({8.0, 9.0}));\n EXPECT_THAT(quantized_type.getZeroPoints(), ElementsAreArray({98, 99}));\n}\nclass IsI8F32UniformQuantizedTypeTest : public Test {\n protected:\n IsI8F32UniformQuantizedTypeTest() : builder_(&ctx_) {\n ctx_.loadDialect();\n }\n MLIRContext ctx_;\n OpBuilder builder_;\n};\nTEST_F(IsI8F32UniformQuantizedTypeTest, I8F32UniformQuantizedTypeSucceeds) {\n const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get(\n QuantizationFlags::Signed, builder_.getI8Type(),\n builder_.getF32Type(), 1.0,\n 0, -128, 127);\n EXPECT_TRUE(IsI8F32UniformQuantizedType(qi8_type));\n}\nTEST_F(IsI8F32UniformQuantizedTypeTest, UniformQuantizedTypeSucceeds) {\n const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get(\n QuantizationFlags::Signed, builder_.getI8Type(),\n builder_.getF32Type(), 1.0,\n 0, -128, 127);\n EXPECT_THAT(mlir::dyn_cast_or_null(qi8_type),\n NotNull());\n}\nTEST_F(IsI8F32UniformQuantizedTypeTest, StorageTypeI8Succeeds) {\n const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get(\n QuantizationFlags::Signed, builder_.getI8Type(),\n builder_.getF32Type(), 1.0,\n 0, -128, 127);\n EXPECT_TRUE(IsStorageTypeI8(qi8_type));\n}\nTEST_F(IsI8F32UniformQuantizedTypeTest, ExpressedTypeF32Succeeds) {\n const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get(\n QuantizationFlags::Signed, builder_.getI8Type(),\n builder_.getF32Type(), 1.0,\n 0, -128, 127);\n EXPECT_TRUE(IsExpressedTypeF32(qi8_type));\n}\nclass IsI8F32UniformQuantizedPerAxisTypeTest : public Test {\n protected:\n IsI8F32UniformQuantizedPerAxisTypeTest() : builder_(&ctx_) {\n ctx_.loadDialect();\n }\n MLIRContext ctx_;\n OpBuilder builder_;\n};\nTEST_F(IsI8F32UniformQuantizedPerAxisTypeTest,\n I8F32UniformQuantizedPerAxisTypeSucceeds) {\n const UniformQuantizedPerAxisType qi8_per_axis_type =\n quant::UniformQuantizedPerAxisType::get(\n QuantizationFlags::Signed, builder_.getI8Type(),\n builder_.getF32Type(),\n {1.0},\n {0}, 0, -128,\n 127);\n EXPECT_TRUE(IsI8F32UniformQuantizedPerAxisType(qi8_per_axis_type));\n EXPECT_FALSE(IsI8F32UniformQuantizedType(qi8_per_axis_type));\n}\nTEST_F(IsI8F32UniformQuantizedTypeTest, UniformQuantizedPerAxisTypeSucceeds) {\n const UniformQuantizedPerAxisType qi8_per_axis_type =\n quant::UniformQuantizedPerAxisType::get(\n QuantizationFlags::Signed, builder_.getI8Type(),\n builder_.getF32Type(),\n {1.0},\n {0}, 0, -128,\n 127);\n EXPECT_THAT(\n mlir::dyn_cast_or_null(qi8_per_axis_type),\n NotNull());\n}\nTEST_F(IsI8F32UniformQuantizedPerAxisTypeTest, StorageTypeI8Succeeds) {\n const UniformQuantizedPerAxisType qi8_per_axis_type =\n quant::UniformQuantizedPerAxisType::get(\n QuantizationFlags::Signed, builder_.getI8Type(),\n builder_.getF32Type(),\n {1.0},\n {0}, 0, -128,\n 127);\n EXPECT_TRUE(IsStorageTypeI8(qi8_per_axis_type));\n}\nTEST_F(IsI8F32UniformQuantizedPerAxisTypeTest, ExpressedTypeF32Succeeds) {\n const UniformQuantizedPerAxisType qi8_per_axis_type =\n quant::UniformQuantizedPerAxisType::get(\n QuantizationFlags::Signed, builder_.getI8Type(),\n builder_.getF32Type(),\n {1.0},\n {0}, 0, -128,\n 127);\n 
EXPECT_TRUE(IsExpressedTypeF32(qi8_per_axis_type));\n}\nclass IsI32F32UniformQuantizedTypeTest : public Test {\n protected:\n IsI32F32UniformQuantizedTypeTest() : builder_(&ctx_) {\n ctx_.loadDialect();\n }\n MLIRContext ctx_;\n OpBuilder builder_;\n};\nTEST_F(IsI32F32UniformQuantizedTypeTest, I32F32UniformQuantizedTypeSucceeds) {\n const UniformQuantizedType qi32_type = quant::UniformQuantizedType::get(\n QuantizationFlags::Signed, builder_.getI32Type(),\n builder_.getF32Type(),\n 1.0,\n 0, -2147483647,\n 2147483646);\n EXPECT_TRUE(IsI32F32UniformQuantizedType(qi32_type));\n}\nTEST_F(IsI32F32UniformQuantizedTypeTest, UniformQuantizedTypeSucceeds) {\n const UniformQuantizedType qi32_type = quant::UniformQuantizedType::get(\n QuantizationFlags::Signed, builder_.getI32Type(),\n builder_.getF32Type(),\n 1.0,\n 0, -2147483647,\n 2147483646);\n EXPECT_TRUE(IsI32F32UniformQuantizedType(qi32_type));\n EXPECT_THAT(mlir::dyn_cast_or_null(qi32_type),\n NotNull());\n}\nTEST_F(IsI32F32UniformQuantizedTypeTest, StorageTypeI32Succeeds) {\n const UniformQuantizedType qi32_type = quant::UniformQuantizedType::get(\n QuantizationFlags::Signed, builder_.getI32Type(),\n builder_.getF32Type(),\n 1.0,\n 0, -2147483647,\n 2147483646);\n EXPECT_TRUE(IsI32F32UniformQuantizedType(qi32_type));\n EXPECT_TRUE(IsStorageTypeI32(qi32_type));\n}\nTEST_F(IsI32F32UniformQuantizedTypeTest, ExpressedTypeF32Succeeds) {\n const UniformQuantizedType qi32_per_axis_type =\n quant::UniformQuantizedType::get(\n QuantizationFlags::Signed, builder_.getI32Type(),\n builder_.getF32Type(),\n 1.0,\n 0, -2147483647,\n 2147483646);\n EXPECT_TRUE(IsExpressedTypeF32(qi32_per_axis_type));\n}\nclass IsI32F32UniformQuantizedPerAxisTypeTest : public Test {\n protected:\n IsI32F32UniformQuantizedPerAxisTypeTest() : builder_(&ctx_) {\n ctx_.loadDialect();\n }\n MLIRContext ctx_;\n OpBuilder builder_;\n};\nTEST_F(IsI32F32UniformQuantizedPerAxisTypeTest,\n I32F32UniformQuantizedPerAxisTypeSucceeds) {\n const UniformQuantizedPerAxisType qi32_per_axis_type =\n quant::UniformQuantizedPerAxisType::get(\n QuantizationFlags::Signed, builder_.getI32Type(),\n builder_.getF32Type(),\n {1.0},\n {0}, 0,\n -2147483647, 2147483646);\n EXPECT_TRUE(IsI32F32UniformQuantizedPerAxisType(qi32_per_axis_type));\n EXPECT_FALSE(IsI32F32UniformQuantizedType(qi32_per_axis_type));\n}\nTEST_F(IsI32F32UniformQuantizedPerAxisTypeTest,\n I8F32UniformQuantizedTypeFails) {\n const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get(\n QuantizationFlags::Signed, builder_.getI8Type(),\n builder_.getF32Type(),\n 1.0, 0, -128,\n 127);\n EXPECT_FALSE(IsI32F32UniformQuantizedPerAxisType(qi8_type));\n EXPECT_FALSE(IsStorageTypeI32(qi8_type));\n EXPECT_THAT(mlir::dyn_cast_or_null(qi8_type),\n IsNull());\n}\nTEST_F(IsI32F32UniformQuantizedTypeTest, UniformQuantizedPerAxisTypeSucceeds) {\n const UniformQuantizedPerAxisType qi32_per_axis_type =\n quant::UniformQuantizedPerAxisType::get(\n QuantizationFlags::Signed, builder_.getI32Type(),\n builder_.getF32Type(),\n {1.0},\n {0}, 0,\n -2147483647, 2147483646);\n EXPECT_THAT(\n mlir::dyn_cast_or_null(qi32_per_axis_type),\n NotNull());\n}\nTEST_F(IsI32F32UniformQuantizedPerAxisTypeTest, StorageTypeI8Succeeds) {\n const UniformQuantizedPerAxisType qi32_per_axis_type =\n quant::UniformQuantizedPerAxisType::get(\n QuantizationFlags::Signed, builder_.getI32Type(),\n builder_.getF32Type(),\n {1.0},\n {0}, 0,\n -2147483647, 2147483646);\n EXPECT_TRUE(IsStorageTypeI32(qi32_per_axis_type));\n}\nTEST_F(IsI32F32UniformQuantizedPerAxisTypeTest, 
ExpressedTypeF32Succeeds) {\n const UniformQuantizedPerAxisType qi32_per_axis_type =\n quant::UniformQuantizedPerAxisType::get(\n QuantizationFlags::Signed, builder_.getI32Type(),\n builder_.getF32Type(),\n {1.0},\n {0}, 0,\n -2147483647, 2147483646);\n EXPECT_TRUE(IsExpressedTypeF32(qi32_per_axis_type));\n}\nclass IsSupportedByTfliteQuantizeOrDequantizeOpsTest : public Test {\n protected:\n IsSupportedByTfliteQuantizeOrDequantizeOpsTest() : builder_(&ctx_) {\n ctx_.loadDialect();\n }\n MLIRContext ctx_;\n OpBuilder builder_;\n};\nTEST_F(IsSupportedByTfliteQuantizeOrDequantizeOpsTest, StorageTypeI8Succeeds) {\n auto qi8_type = quant::UniformQuantizedType::get(\n QuantizationFlags::Signed, builder_.getI8Type(),\n builder_.getF32Type(),\n 1.0,\n 0, -128, 127);\n EXPECT_TRUE(IsSupportedByTfliteQuantizeOrDequantizeOps(\n dyn_cast_or_null(qi8_type.getStorageType())));\n}\nTEST_F(IsSupportedByTfliteQuantizeOrDequantizeOpsTest, StorageTypeI16Succeeds) {\n auto qi16_type = quant::UniformQuantizedType::get(\n QuantizationFlags::Signed, builder_.getI8Type(),\n builder_.getF32Type(),\n 1.0,\n 0, -128, 127);\n EXPECT_TRUE(IsSupportedByTfliteQuantizeOrDequantizeOps(\n dyn_cast_or_null(qi16_type.getStorageType())));\n}\nTEST_F(IsSupportedByTfliteQuantizeOrDequantizeOpsTest, StorageTypeUI8Succeeds) {\n auto qi8_type = quant::UniformQuantizedType::get(\n QuantizationFlags::Signed, builder_.getI8Type(),\n builder_.getF32Type(),\n 1.0,\n 0, -128, 127);\n EXPECT_TRUE(IsSupportedByTfliteQuantizeOrDequantizeOps(\n dyn_cast_or_null(qi8_type.getStorageType())));\n}\nusing IsOpFullyQuantizedTest = QuantizationTestBase;\nTEST_F(IsOpFullyQuantizedTest, TrueIfOpFullyQuantized) {\n constexpr absl::string_view kFullyQuantizedAdd = R\"mlir(\n func.func @fully_quantized_add(%arg0: tensor<2x!quant.uniform>) -> tensor<2x!quant.uniform> {\n %0 = stablehlo.add %arg0, %arg0 : tensor<2x!quant.uniform>\n return %0 : tensor<2x!quant.uniform>\n }\n )mlir\";\n OwningOpRef module_op = ParseModuleOpString(kFullyQuantizedAdd);\n ASSERT_TRUE(module_op);\n auto func_op = module_op->lookupSymbol(\"fully_quantized_add\");\n ASSERT_THAT(func_op, NotNull());\n auto add_op_itr = func_op.getBody().op_begin();\n ASSERT_THAT(add_op_itr,\n Ne(func_op.getBody().op_end()));\n EXPECT_TRUE(IsOpFullyQuantized(*add_op_itr));\n}\nTEST_F(IsOpFullyQuantizedTest, FalseIfOpNotQuantized) {\n constexpr absl::string_view kNotQuantizedAdd = R\"mlir(\n func.func @not_quantized_add(%arg0: tensor<2xf32>) -> tensor<2xf32> {\n %0 = stablehlo.add %arg0, %arg0 : tensor<2xf32>\n return %0 : tensor<2xf32>\n }\n )mlir\";\n OwningOpRef module_op = ParseModuleOpString(kNotQuantizedAdd);\n ASSERT_TRUE(module_op);\n auto func_op = module_op->lookupSymbol(\"not_quantized_add\");\n ASSERT_THAT(func_op, NotNull());\n auto add_op_itr = func_op.getBody().op_begin();\n ASSERT_THAT(add_op_itr,\n Ne(func_op.getBody().op_end()));\n EXPECT_FALSE(IsOpFullyQuantized(*add_op_itr));\n}\nTEST_F(IsOpFullyQuantizedTest, FalseIfOpPartiallyQuantized) {\n constexpr absl::string_view kQuantizeOp = R\"mlir(\n func.func @quantize(%arg0: tensor<2xf32>) -> tensor<2x!quant.uniform> {\n %0 = stablehlo.uniform_quantize %arg0 : (tensor<2xf32>) -> tensor<2x!quant.uniform>\n return %0 : tensor<2x!quant.uniform>\n }\n )mlir\";\n OwningOpRef module_op = ParseModuleOpString(kQuantizeOp);\n ASSERT_TRUE(module_op);\n auto func_op = module_op->lookupSymbol(\"quantize\");\n ASSERT_THAT(func_op, NotNull());\n auto uniform_quantize_op_itr =\n func_op.getBody().op_begin();\n ASSERT_THAT(\n 
uniform_quantize_op_itr,\n Ne(func_op.getBody().op_end()));\n EXPECT_FALSE(IsOpFullyQuantized(*uniform_quantize_op_itr));\n}\nusing IsOpNotQuantizedTest = QuantizationTestBase;\nTEST_F(IsOpNotQuantizedTest, TrueIfOpNotQuantized) {\n constexpr absl::string_view kNotQuantizedAdd = R\"mlir(\n func.func @not_quantized_add(%arg0: tensor<2xf32>) -> tensor<2xf32> {\n %0 = stablehlo.add %arg0, %arg0 : tensor<2xf32>\n return %0 : tensor<2xf32>\n }\n )mlir\";\n OwningOpRef module_op = ParseModuleOpString(kNotQuantizedAdd);\n ASSERT_TRUE(module_op);\n auto func_op = module_op->lookupSymbol(\"not_quantized_add\");\n ASSERT_THAT(func_op, NotNull());\n auto add_op_itr = func_op.getBody().op_begin();\n ASSERT_THAT(add_op_itr,\n Ne(func_op.getBody().op_end()));\n EXPECT_TRUE(IsOpNotQuantized(*add_op_itr));\n}\nTEST_F(IsOpNotQuantizedTest, FalseIfOpQuantized) {\n constexpr absl::string_view kQuantizedAdd = R\"mlir(\n func.func @quantized_add(%arg0: tensor<2x!quant.uniform>) -> tensor<2x!quant.uniform> {\n %0 = stablehlo.add %arg0, %arg0 : tensor<2x!quant.uniform>\n return %0 : tensor<2x!quant.uniform>\n }\n )mlir\";\n OwningOpRef module_op = ParseModuleOpString(kQuantizedAdd);\n ASSERT_TRUE(module_op);\n auto func_op = module_op->lookupSymbol(\"quantized_add\");\n ASSERT_THAT(func_op, NotNull());\n auto add_op_itr = func_op.getBody().op_begin();\n ASSERT_THAT(add_op_itr,\n Ne(func_op.getBody().op_end()));\n EXPECT_FALSE(IsOpNotQuantized(*add_op_itr));\n}\nTEST_F(IsOpNotQuantizedTest, FalseIfOpPartiallyQuantized) {\n constexpr absl::string_view kQuantizeOp = R\"mlir(\n func.func @quantize(%arg0: tensor<2xf32>) -> tensor<2x!quant.uniform> {\n %0 = stablehlo.uniform_quantize %arg0 : (tensor<2xf32>) -> tensor<2x!quant.uniform>\n return %0 : tensor<2x!quant.uniform>\n }\n )mlir\";\n OwningOpRef module_op = ParseModuleOpString(kQuantizeOp);\n ASSERT_TRUE(module_op);\n auto func_op = module_op->lookupSymbol(\"quantize\");\n ASSERT_THAT(func_op, NotNull());\n auto uniform_quantize_op_itr =\n func_op.getBody().op_begin();\n ASSERT_THAT(\n uniform_quantize_op_itr,\n Ne(func_op.getBody().op_end()));\n EXPECT_FALSE(IsOpNotQuantized(*uniform_quantize_op_itr));\n}\nusing UniformQuantizedTypeTest = QuantizationTestBase;\nTEST_F(UniformQuantizedTypeTest, GetElementTypeSucceeds) {\n constexpr absl::string_view kQuantizeOp = R\"mlir(\n func.func @quantize(%arg0: tensor<2xf32>) -> tensor<2x!quant.uniform> {\n %0 = stablehlo.uniform_quantize %arg0 : (tensor<2xf32>) -> tensor<2x!quant.uniform>\n return %0 : tensor<2x!quant.uniform>\n }\n )mlir\";\n OwningOpRef module_op = ParseModuleOpString(kQuantizeOp);\n ASSERT_TRUE(module_op);\n auto func_op = module_op->lookupSymbol(\"quantize\");\n ASSERT_THAT(func_op, NotNull());\n auto uniform_quantize_op =\n *func_op.getOps<::mlir::stablehlo::UniformQuantizeOp>().begin();\n Value result = uniform_quantize_op.getResult();\n EXPECT_THAT(GetElementType(result), NotNull());\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc"},"Commit 
Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":215,"cells":{"ID":{"kind":"string","value":"94996556-fc86-4b9e-8614-170ea8e383c5"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"tensor_id"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/graph/tensor_id.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/graph/tensor_id_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/graph/tensor_id.h\"\n#include \n#include \"tensorflow/core/lib/core/stringpiece.h\"\n#include \"tensorflow/core/lib/strings/str_util.h\"\nnamespace tensorflow {\nTensorId::TensorId(const SafeTensorId& id) : TensorId(id.first, id.second) {}\nSafeTensorId::SafeTensorId(const TensorId& id)\n : SafeTensorId(string(id.first), id.second) {}\nTensorId ParseTensorName(const string& name) {\n return ParseTensorName(StringPiece(name.data(), name.size()));\n}\nTensorId ParseTensorName(StringPiece name) {\n const char* base = name.data();\n const char* p = base + name.size() - 1;\n unsigned int index = 0;\n unsigned int mul = 1;\n while (p > base && (*p >= '0' && *p <= '9')) {\n index += ((*p - '0') * mul);\n mul *= 10;\n p--;\n }\n TensorId id;\n if (p > base && *p == ':' && mul > 1) {\n id.first = StringPiece(base, p - base);\n id.second = index;\n } else if (absl::StartsWith(name, \"^\")) {\n id.first = StringPiece(base + 1);\n id.second = Graph::kControlSlot;\n } else {\n id.first = name;\n id.second = 0;\n }\n return id;\n}\nbool IsTensorIdControl(const TensorId& tensor_id) {\n return tensor_id.index() == Graph::kControlSlot;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/graph/tensor_id.h\"\n#include \n#include \"tensorflow/core/lib/random/simple_philox.h\"\n#include \"tensorflow/core/platform/logging.h\"\n#include \"tensorflow/core/platform/test.h\"\n#include \"tensorflow/core/platform/test_benchmark.h\"\nnamespace tensorflow {\nnamespace {\nstring ParseHelper(const string& n) { return ParseTensorName(n).ToString(); }\nTEST(TensorIdTest, ParseTensorName) {\n EXPECT_EQ(ParseHelper(\"W1\"), \"W1:0\");\n EXPECT_EQ(ParseHelper(\"W1:0\"), \"W1:0\");\n EXPECT_EQ(ParseHelper(\"weights:0\"), \"weights:0\");\n EXPECT_EQ(ParseHelper(\"W1:1\"), \"W1:1\");\n EXPECT_EQ(ParseHelper(\"W1:17\"), \"W1:17\");\n EXPECT_EQ(ParseHelper(\"xyz1_17\"), \"xyz1_17:0\");\n EXPECT_EQ(ParseHelper(\"^foo\"), \"^foo\");\n}\nuint32 Skewed(random::SimplePhilox* rnd, int max_log) {\n const uint32 space = 1 << (rnd->Rand32() % (max_log + 1));\n return rnd->Rand32() % space;\n}\nvoid BM_ParseTensorName(::testing::benchmark::State& state) {\n const int arg = state.range(0);\n random::PhiloxRandom philox(301, 17);\n random::SimplePhilox rnd(&philox);\n std::vector names;\n for (int i = 0; i < 100; i++) {\n string name;\n switch (arg) {\n case 0: { \n size_t len = Skewed(&rnd, 4);\n while (name.size() < len) {\n name += rnd.OneIn(4) ? 
'0' : 'a';\n }\n if (rnd.OneIn(3)) {\n strings::StrAppend(&name, \":\", rnd.Uniform(12));\n }\n break;\n }\n case 1:\n name = \"W1\";\n break;\n case 2:\n name = \"t0003\";\n break;\n case 3:\n name = \"weights\";\n break;\n case 4:\n name = \"weights:17\";\n break;\n case 5:\n name = \"^weights\";\n break;\n default:\n LOG(FATAL) << \"Unexpected arg\";\n break;\n }\n names.push_back(name);\n }\n TensorId id;\n int index = 0;\n int sum = 0;\n for (auto s : state) {\n id = ParseTensorName(names[index++ % names.size()]);\n sum += id.second;\n }\n VLOG(2) << sum; \n}\nBENCHMARK(BM_ParseTensorName)->Arg(0)->Arg(1)->Arg(2)->Arg(3)->Arg(4)->Arg(5);\nTEST(TensorIdTest, IsTensorIdControl) {\n string input = \"^foo\";\n TensorId tensor_id = ParseTensorName(input);\n EXPECT_TRUE(IsTensorIdControl(tensor_id));\n input = \"foo\";\n tensor_id = ParseTensorName(input);\n EXPECT_FALSE(IsTensorIdControl(tensor_id));\n input = \"foo:2\";\n tensor_id = ParseTensorName(input);\n EXPECT_FALSE(IsTensorIdControl(tensor_id));\n}\nTEST(TensorIdTest, PortZero) {\n for (string input : {\"foo\", \"foo:0\"}) {\n TensorId tensor_id = ParseTensorName(input);\n EXPECT_EQ(\"foo\", tensor_id.node());\n EXPECT_EQ(0, tensor_id.index());\n }\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/tensor_id.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/tensor_id_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":216,"cells":{"ID":{"kind":"string","value":"9f641e69-b766-49cc-9efc-fb43469c2b28"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"abseil/abseil-cpp"},"File Name":{"kind":"string","value":"globals"},"File Path in Repository":{"kind":"string","value":"absl/log/internal/globals.cc"},"File Path for Unit Test":{"kind":"string","value":"absl/log/globals_test.cc"},"Code":{"kind":"string","value":"#include \"absl/log/internal/globals.h\"\n#include <atomic>\n#include <cstdio>\n#if defined(__EMSCRIPTEN__)\n#include <emscripten/console.h>\n#endif\n#include \"absl/base/attributes.h\"\n#include \"absl/base/config.h\"\n#include \"absl/base/internal/raw_logging.h\"\n#include \"absl/base/log_severity.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/strings/strip.h\"\n#include \"absl/time/time.h\"\nnamespace absl {\nABSL_NAMESPACE_BEGIN\nnamespace log_internal {\nnamespace {\nABSL_CONST_INIT std::atomic<bool> logging_initialized(false);\nABSL_CONST_INIT std::atomic<absl::TimeZone*> timezone_ptr{nullptr};\nABSL_CONST_INIT std::atomic<bool> symbolize_stack_trace(true);\nABSL_CONST_INIT std::atomic<int> max_frames_in_stack_trace(64);\nABSL_CONST_INIT std::atomic<bool> exit_on_dfatal(true);\nABSL_CONST_INIT std::atomic<bool> suppress_sigabort_trace(false);\n} \nbool IsInitialized() {\n return logging_initialized.load(std::memory_order_acquire);\n}\nvoid SetInitialized() {\n logging_initialized.store(true, std::memory_order_release);\n}\nvoid WriteToStderr(absl::string_view message, absl::LogSeverity severity) {\n if (message.empty()) return;\n#if defined(__EMSCRIPTEN__)\n const auto message_minus_newline = absl::StripSuffix(message, \"\\n\");\n#if ABSL_INTERNAL_EMSCRIPTEN_VERSION >= 3001043\n emscripten_errn(message_minus_newline.data(), message_minus_newline.size());\n#else\n std::string null_terminated_message(message_minus_newline);\n _emscripten_err(null_terminated_message.c_str());\n#endif\n#else\n 
std::fwrite(message.data(), message.size(), 1, stderr);\n#endif\n#if defined(_WIN64) || defined(_WIN32) || defined(_WIN16)\n if (severity >= absl::LogSeverity::kWarning) {\n std::fflush(stderr);\n }\n#else\n (void)severity;\n#endif\n}\nvoid SetTimeZone(absl::TimeZone tz) {\n absl::TimeZone* expected = nullptr;\n absl::TimeZone* new_tz = new absl::TimeZone(tz);\n if (!timezone_ptr.compare_exchange_strong(expected, new_tz,\n std::memory_order_release,\n std::memory_order_relaxed)) {\n ABSL_RAW_LOG(FATAL,\n \"absl::log_internal::SetTimeZone() has already been called\");\n }\n}\nconst absl::TimeZone* TimeZone() {\n return timezone_ptr.load(std::memory_order_acquire);\n}\nbool ShouldSymbolizeLogStackTrace() {\n return symbolize_stack_trace.load(std::memory_order_acquire);\n}\nvoid EnableSymbolizeLogStackTrace(bool on_off) {\n symbolize_stack_trace.store(on_off, std::memory_order_release);\n}\nint MaxFramesInLogStackTrace() {\n return max_frames_in_stack_trace.load(std::memory_order_acquire);\n}\nvoid SetMaxFramesInLogStackTrace(int max_num_frames) {\n max_frames_in_stack_trace.store(max_num_frames, std::memory_order_release);\n}\nbool ExitOnDFatal() { return exit_on_dfatal.load(std::memory_order_acquire); }\nvoid SetExitOnDFatal(bool on_off) {\n exit_on_dfatal.store(on_off, std::memory_order_release);\n}\nbool SuppressSigabortTrace() {\n return suppress_sigabort_trace.load(std::memory_order_acquire);\n}\nbool SetSuppressSigabortTrace(bool on_off) {\n return suppress_sigabort_trace.exchange(on_off);\n}\n} \nABSL_NAMESPACE_END\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"absl/log/globals.h\"\n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"absl/base/attributes.h\"\n#include \"absl/base/log_severity.h\"\n#include \"absl/log/internal/globals.h\"\n#include \"absl/log/internal/test_helpers.h\"\n#include \"absl/log/log.h\"\n#include \"absl/log/scoped_mock_log.h\"\nnamespace {\nusing ::testing::_;\nusing ::testing::StrEq;\nauto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(\n new absl::log_internal::LogTestEnvironment);\nconstexpr static absl::LogSeverityAtLeast DefaultMinLogLevel() {\n return absl::LogSeverityAtLeast::kInfo;\n}\nconstexpr static absl::LogSeverityAtLeast DefaultStderrThreshold() {\n return absl::LogSeverityAtLeast::kError;\n}\nTEST(TestGlobals, MinLogLevel) {\n EXPECT_EQ(absl::MinLogLevel(), DefaultMinLogLevel());\n absl::SetMinLogLevel(absl::LogSeverityAtLeast::kError);\n EXPECT_EQ(absl::MinLogLevel(), absl::LogSeverityAtLeast::kError);\n absl::SetMinLogLevel(DefaultMinLogLevel());\n}\nTEST(TestGlobals, ScopedMinLogLevel) {\n EXPECT_EQ(absl::MinLogLevel(), DefaultMinLogLevel());\n {\n absl::log_internal::ScopedMinLogLevel scoped_stderr_threshold(\n absl::LogSeverityAtLeast::kError);\n EXPECT_EQ(absl::MinLogLevel(), absl::LogSeverityAtLeast::kError);\n }\n EXPECT_EQ(absl::MinLogLevel(), DefaultMinLogLevel());\n}\nTEST(TestGlobals, StderrThreshold) {\n EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold());\n absl::SetStderrThreshold(absl::LogSeverityAtLeast::kError);\n EXPECT_EQ(absl::StderrThreshold(), absl::LogSeverityAtLeast::kError);\n absl::SetStderrThreshold(DefaultStderrThreshold());\n}\nTEST(TestGlobals, ScopedStderrThreshold) {\n EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold());\n {\n absl::ScopedStderrThreshold scoped_stderr_threshold(\n absl::LogSeverityAtLeast::kError);\n EXPECT_EQ(absl::StderrThreshold(), absl::LogSeverityAtLeast::kError);\n }\n EXPECT_EQ(absl::StderrThreshold(), 
DefaultStderrThreshold());\n}\nTEST(TestGlobals, LogBacktraceAt) {\n  EXPECT_FALSE(absl::log_internal::ShouldLogBacktraceAt(\"some_file.cc\", 111));\n  absl::SetLogBacktraceLocation(\"some_file.cc\", 111);\n  EXPECT_TRUE(absl::log_internal::ShouldLogBacktraceAt(\"some_file.cc\", 111));\n  EXPECT_FALSE(\n      absl::log_internal::ShouldLogBacktraceAt(\"another_file.cc\", 222));\n}\nTEST(TestGlobals, LogPrefix) {\n  EXPECT_TRUE(absl::ShouldPrependLogPrefix());\n  absl::EnableLogPrefix(false);\n  EXPECT_FALSE(absl::ShouldPrependLogPrefix());\n  absl::EnableLogPrefix(true);\n  EXPECT_TRUE(absl::ShouldPrependLogPrefix());\n}\nTEST(TestGlobals, SetGlobalVLogLevel) {\n  EXPECT_EQ(absl::SetGlobalVLogLevel(42), 0);\n  EXPECT_EQ(absl::SetGlobalVLogLevel(1337), 42);\n  EXPECT_EQ(absl::SetGlobalVLogLevel(0), 1337);\n}\nTEST(TestGlobals, SetVLogLevel) {\n  EXPECT_EQ(absl::SetVLogLevel(\"setvloglevel\", 42), 0);\n  EXPECT_EQ(absl::SetVLogLevel(\"setvloglevel\", 1337), 42);\n  EXPECT_EQ(absl::SetVLogLevel(\"othersetvloglevel\", 50), 0);\n  EXPECT_EQ(absl::SetVLogLevel(\"*pattern*\", 1), 0);\n  EXPECT_EQ(absl::SetVLogLevel(\"*less_generic_pattern*\", 2), 1);\n  EXPECT_EQ(absl::SetVLogLevel(\"pattern_match\", 3), 1);\n  EXPECT_EQ(absl::SetVLogLevel(\"less_generic_pattern_match\", 4), 2);\n}\nTEST(TestGlobals, AndroidLogTag) {\n  EXPECT_DEATH_IF_SUPPORTED(absl::SetAndroidNativeTag(nullptr), \".*\");\n  EXPECT_THAT(absl::log_internal::GetAndroidNativeTag(), StrEq(\"native\"));\n  absl::SetAndroidNativeTag(\"test_tag\");\n  EXPECT_THAT(absl::log_internal::GetAndroidNativeTag(), StrEq(\"test_tag\"));\n  EXPECT_DEATH_IF_SUPPORTED(absl::SetAndroidNativeTag(\"test_tag_fail\"), \".*\");\n}\nTEST(TestExitOnDFatal, OffTest) {\n  absl::log_internal::SetExitOnDFatal(false);\n  EXPECT_FALSE(absl::log_internal::ExitOnDFatal());\n  {\n    absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected);\n    EXPECT_CALL(log, Log(absl::kLogDebugFatal, _, \"This should not be fatal\"));\n    log.StartCapturingLogs();\n    LOG(DFATAL) << \"This should not be fatal\";\n  }\n}\n#if GTEST_HAS_DEATH_TEST\nTEST(TestDeathWhileExitOnDFatal, OnTest) {\n  absl::log_internal::SetExitOnDFatal(true);\n  EXPECT_TRUE(absl::log_internal::ExitOnDFatal());\n  EXPECT_DEBUG_DEATH({ LOG(DFATAL) << \"This should be fatal in debug mode\"; },\n                     \"This should be fatal in debug mode\");\n}\n#endif\n} "},"Code Url":{"kind":"string","value":"https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/internal/globals.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/globals_test.cc"},"Commit Hash":{"kind":"string","value":"03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4"}}},{"rowIdx":217,"cells":{"ID":{"kind":"string","value":"19afc211-8c06-4f02-9ab1-ee2b82b6cba1"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"strcat"},"File Path in Repository":{"kind":"string","value":"third_party/xla/third_party/tsl/tsl/platform/strcat.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/third_party/tsl/tsl/platform/strcat_test.cc"},"Code":{"kind":"string","value":"#include \"tsl/platform/strcat.h\"\n#include <stdarg.h>\n#include <stdint.h>\n#include <stdio.h>\n#include <string.h>\n#include \"absl/meta/type_traits.h\"\n#include \"tsl/platform/logging.h\"\nnamespace tsl {\nnamespace strings {\nAlphaNum::AlphaNum(Hex hex) {\n  char *const end = &digits_[kFastToBufferSize];\n  char *writer = end;\n  uint64 
value = hex.value;\n  uint64 width = hex.spec;\n  uint64 mask = (static_cast<uint64>(1) << (width - 1) * 4) | value;\n  static const char hexdigits[] = \"0123456789abcdef\";\n  do {\n    *--writer = hexdigits[value & 0xF];\n    value >>= 4;\n    mask >>= 4;\n  } while (mask != 0);\n  piece_ = absl::string_view(writer, end - writer);\n}\nstatic char *Append1(char *out, const AlphaNum &x) {\n  if (x.data() == nullptr) return out;\n  memcpy(out, x.data(), x.size());\n  return out + x.size();\n}\nstatic char *Append2(char *out, const AlphaNum &x1, const AlphaNum &x2) {\n  if (x1.data() != nullptr) {\n    memcpy(out, x1.data(), x1.size());\n    out += x1.size();\n  }\n  if (x2.data() == nullptr) return out;\n  memcpy(out, x2.data(), x2.size());\n  return out + x2.size();\n}\nstatic char *Append4(char *out, const AlphaNum &x1, const AlphaNum &x2,\n                     const AlphaNum &x3, const AlphaNum &x4) {\n  if (x1.data() != nullptr) {\n    memcpy(out, x1.data(), x1.size());\n    out += x1.size();\n  }\n  if (x2.data() != nullptr) {\n    memcpy(out, x2.data(), x2.size());\n    out += x2.size();\n  }\n  if (x3.data() != nullptr) {\n    memcpy(out, x3.data(), x3.size());\n    out += x3.size();\n  }\n  if (x4.data() == nullptr) return out;\n  memcpy(out, x4.data(), x4.size());\n  return out + x4.size();\n}\nstring StrCat(const AlphaNum &a) { return string(a.data(), a.size()); }\nstring StrCat(const AlphaNum &a, const AlphaNum &b) {\n  string result(a.size() + b.size(), '\\0');\n  char *const begin = &*result.begin();\n  char *out = Append2(begin, a, b);\n  DCHECK_EQ(out, begin + result.size());\n  return result;\n}\nstring StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c) {\n  string result(a.size() + b.size() + c.size(), '\\0');\n  char *const begin = &*result.begin();\n  char *out = Append2(begin, a, b);\n  out = Append1(out, c);\n  DCHECK_EQ(out, begin + result.size());\n  return result;\n}\nstring StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c,\n              const AlphaNum &d) {\n  string result(a.size() + b.size() + c.size() + d.size(), '\\0');\n  char *const begin = &*result.begin();\n  char *out = Append4(begin, a, b, c, d);\n  DCHECK_EQ(out, begin + result.size());\n  return result;\n}\nnamespace {\ntemplate <typename string_type, typename = void>\nstruct ResizeUninitializedTraits {\n  using HasMember = std::false_type;\n  static void Resize(string_type *s, size_t new_size) { s->resize(new_size); }\n};\ntemplate <typename string_type>\nstruct ResizeUninitializedTraits<\n    string_type, absl::void_t<decltype(std::declval<string_type&>()\n                                           .__resize_default_init(237))> > {\n  using HasMember = std::true_type;\n  static void Resize(string_type *s, size_t new_size) {\n    s->__resize_default_init(new_size);\n  }\n};\nstatic inline void STLStringResizeUninitialized(string *s, size_t new_size) {\n  ResizeUninitializedTraits<string>::Resize(s, new_size);\n}\ntemplate <typename string_type>\nvoid STLStringReserveAmortized(string_type *s, size_t new_size) {\n  const size_t cap = s->capacity();\n  if (new_size > cap) {\n    s->reserve((std::max)(new_size, 2 * cap));\n  }\n}\ntemplate <typename string_type>\nvoid STLStringResizeUninitializedAmortized(string_type *s, size_t new_size) {\n  STLStringReserveAmortized(s, new_size);\n  STLStringResizeUninitialized(s, new_size);\n}\n} \nnamespace internal {\nstring CatPieces(std::initializer_list<absl::string_view> pieces) {\n  size_t total_size = 0;\n  for (const absl::string_view piece : pieces) total_size += piece.size();\n  string result(total_size, '\\0');\n  char *const begin = &*result.begin();\n  char *out = begin;\n  for (const absl::string_view piece : pieces) {\n    const size_t this_size = piece.size();\n    memcpy(out, piece.data(), this_size);\n    out += this_size;\n  }\n  DCHECK_EQ(out, begin + result.size());\n  return 
result;\n}\n#define DCHECK_NO_OVERLAP(dest, src) \\\n  DCHECK_GE(uintptr_t((src).data() - (dest).data()), uintptr_t((dest).size()))\nvoid AppendPieces(string *result,\n                  std::initializer_list<absl::string_view> pieces) {\n  size_t old_size = result->size();\n  size_t total_size = old_size;\n  for (const absl::string_view piece : pieces) {\n    DCHECK_NO_OVERLAP(*result, piece);\n    total_size += piece.size();\n  }\n  STLStringResizeUninitializedAmortized(result, total_size);\n  char *const begin = &*result->begin();\n  char *out = begin + old_size;\n  for (const absl::string_view piece : pieces) {\n    const size_t this_size = piece.size();\n    memcpy(out, piece.data(), this_size);\n    out += this_size;\n  }\n  DCHECK_EQ(out, begin + result->size());\n}\n} \nvoid StrAppend(string *result, const AlphaNum &a) {\n  DCHECK_NO_OVERLAP(*result, a);\n  result->append(a.data(), a.size());\n}\nvoid StrAppend(string *result, const AlphaNum &a, const AlphaNum &b) {\n  DCHECK_NO_OVERLAP(*result, a);\n  DCHECK_NO_OVERLAP(*result, b);\n  string::size_type old_size = result->size();\n  STLStringResizeUninitializedAmortized(result, old_size + a.size() + b.size());\n  char *const begin = &*result->begin();\n  char *out = Append2(begin + old_size, a, b);\n  DCHECK_EQ(out, begin + result->size());\n}\nvoid StrAppend(string *result, const AlphaNum &a, const AlphaNum &b,\n               const AlphaNum &c) {\n  DCHECK_NO_OVERLAP(*result, a);\n  DCHECK_NO_OVERLAP(*result, b);\n  DCHECK_NO_OVERLAP(*result, c);\n  string::size_type old_size = result->size();\n  STLStringResizeUninitializedAmortized(\n      result, old_size + a.size() + b.size() + c.size());\n  char *const begin = &*result->begin();\n  char *out = Append2(begin + old_size, a, b);\n  out = Append1(out, c);\n  DCHECK_EQ(out, begin + result->size());\n}\nvoid StrAppend(string *result, const AlphaNum &a, const AlphaNum &b,\n               const AlphaNum &c, const AlphaNum &d) {\n  DCHECK_NO_OVERLAP(*result, a);\n  DCHECK_NO_OVERLAP(*result, b);\n  DCHECK_NO_OVERLAP(*result, c);\n  DCHECK_NO_OVERLAP(*result, d);\n  string::size_type old_size = result->size();\n  STLStringResizeUninitializedAmortized(\n      result, old_size + a.size() + b.size() + c.size() + d.size());\n  char *const begin = &*result->begin();\n  char *out = Append4(begin + old_size, a, b, c, d);\n  DCHECK_EQ(out, begin + result->size());\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tsl/platform/strcat.h\"\n#include <string>\n#include \"absl/strings/string_view.h\"\n#include \"tsl/platform/stringprintf.h\"\n#include \"tsl/platform/test.h\"\n#include \"tsl/platform/types.h\"\n#ifdef _MSC_VER\ntypedef ptrdiff_t ssize_t;\n#endif\nnamespace tsl {\nnamespace strings {\nTEST(StrCat, Ints) {\n  const int16_t s = -1;\n  const uint16 us = 2;\n  const int i = -3;\n  const unsigned int ui = 4;\n  const int32_t l = -5;\n  const uint32 ul = 6;\n  const int64_t ll = -7;\n  const uint64 ull = 8;\n  const ptrdiff_t ptrdiff = -9;\n  const size_t size = 10;\n  const ssize_t ssize = -11;\n  const intptr_t intptr = -12;\n  const uintptr_t uintptr = 13;\n  string answer;\n  answer = StrCat(s, us);\n  EXPECT_EQ(answer, \"-12\");\n  answer = StrCat(i, ui);\n  EXPECT_EQ(answer, \"-34\");\n  answer = StrCat(l, ul);\n  EXPECT_EQ(answer, \"-56\");\n  answer = StrCat(ll, ull);\n  EXPECT_EQ(answer, \"-78\");\n  answer = StrCat(ptrdiff, size);\n  EXPECT_EQ(answer, \"-910\");\n  answer = StrCat(ssize, intptr);\n  EXPECT_EQ(answer, \"-11-12\");\n  answer = StrCat(uintptr, 0);\n  EXPECT_EQ(answer, \"130\");\n}\nTEST(StrCat, Floats) {\n  const int s = 0;\n  const float f = 1.5f;\n  const double d = 1.5;\n  const bfloat16 
bf(1.5f);\n string answer;\n answer = StrCat(s, f);\n EXPECT_EQ(answer, \"01.5\");\n answer = StrCat(s, d);\n EXPECT_EQ(answer, \"01.5\");\n answer = StrCat(s, bf);\n EXPECT_EQ(answer, \"01.5\");\n}\nTEST(StrCat, Nulls) {\n string result;\n absl::string_view v;\n string strs[] = {\"Hello\", \"Cruel\", \"World\"};\n result = StrCat(v);\n EXPECT_EQ(result, \"\");\n result = StrCat(strs[0], v);\n EXPECT_EQ(result, \"Hello\");\n result = StrCat(v, strs[0]);\n EXPECT_EQ(result, \"Hello\");\n result = StrCat(v, strs[0], strs[1]);\n EXPECT_EQ(result, \"HelloCruel\");\n result = StrCat(strs[0], v, strs[1]);\n EXPECT_EQ(result, \"HelloCruel\");\n result = StrCat(strs[0], strs[1], v);\n EXPECT_EQ(result, \"HelloCruel\");\n result = StrCat(v, strs[0], strs[1], strs[2]);\n EXPECT_EQ(result, \"HelloCruelWorld\");\n result = StrCat(strs[0], v, strs[1], strs[2]);\n EXPECT_EQ(result, \"HelloCruelWorld\");\n result = StrCat(strs[0], strs[1], v, strs[2]);\n EXPECT_EQ(result, \"HelloCruelWorld\");\n result = StrCat(strs[0], strs[1], strs[2], v);\n EXPECT_EQ(result, \"HelloCruelWorld\");\n}\nTEST(StrCat, Basics) {\n string result;\n string strs[] = {\"Hello\", \"Cruel\", \"World\"};\n absl::string_view pieces[] = {\"Hello\", \"Cruel\", \"World\"};\n const char *c_strs[] = {\"Hello\", \"Cruel\", \"World\"};\n int32 i32s[] = {'H', 'C', 'W'};\n uint64 ui64s[] = {12345678910LL, 10987654321LL};\n result = StrCat(false, true, 2, 3);\n EXPECT_EQ(result, \"0123\");\n result = StrCat(-1);\n EXPECT_EQ(result, \"-1\");\n result = StrCat(0.5);\n EXPECT_EQ(result, \"0.5\");\n result = StrCat(strs[1], pieces[2]);\n EXPECT_EQ(result, \"CruelWorld\");\n result = StrCat(strs[0], \", \", pieces[2]);\n EXPECT_EQ(result, \"Hello, World\");\n result = StrCat(strs[0], \", \", strs[1], \" \", strs[2], \"!\");\n EXPECT_EQ(result, \"Hello, Cruel World!\");\n result = StrCat(pieces[0], \", \", pieces[1], \" \", pieces[2]);\n EXPECT_EQ(result, \"Hello, Cruel World\");\n result = StrCat(c_strs[0], \", \", c_strs[1], \" \", c_strs[2]);\n EXPECT_EQ(result, \"Hello, Cruel World\");\n result = StrCat(\"ASCII \", i32s[0], \", \", i32s[1], \" \", i32s[2], \"!\");\n EXPECT_EQ(result, \"ASCII 72, 67 87!\");\n result = StrCat(ui64s[0], \", \", ui64s[1], \"!\");\n EXPECT_EQ(result, \"12345678910, 10987654321!\");\n string one = \"1\"; \n result = StrCat(\"And a \", one.size(), \" and a \", &result[2] - &result[0],\n \" and a \", one, \" 2 3 4\", \"!\");\n EXPECT_EQ(result, \"And a 1 and a 2 and a 1 2 3 4!\");\n result = StrCat(\"To output a char by ASCII/numeric value, use +: \", '!' 
+ 0);\n EXPECT_EQ(result, \"To output a char by ASCII/numeric value, use +: 33\");\n float f = 100000.5;\n result = StrCat(\"A hundred K and a half is \", f);\n EXPECT_EQ(result, \"A hundred K and a half is 100000.5\");\n double d = f;\n d *= d;\n result = StrCat(\"A hundred K and a half squared is \", d);\n EXPECT_EQ(result, \"A hundred K and a half squared is 10000100000.25\");\n result = StrCat(1, 2, 333, 4444, 55555, 666666, 7777777, 88888888, 999999999);\n EXPECT_EQ(result, \"12333444455555666666777777788888888999999999\");\n}\nTEST(StrCat, MaxArgs) {\n string result;\n result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, \"a\");\n EXPECT_EQ(result, \"123456789a\");\n result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, \"a\", \"b\");\n EXPECT_EQ(result, \"123456789ab\");\n result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, \"a\", \"b\", \"c\");\n EXPECT_EQ(result, \"123456789abc\");\n result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, \"a\", \"b\", \"c\", \"d\");\n EXPECT_EQ(result, \"123456789abcd\");\n result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, \"a\", \"b\", \"c\", \"d\", \"e\");\n EXPECT_EQ(result, \"123456789abcde\");\n result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, \"a\", \"b\", \"c\", \"d\", \"e\", \"f\");\n EXPECT_EQ(result, \"123456789abcdef\");\n result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\");\n EXPECT_EQ(result, \"123456789abcdefg\");\n result =\n StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\");\n EXPECT_EQ(result, \"123456789abcdefgh\");\n result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\",\n \"h\", \"i\");\n EXPECT_EQ(result, \"123456789abcdefghi\");\n result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\",\n \"h\", \"i\", \"j\");\n EXPECT_EQ(result, \"123456789abcdefghij\");\n result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\",\n \"h\", \"i\", \"j\", \"k\");\n EXPECT_EQ(result, \"123456789abcdefghijk\");\n result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\",\n \"h\", \"i\", \"j\", \"k\", \"l\");\n EXPECT_EQ(result, \"123456789abcdefghijkl\");\n result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\",\n \"h\", \"i\", \"j\", \"k\", \"l\", \"m\");\n EXPECT_EQ(result, \"123456789abcdefghijklm\");\n result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\",\n \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\");\n EXPECT_EQ(result, \"123456789abcdefghijklmn\");\n result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\",\n \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\");\n EXPECT_EQ(result, \"123456789abcdefghijklmno\");\n result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\",\n \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\");\n EXPECT_EQ(result, \"123456789abcdefghijklmnop\");\n result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\",\n \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\");\n EXPECT_EQ(result, \"123456789abcdefghijklmnopq\");\n result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, \"a\", \"b\", \"c\", \"d\", \"e\", \"f\",\n \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\",\n \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\", \"A\", \"B\", \"C\", \"D\",\n \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", 
\"O\", \"P\",\n \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\");\n EXPECT_EQ(result,\n \"12345678910abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\");\n}\nTEST(StrAppend, Basics) {\n string result = \"existing text\";\n string strs[] = {\"Hello\", \"Cruel\", \"World\"};\n absl::string_view pieces[] = {\"Hello\", \"Cruel\", \"World\"};\n const char *c_strs[] = {\"Hello\", \"Cruel\", \"World\"};\n int32 i32s[] = {'H', 'C', 'W'};\n uint64 ui64s[] = {12345678910LL, 10987654321LL};\n string::size_type old_size = result.size();\n StrAppend(&result, strs[0]);\n EXPECT_EQ(result.substr(old_size), \"Hello\");\n old_size = result.size();\n StrAppend(&result, strs[1], pieces[2]);\n EXPECT_EQ(result.substr(old_size), \"CruelWorld\");\n old_size = result.size();\n StrAppend(&result, strs[0], \", \", pieces[2]);\n EXPECT_EQ(result.substr(old_size), \"Hello, World\");\n old_size = result.size();\n StrAppend(&result, strs[0], \", \", strs[1], \" \", strs[2], \"!\");\n EXPECT_EQ(result.substr(old_size), \"Hello, Cruel World!\");\n old_size = result.size();\n StrAppend(&result, pieces[0], \", \", pieces[1], \" \", pieces[2]);\n EXPECT_EQ(result.substr(old_size), \"Hello, Cruel World\");\n old_size = result.size();\n StrAppend(&result, c_strs[0], \", \", c_strs[1], \" \", c_strs[2]);\n EXPECT_EQ(result.substr(old_size), \"Hello, Cruel World\");\n old_size = result.size();\n StrAppend(&result, \"ASCII \", i32s[0], \", \", i32s[1], \" \", i32s[2], \"!\");\n EXPECT_EQ(result.substr(old_size), \"ASCII 72, 67 87!\");\n old_size = result.size();\n StrAppend(&result, ui64s[0], \", \", ui64s[1], \"!\");\n EXPECT_EQ(result.substr(old_size), \"12345678910, 10987654321!\");\n string one = \"1\"; \n old_size = result.size();\n StrAppend(&result, \"And a \", one.size(), \" and a \", &result[2] - &result[0],\n \" and a \", one, \" 2 3 4\", \"!\");\n EXPECT_EQ(result.substr(old_size), \"And a 1 and a 2 and a 1 2 3 4!\");\n old_size = result.size();\n StrAppend(&result,\n \"To output a char by ASCII/numeric value, use +: \", '!' 
+ 0);\n  EXPECT_EQ(result.substr(old_size),\n            \"To output a char by ASCII/numeric value, use +: 33\");\n  float f = 100000.5;\n  old_size = result.size();\n  StrAppend(&result, \"A hundred K and a half is \", f);\n  EXPECT_EQ(result.substr(old_size), \"A hundred K and a half is 100000.5\");\n  double d = f;\n  d *= d;\n  old_size = result.size();\n  StrAppend(&result, \"A hundred K and a half squared is \", d);\n  EXPECT_EQ(result.substr(old_size),\n            \"A hundred K and a half squared is 10000100000.25\");\n  old_size = result.size();\n  StrAppend(&result, 1, 22, 333, 4444, 55555, 666666, 7777777, 88888888, 9);\n  EXPECT_EQ(result.substr(old_size), \"1223334444555556666667777777888888889\");\n  old_size = result.size();\n  StrAppend(&result, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, \"a\", \"b\", \"c\", \"d\", \"e\",\n            \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\",\n            \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\", \"A\", \"B\", \"C\", \"D\", \"E\",\n            \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\",\n            \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\",\n            \"No limit thanks to C++11's variadic templates\");\n  EXPECT_EQ(result.substr(old_size),\n            \"12345678910abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n            \"No limit thanks to C++11's variadic templates\");\n}\nTEST(StrAppend, Death) {\n  string s = \"self\";\n  EXPECT_DEBUG_DEATH(StrAppend(&s, s.c_str() + 1), \"Check failed:\");\n  EXPECT_DEBUG_DEATH(StrAppend(&s, s), \"Check failed:\");\n}\nstatic void CheckHex64(uint64 v) {\n  string actual = StrCat(Hex(v, kZeroPad16));\n  string expected = Printf(\"%016llx\", static_cast<unsigned long long>(v));\n  EXPECT_EQ(expected, actual) << \" decimal value \" << v;\n  actual = StrCat(Hex(v, kZeroPad8));\n  expected = Printf(\"%08llx\", static_cast<unsigned long long>(v));\n  EXPECT_EQ(expected, actual) << \" decimal value \" << v;\n  actual = StrCat(Hex(v));\n  expected = Printf(\"%llx\", static_cast<unsigned long long>(v));\n  EXPECT_EQ(expected, actual) << \" decimal value \" << v;\n}\nstatic void CheckHex32(uint32 v) {\n  string actual = StrCat(Hex(v, kZeroPad8));\n  string expected = Printf(\"%08x\", v);\n  EXPECT_EQ(expected, actual) << \" decimal value \" << v;\n  actual = StrCat(Hex(v));\n  expected = Printf(\"%x\", v);\n  EXPECT_EQ(expected, actual) << \" decimal value \" << v;\n}\nstatic void CheckHexSigned32(int32_t v) {\n  string actual = StrCat(Hex(v, kZeroPad8));\n  string expected = Printf(\"%08x\", v);\n  EXPECT_EQ(expected, actual) << \" decimal value \" << v;\n  actual = StrCat(Hex(v));\n  expected = Printf(\"%x\", v);\n  EXPECT_EQ(expected, actual) << \" decimal value \" << v;\n}\nstatic void TestFastPrints() {\n  for (int i = 0; i < 10000; i++) {\n    CheckHex64(i);\n    CheckHex32(i);\n    CheckHexSigned32(i);\n    CheckHexSigned32(-i);\n  }\n  CheckHex64(0x123456789abcdef0ull);\n  CheckHex32(0x12345678);\n  int8_t minus_one_8bit = -1;\n  EXPECT_EQ(\"ff\", StrCat(Hex(minus_one_8bit)));\n  int16_t minus_one_16bit = -1;\n  EXPECT_EQ(\"ffff\", StrCat(Hex(minus_one_16bit)));\n}\nTEST(Numbers, TestFunctionsMovedOverFromNumbersMain) { TestFastPrints(); }\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/strcat.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/strcat_test.cc"},"Commit 
Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":218,"cells":{"ID":{"kind":"string","value":"ef1402fc-707f-440d-bf71-7892e6bea482"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/libaddressinput"},"File Name":{"kind":"string","value":"string_util"},"File Path in Repository":{"kind":"string","value":"cpp/src/util/string_util.cc"},"File Path for Unit Test":{"kind":"string","value":"cpp/test/util/string_util_test.cc"},"Code":{"kind":"string","value":"#include \"string_util.h\"\n#include \n#include \n#include \n#include \n#include \nnamespace i18n {\nnamespace addressinput {\nstd::string DoReplaceStringPlaceholders(const std::string& format_string,\n const std::vector& subst) {\n size_t substitutions = subst.size();\n size_t sub_length = 0;\n for (std::vector::const_iterator iter = subst.begin();\n iter != subst.end(); ++iter) {\n sub_length += iter->length();\n }\n std::string formatted;\n formatted.reserve(format_string.length() + sub_length);\n for (std::string::const_iterator i = format_string.begin();\n i != format_string.end(); ++i) {\n if ('$' == *i) {\n if (i + 1 != format_string.end()) {\n ++i;\n assert('$' == *i || '1' <= *i);\n if ('$' == *i) {\n while (i != format_string.end() && '$' == *i) {\n formatted.push_back('$');\n ++i;\n }\n --i;\n } else {\n uintptr_t index = 0;\n while (i != format_string.end() && '0' <= *i && *i <= '9') {\n index *= 10;\n index += *i - '0';\n ++i;\n }\n --i;\n index -= 1;\n if (index < substitutions)\n formatted.append(subst.at(index));\n }\n }\n } else {\n formatted.push_back(*i);\n }\n }\n return formatted;\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"util/string_util.h\"\n#include \n#include \n#include \nnamespace {\nusing i18n::addressinput::DoReplaceStringPlaceholders;\nTEST(StringUtilTest, Ok) {\n const std::vector subst{\n \"A\",\n \"B\",\n \"C\",\n };\n EXPECT_EQ(\"aA,bB,cC\", DoReplaceStringPlaceholders(\"a$1,b$2,c$3\", subst));\n}\nTEST(StringUtilTest, FewParameters) {\n const std::vector subst{\n \"A\",\n \"B\",\n \"C\",\n };\n EXPECT_EQ(\"aA,bB,cC,d,aA\",\n DoReplaceStringPlaceholders(\"a$1,b$2,c$3,d$4,a$1\", subst));\n}\nTEST(StringUtilTest, MoreThan9Parameters) {\n const std::vector subst{\n \"A\",\n \"B\",\n \"C\",\n \"D\",\n \"E\",\n \"F\",\n \"G\",\n \"H\",\n \"I\",\n \"J\",\n \"K\",\n };\n EXPECT_EQ(\"aA,bB,cC,dD,eE,fF,gG,hH,iI,jJ,kK,aA\",\n DoReplaceStringPlaceholders(\"a$1,b$2,c$3,d$4,e$5,f$6,g$7,h$8,i$9,\"\n \"j$10,k$11,a$1\",\n subst));\n}\nTEST(StringUtilTest, ConsecutiveDollarSigns) {\n const std::vector subst{\n \"A\",\n \"B\",\n \"C\",\n };\n EXPECT_EQ(\"$1 $$2 $$$3\",\n DoReplaceStringPlaceholders(\"$$1 $$$2 $$$$3\", subst));\n}\n} "},"Code Url":{"kind":"string","value":"https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/util/string_util.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/util/string_util_test.cc"},"Commit Hash":{"kind":"string","value":"2610f7b1043d6784ada41392fc9392d1ea09ea07"}}},{"rowIdx":219,"cells":{"ID":{"kind":"string","value":"62bf0644-5132-4e87-bff3-4b0982acdd07"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"infeed_token_propagation"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/infeed_token_propagation.cc"},"File 
Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/infeed_token_propagation_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/infeed_token_propagation.h\"\n#include <cstdint>\n#include <string_view>\n#include <vector>\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/call_graph.h\"\n#include \"xla/service/hlo_dce.h\"\n#include \"xla/service/tuple_simplifier.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nbool IsDanglingInfeed(HloInstruction* infeed) {\n  CHECK(infeed->opcode() == HloOpcode::kInfeed);\n  if (infeed->has_sharding()) {\n    return false;\n  }\n  if (const HloInstruction* after_all = infeed->operand(0);\n      after_all->opcode() != HloOpcode::kAfterAll ||\n      after_all->operand_count() != 0) {\n    return false;\n  }\n  for (const HloInstruction* user : infeed->users()) {\n    if (user->opcode() == HloOpcode::kGetTupleElement &&\n        user->tuple_index() == 1) {\n      return false;\n    }\n  }\n  return true;\n}\nbool IsDanglingOutfeed(HloInstruction* outfeed) {\n  CHECK(outfeed->opcode() == HloOpcode::kOutfeed);\n  if (outfeed->has_sharding()) {\n    return false;\n  }\n  if (const HloInstruction* after_all = outfeed->operand(1);\n      after_all->opcode() != HloOpcode::kAfterAll ||\n      after_all->operand_count() != 0) {\n    return false;\n  }\n  if (outfeed->user_count() != 0) {\n    return false;\n  }\n  return true;\n}\nHloInstruction* ReconstructTuple(HloInstruction* tuple) {\n  CHECK(tuple->shape().IsTuple());\n  HloComputation* computation = tuple->parent();\n  std::vector<HloInstruction*> gtes;\n  gtes.resize(tuple->shape().tuple_shapes_size());\n  for (int64_t idx = 0; idx < gtes.size(); ++idx) {\n    gtes[idx] = computation->AddInstruction(\n        HloInstruction::CreateGetTupleElement(tuple, idx));\n  }\n  return computation->AddInstruction(HloInstruction::CreateTuple(gtes));\n}\nabsl::StatusOr<HloInstruction*> InsertTokenIntoTuple(HloInstruction* tuple,\n                                                     bool add_token_operand) {\n  CHECK(tuple->shape().IsTuple());\n  HloComputation* computation = tuple->parent();\n  std::vector<HloInstruction*> original_users = tuple->users();\n  HloInstruction* original_tuple = ReconstructTuple(tuple);\n  for (HloInstruction* original_user : original_users) {\n    for (int64_t idx : original_user->operand_indices(tuple)) {\n      TF_RETURN_IF_ERROR(\n          original_user->ReplaceOperandWith(idx, original_tuple));\n    }\n  }\n  *tuple->mutable_shape()->add_tuple_shapes() = ShapeUtil::MakeTokenShape();\n  if (add_token_operand) {\n    tuple->AppendOperand(\n        computation->AddInstruction(HloInstruction::CreateToken()));\n  }\n  HloInstruction* input_token_gte =\n      computation->AddInstruction(HloInstruction::CreateGetTupleElement(\n          tuple, tuple->shape().tuple_shapes_size() - 1));\n  return input_token_gte;\n}\n} \nabsl::Status CanonicalizeConditionalInstruction(HloInstruction* conditional) {\n  CHECK_EQ(conditional->opcode(), HloOpcode::kConditional);\n  for (HloComputation* branch : conditional->branch_computations()) {\n    HloInstruction* parameter = branch->parameter_instruction(0);\n    if (!parameter->shape().IsTuple()) {\n      *parameter->mutable_shape() =\n          ShapeUtil::MakeTupleShape({parameter->shape()});\n      HloInstruction* original = 
branch->AddInstruction(\n HloInstruction::CreateGetTupleElement(parameter, 0));\n TF_RETURN_IF_ERROR(parameter->ReplaceAllUsesWithDifferentShape(original));\n }\n int64_t branch_operand_idx = conditional->branch_index(branch) + 1;\n HloInstruction* branch_tuple =\n conditional->mutable_operand(branch_operand_idx);\n if (!branch_tuple->shape().IsTuple()) {\n branch_tuple = conditional->parent()->AddInstruction(\n HloInstruction::CreateTuple({branch_tuple}));\n TF_RETURN_IF_ERROR(conditional->ReplaceOperandWithDifferentShape(\n branch_operand_idx, branch_tuple));\n }\n if (branch_tuple->opcode() == HloOpcode::kParameter) {\n branch_tuple = ReconstructTuple(branch_tuple);\n TF_RETURN_IF_ERROR(\n conditional->ReplaceOperandWith(branch_operand_idx, branch_tuple));\n }\n HloInstruction* root = branch->root_instruction();\n if (root->opcode() != HloOpcode::kTuple) {\n root = ReconstructTuple(root);\n branch->set_root_instruction(root);\n }\n }\n CHECK(conditional->shape().IsTuple());\n if (conditional->IsRoot()) {\n HloInstruction* new_root = ReconstructTuple(conditional);\n conditional->parent()->set_root_instruction(new_root);\n }\n return absl::OkStatus();\n}\nabsl::Status CanonicalizeWhileInstruction(HloInstruction* loop) {\n CHECK_EQ(loop->opcode(), HloOpcode::kWhile);\n HloComputation* body = loop->while_body();\n HloComputation* cond = loop->while_condition();\n HloInstruction* body_parameter = body->parameter_instruction(0);\n if (!body_parameter->shape().IsTuple()) {\n *body_parameter->mutable_shape() =\n ShapeUtil::MakeTupleShape({body_parameter->shape()});\n HloInstruction* original = body->AddInstruction(\n HloInstruction::CreateGetTupleElement(body_parameter, 0));\n TF_RETURN_IF_ERROR(\n body_parameter->ReplaceAllUsesWithDifferentShape(original));\n }\n HloInstruction* root = body->root_instruction();\n if (!root->shape().IsTuple()) {\n root = body->AddInstruction(HloInstruction::CreateTuple({root}));\n body->set_root_instruction(root, true);\n }\n HloInstruction* cond_parameter = cond->parameter_instruction(0);\n if (!cond_parameter->shape().IsTuple()) {\n *cond_parameter->mutable_shape() =\n ShapeUtil::MakeTupleShape({cond_parameter->shape()});\n HloInstruction* original = cond->AddInstruction(\n HloInstruction::CreateGetTupleElement(cond_parameter, 0));\n TF_RETURN_IF_ERROR(\n cond_parameter->ReplaceAllUsesWithDifferentShape(original));\n }\n if (!loop->shape().IsTuple()) {\n *loop->mutable_shape() = ShapeUtil::MakeTupleShape({loop->shape()});\n HloInstruction* original = loop->parent()->AddInstruction(\n HloInstruction::CreateGetTupleElement(loop, 0));\n TF_RETURN_IF_ERROR(loop->ReplaceAllUsesWithDifferentShape(original));\n }\n HloInstruction* loop_tuple = loop->mutable_operand(0);\n if (!loop_tuple->shape().IsTuple()) {\n loop_tuple = loop->parent()->AddInstruction(\n HloInstruction::CreateTuple({loop_tuple}));\n TF_RETURN_IF_ERROR(loop->ReplaceOperandWithDifferentShape(0, loop_tuple));\n }\n if (loop_tuple->opcode() == HloOpcode::kParameter) {\n loop_tuple = ReconstructTuple(loop_tuple);\n TF_RETURN_IF_ERROR(loop->ReplaceOperandWith(0, loop_tuple));\n }\n if (root->opcode() != HloOpcode::kTuple) {\n root = ReconstructTuple(root);\n body->set_root_instruction(root);\n }\n if (loop->IsRoot()) {\n HloInstruction* new_root = ReconstructTuple(loop);\n loop->parent()->set_root_instruction(new_root);\n }\n return absl::OkStatus();\n}\nabsl::Status InfeedTokenPropagation::PropagateTokenThroughConditionalBranch() {\n HloComputation* comp = dangling_instruction_->parent();\n 
dangling_instruction_ = call_graph_->GetComputationCallers(comp)[0];\n CHECK_EQ(dangling_instruction_->opcode(), HloOpcode::kConditional);\n for (HloComputation* branch : dangling_instruction_->branch_computations()) {\n HloInstruction* root = branch->root_instruction();\n if (branch == comp) {\n TF_RETURN_IF_ERROR(\n InsertTokenIntoTuple(root, false).status());\n root->AppendOperand(output_token_);\n } else {\n TF_RETURN_IF_ERROR(\n InsertTokenIntoTuple(root, true).status());\n }\n }\n HloInstruction* parameter = comp->parameter_instruction(0);\n TF_ASSIGN_OR_RETURN(\n HloInstruction * input_token_gte,\n InsertTokenIntoTuple(parameter, false));\n TF_RETURN_IF_ERROR(input_token_->ReplaceAllUsesWith(input_token_gte));\n int64_t branch_operand_idx = dangling_instruction_->branch_index(comp) + 1;\n HloInstruction* branch_tuple =\n dangling_instruction_->mutable_operand(branch_operand_idx);\n TF_ASSIGN_OR_RETURN(\n HloInstruction * next_input_token_gte,\n InsertTokenIntoTuple(branch_tuple, true));\n TF_RETURN_IF_ERROR(dangling_instruction_->ReplaceOperandWithDifferentShape(\n branch_operand_idx, branch_tuple));\n input_token_ =\n branch_tuple->mutable_operand(next_input_token_gte->tuple_index());\n TF_ASSIGN_OR_RETURN(\n output_token_,\n InsertTokenIntoTuple(dangling_instruction_, false));\n return absl::OkStatus();\n}\nabsl::Status InfeedTokenPropagation::PropagateTokenThroughWhileBody() {\n HloComputation* comp = dangling_instruction_->parent();\n dangling_instruction_ = call_graph_->GetComputationCallers(comp)[0];\n CHECK_EQ(dangling_instruction_->opcode(), HloOpcode::kWhile);\n HloInstruction* root = comp->root_instruction();\n TF_RETURN_IF_ERROR(\n InsertTokenIntoTuple(root, false).status());\n root->AppendOperand(output_token_);\n HloInstruction* body_parameter = comp->parameter_instruction(0);\n TF_ASSIGN_OR_RETURN(\n HloInstruction * input_token_gte,\n InsertTokenIntoTuple(body_parameter, false));\n TF_RETURN_IF_ERROR(input_token_->ReplaceAllUsesWith(input_token_gte));\n HloComputation* cond = dangling_instruction_->while_condition();\n HloInstruction* cond_parameter = cond->parameter_instruction(0);\n TF_RETURN_IF_ERROR(\n InsertTokenIntoTuple(cond_parameter, false)\n .status());\n HloInstruction* while_tuple = dangling_instruction_->mutable_operand(0);\n TF_ASSIGN_OR_RETURN(\n input_token_,\n InsertTokenIntoTuple(while_tuple, true));\n TF_RETURN_IF_ERROR(\n dangling_instruction_->ReplaceOperandWithDifferentShape(0, while_tuple));\n TF_ASSIGN_OR_RETURN(\n output_token_,\n InsertTokenIntoTuple(dangling_instruction_, false));\n return absl::OkStatus();\n}\nabsl::Status InfeedTokenPropagation::PropagateToken() {\n HloComputation* comp = dangling_instruction_->parent();\n if (comp->IsEntryComputation()) {\n return absl::OkStatus();\n }\n VLOG(2) << \"Propagating tokens for: \" << dangling_instruction_->name();\n HloInstruction* caller = call_graph_->GetComputationCallers(comp)[0];\n if (caller->has_sharding()) {\n return absl::OkStatus();\n }\n if (caller->opcode() == HloOpcode::kConditional) {\n TF_RETURN_IF_ERROR(CanonicalizeConditionalInstruction(caller));\n TF_RETURN_IF_ERROR(PropagateTokenThroughConditionalBranch());\n } else if (caller->opcode() == HloOpcode::kWhile &&\n comp == caller->while_body()) {\n TF_RETURN_IF_ERROR(CanonicalizeWhileInstruction(caller));\n TF_RETURN_IF_ERROR(PropagateTokenThroughWhileBody());\n } else {\n VLOG(2) << \"Unhandled computation: \" << comp->name();\n return absl::OkStatus();\n }\n return PropagateToken();\n}\nabsl::StatusOr 
<bool> InfeedTokenPropagation::Run(\n    HloModule* module,\n    const absl::flat_hash_set<absl::string_view>& execution_threads) {\n  VLOG(5) << \"Before InfeedTokenPropagation:\";\n  XLA_VLOG_LINES(5, module->ToString());\n  std::vector<HloInstruction*> dangling_infeeds;\n  std::vector<HloInstruction*> dangling_outfeeds;\n  for (HloComputation* computation :\n       module->MakeNonfusionComputations(execution_threads)) {\n    if (!computation->IsEntryComputation()) {\n      for (HloInstruction* instruction : computation->instructions()) {\n        if (instruction->opcode() == HloOpcode::kInfeed &&\n            IsDanglingInfeed(instruction)) {\n          VLOG(1) << \"Found dangling infeed: \" << instruction->ToString();\n          dangling_infeeds.push_back(instruction);\n        } else if (instruction->opcode() == HloOpcode::kOutfeed &&\n                   IsDanglingOutfeed(instruction)) {\n          VLOG(1) << \"Found dangling outfeed: \" << instruction->ToString();\n          dangling_outfeeds.push_back(instruction);\n        }\n      }\n    }\n  }\n  bool changed = !dangling_infeeds.empty() || !dangling_outfeeds.empty();\n  if (changed) {\n    call_graph_ = CallGraph::Build(module);\n    if (!call_graph_->IsFlattened()) {\n      return FailedPrecondition(\n          \"Call graph must be flattened before infeed token propagation.\");\n    }\n  }\n  for (HloInstruction* dangling_infeed : dangling_infeeds) {\n    dangling_instruction_ = dangling_infeed;\n    input_token_ = dangling_infeed->mutable_operand(0);\n    output_token_ = dangling_infeed->AddInstruction(\n        HloInstruction::CreateGetTupleElement(dangling_infeed, 1));\n    TF_RETURN_IF_ERROR(PropagateToken());\n  }\n  for (HloInstruction* dangling_outfeed : dangling_outfeeds) {\n    dangling_instruction_ = dangling_outfeed;\n    input_token_ = dangling_outfeed->mutable_operand(1);\n    output_token_ = dangling_outfeed;\n    TF_RETURN_IF_ERROR(PropagateToken());\n  }\n  if (changed) {\n    TF_RETURN_IF_ERROR(\n        TupleSimplifier().Run(module, execution_threads).status());\n    TF_RETURN_IF_ERROR(HloDCE().Run(module, execution_threads).status());\n  }\n  VLOG(5) << \"After InfeedTokenPropagation:\";\n  XLA_VLOG_LINES(5, module->ToString());\n  return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/infeed_token_propagation.h\"\n#include \n#include \n#include \n#include \n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace op = xla::testing::opcode_matchers;\nnamespace xla {\nnamespace {\nclass InfeedTokenPropagationTest : public HloTestBase {\n protected:\n  InfeedTokenPropagationTest() = default;\n};\nTEST_F(InfeedTokenPropagationTest, EntryComputationInfeed) {\n  constexpr std::string_view hlo = R\"(\nHloModule main\nENTRY main {\n  token.0 = after-all()\n  infeed.0 = (s32[], token[]) infeed(token.0)\n  ROOT gte.0 = get-tuple-element(infeed.0), index=0\n}\n)\";\n  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n  InfeedTokenPropagation itp;\n  TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n  EXPECT_FALSE(changed);\n}\nTEST_F(InfeedTokenPropagationTest, EntryComputationOutfeed) {\n  constexpr std::string_view hlo = R\"(\nHloModule main\nENTRY main {\n  arg.0 = s32[] parameter(0)\n  tuple.0 = tuple(arg.0)\n  token.0 = after-all()\n  outfeed.0 = token[] outfeed(tuple.0, token.0), outfeed_shape=(s32[])\n  ROOT tuple.1 = tuple()\n}\n)\";\n  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n  InfeedTokenPropagation itp;\n  TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n  EXPECT_FALSE(changed);\n}\nTEST_F(InfeedTokenPropagationTest, ConditionalInfeed) 
{\n constexpr std::string_view hlo = R\"(\nHloModule main\ntrue_comp {\n arg.0 = () parameter(0)\n token.0 = after-all()\n infeed.0 = (s32[], token[]) infeed(token.0)\n ROOT tuple.0 = tuple()\n}\nfalse_comp {\n arg.0 = () parameter(0)\n ROOT tuple.0 = tuple()\n}\nENTRY main {\n pred.0 = pred[] constant(true)\n true_tuple.0 = tuple()\n false_tuple.0 = tuple()\n ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* cond = FindInstruction(module.get(), \"cond.0\");\n EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());\n HloInstruction* true_tuple = FindInstruction(module.get(), \"true_tuple.0\");\n EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(true_tuple->shape().tuple_shapes()[0].IsToken());\n HloInstruction* false_tuple = FindInstruction(module.get(), \"false_tuple.0\");\n EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);\n HloComputation* true_comp = FindComputation(module.get(), \"true_comp\");\n EXPECT_THAT(true_comp->root_instruction(),\n op::Tuple(op::GetTupleElement(op::Infeed(), 1)));\n HloComputation* false_comp = FindComputation(module.get(), \"false_comp\");\n EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));\n}\nTEST_F(InfeedTokenPropagationTest, ConditionalOutfeed) {\n constexpr std::string_view hlo = R\"(\nHloModule main\ntrue_comp {\n arg.0 = (s32[]) parameter(0)\n token.0 = after-all()\n outfeed.0 = token[] outfeed(arg.0, token.0), outfeed_shape=(s32[])\n ROOT tuple.0 = tuple()\n}\nfalse_comp {\n arg.0 = () parameter(0)\n ROOT tuple.0 = tuple()\n}\nENTRY main {\n arg.0 = s32[] parameter(0)\n pred.0 = pred[] constant(true)\n true_tuple.0 = tuple(arg.0)\n false_tuple.0 = tuple()\n ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* cond = FindInstruction(module.get(), \"cond.0\");\n EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());\n HloInstruction* true_tuple = FindInstruction(module.get(), \"true_tuple.0\");\n EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(true_tuple->shape().tuple_shapes()[1].IsToken());\n HloInstruction* false_tuple = FindInstruction(module.get(), \"false_tuple.0\");\n EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);\n HloComputation* true_comp = FindComputation(module.get(), \"true_comp\");\n EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed()));\n HloComputation* false_comp = FindComputation(module.get(), \"false_comp\");\n EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));\n}\nTEST_F(InfeedTokenPropagationTest, ConditionalDuplicateOperand) {\n constexpr std::string_view hlo = R\"(\nHloModule main\ntrue_comp {\n arg.0 = () parameter(0)\n token.0 = after-all()\n infeed.0 = (s32[], token[]) infeed(token.0)\n ROOT tuple.0 = tuple()\n}\nfalse_comp {\n arg.0 = () parameter(0)\n ROOT tuple.0 = tuple()\n}\nENTRY main {\n pred.0 = pred[] constant(true)\n tuple.0 = 
tuple()\n ROOT cond.0 = () conditional(pred.0, tuple.0, tuple.0), true_computation=true_comp, false_computation=false_comp\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* cond = FindInstruction(module.get(), \"cond.0\");\n EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());\n const HloInstruction* true_tuple = cond->operand(1);\n EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(true_tuple->shape().tuple_shapes()[0].IsToken());\n const HloInstruction* false_tuple = cond->operand(2);\n EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);\n HloComputation* true_comp = FindComputation(module.get(), \"true_comp\");\n EXPECT_THAT(true_comp->root_instruction(),\n op::Tuple(op::GetTupleElement(op::Infeed(), 1)));\n HloComputation* false_comp = FindComputation(module.get(), \"false_comp\");\n EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));\n}\nTEST_F(InfeedTokenPropagationTest, NonTupleConditional) {\n constexpr std::string_view hlo = R\"(\nHloModule main\ntrue_comp {\n arg.0 = s32[] parameter(0)\n outfeed_tuple.0 = tuple(arg.0)\n token.0 = after-all()\n outfeed.0 = token[] outfeed(outfeed_tuple.0, token.0), outfeed_shape=(s32[])\n ROOT tuple.0 = tuple()\n}\nfalse_comp {\n arg.0 = () parameter(0)\n ROOT tuple.0 = tuple()\n}\nENTRY main {\n arg.0 = s32[] parameter(0)\n pred.0 = pred[] constant(true)\n false_tuple.0 = tuple()\n ROOT cond.0 = () conditional(pred.0, arg.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* cond = FindInstruction(module.get(), \"cond.0\");\n EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());\n HloInstruction* true_tuple = cond->mutable_operand(1);\n EXPECT_TRUE(true_tuple->shape().IsTuple());\n EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(true_tuple->shape().tuple_shapes()[1].IsToken());\n HloInstruction* false_tuple = FindInstruction(module.get(), \"false_tuple.0\");\n EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);\n HloComputation* true_comp = FindComputation(module.get(), \"true_comp\");\n EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed()));\n HloComputation* false_comp = FindComputation(module.get(), \"false_comp\");\n EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));\n}\nTEST_F(InfeedTokenPropagationTest, DisjointConditionalOutfeed) {\n constexpr std::string_view hlo = R\"(\nHloModule main\ntrue_comp {\n ROOT arg.0 = () parameter(0)\n one.0 = s32[] constant(1)\n outfeed_tuple.0 = tuple(one.0)\n token.0 = after-all()\n outfeed.0 = token[] outfeed(outfeed_tuple.0, token.0), outfeed_shape=(s32[])\n}\nfalse_comp {\n arg.0 = () parameter(0)\n ROOT tuple.0 = tuple()\n}\nENTRY main {\n pred.0 = pred[] constant(true)\n true_tuple.0 = tuple()\n false_tuple.0 = tuple()\n ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, 
itp.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* cond = FindInstruction(module.get(), \"cond.0\");\n EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());\n HloInstruction* true_tuple = FindInstruction(module.get(), \"true_tuple.0\");\n EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(true_tuple->shape().tuple_shapes()[0].IsToken());\n HloInstruction* false_tuple = FindInstruction(module.get(), \"false_tuple.0\");\n EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);\n HloComputation* true_comp = FindComputation(module.get(), \"true_comp\");\n EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed()));\n HloComputation* false_comp = FindComputation(module.get(), \"false_comp\");\n EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));\n}\nTEST_F(InfeedTokenPropagationTest, WhileInfeed) {\n constexpr std::string_view hlo = R\"(\nHloModule main\ncomp {\n arg.0 = () parameter(0)\n token.0 = after-all()\n infeed.0 = (s32[], token[]) infeed(token.0)\n ROOT tuple.0 = tuple()\n}\ncond {\n arg.0 = () parameter(0)\n ROOT true.0 = pred[] constant(true)\n}\nENTRY main {\n while_tuple.0 = tuple()\n ROOT while.0 = () while(while_tuple.0), condition=cond, body=comp\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* loop = FindInstruction(module.get(), \"while.0\");\n EXPECT_EQ(loop->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(loop->shape().tuple_shapes()[0].IsToken());\n HloInstruction* loop_tuple = FindInstruction(module.get(), \"while_tuple.0\");\n EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[0].IsToken());\n HloComputation* body_comp = FindComputation(module.get(), \"comp\");\n EXPECT_THAT(body_comp->root_instruction(),\n op::Tuple(op::GetTupleElement(op::Infeed(), 1)));\n HloInstruction* body_param = body_comp->parameter_instruction(0);\n EXPECT_EQ(body_param->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(body_param->shape().tuple_shapes()[0].IsToken());\n HloComputation* cond_comp = FindComputation(module.get(), \"cond\");\n HloInstruction* cond_param = cond_comp->parameter_instruction(0);\n EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(cond_param->shape().tuple_shapes()[0].IsToken());\n}\nTEST_F(InfeedTokenPropagationTest, WhileOutfeed) {\n constexpr std::string_view hlo = R\"(\nHloModule main\ncomp {\n arg.0 = (s32[]) parameter(0)\n token.0 = after-all()\n outfeed.0 = token[] outfeed(arg.0, token.0), outfeed_shape=(s32[])\n gte.0 = get-tuple-element(arg.0), index=0\n ROOT tuple.0 = tuple(gte.0)\n}\ncond {\n arg.0 = (s32[]) parameter(0)\n ROOT true.0 = pred[] constant(true)\n}\nENTRY main {\n arg.0 = s32[] parameter(0)\n while_tuple.0 = tuple(arg.0)\n ROOT while.0 = (s32[]) while(while_tuple.0), condition=cond, body=comp\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* loop = FindInstruction(module.get(), \"while.0\");\n EXPECT_EQ(loop->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(loop->shape().tuple_shapes()[1].IsToken());\n HloInstruction* loop_tuple = FindInstruction(module.get(), \"while_tuple.0\");\n EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 
2);\n EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[1].IsToken());\n HloComputation* body_comp = FindComputation(module.get(), \"comp\");\n EXPECT_THAT(body_comp->root_instruction(),\n op::Tuple(op::GetTupleElement(), op::Outfeed()));\n HloInstruction* body_param = body_comp->parameter_instruction(0);\n EXPECT_EQ(body_param->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(body_param->shape().tuple_shapes()[1].IsToken());\n HloComputation* cond_comp = FindComputation(module.get(), \"cond\");\n HloInstruction* cond_param = cond_comp->parameter_instruction(0);\n EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(cond_param->shape().tuple_shapes()[1].IsToken());\n}\nTEST_F(InfeedTokenPropagationTest, DisjointWhileOutfeed) {\n constexpr std::string_view hlo = R\"(\nHloModule main\ncomp {\n ROOT arg.0 = () parameter(0)\n one.0 = s32[] constant(1)\n outfeed_tuple.0 = tuple(one.0)\n token.0 = after-all()\n outfeed.0 = token[] outfeed(outfeed_tuple.0, token.0), outfeed_shape=(s32[])\n}\ncond {\n arg.0 = () parameter(0)\n ROOT true.0 = pred[] constant(true)\n}\nENTRY main {\n while_tuple.0 = tuple()\n ROOT while.0 = () while(while_tuple.0), condition=cond, body=comp\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* loop = FindInstruction(module.get(), \"while.0\");\n EXPECT_EQ(loop->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(loop->shape().tuple_shapes()[0].IsToken());\n HloInstruction* loop_tuple = FindInstruction(module.get(), \"while_tuple.0\");\n EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[0].IsToken());\n HloComputation* body_comp = FindComputation(module.get(), \"comp\");\n EXPECT_THAT(body_comp->root_instruction(), op::Tuple(op::Outfeed()));\n HloInstruction* body_param = body_comp->parameter_instruction(0);\n EXPECT_EQ(body_param->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(body_param->shape().tuple_shapes()[0].IsToken());\n HloComputation* cond_comp = FindComputation(module.get(), \"cond\");\n HloInstruction* cond_param = cond_comp->parameter_instruction(0);\n EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(cond_param->shape().tuple_shapes()[0].IsToken());\n}\nTEST_F(InfeedTokenPropagationTest, NonTupleWhile) {\n constexpr std::string_view hlo = R\"(\nHloModule main\ncomp {\n ROOT arg.0 = s32[] parameter(0)\n tuple.0 = tuple(arg.0)\n token.0 = after-all()\n outfeed.0 = token[] outfeed(tuple.0, token.0), outfeed_shape=(s32[])\n}\ncond {\n arg.0 = s32[] parameter(0)\n ROOT true.0 = pred[] constant(true)\n}\nENTRY main {\n arg.0 = s32[] parameter(0)\n ROOT while.0 = s32[] while(arg.0), condition=cond, body=comp\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* loop = FindInstruction(module.get(), \"while.0\");\n EXPECT_TRUE(loop->shape().IsTuple());\n EXPECT_EQ(loop->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(loop->shape().tuple_shapes()[1].IsToken());\n EXPECT_THAT(loop->operand(0), op::Tuple(op::Parameter(), op::AfterAll()));\n HloComputation* body_comp = FindComputation(module.get(), \"comp\");\n EXPECT_THAT(body_comp->root_instruction(),\n op::Tuple(op::GetTupleElement(), op::Outfeed()));\n HloInstruction* body_param = 
body_comp->parameter_instruction(0);\n EXPECT_EQ(body_param->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(body_param->shape().tuple_shapes()[1].IsToken());\n HloComputation* cond_comp = FindComputation(module.get(), \"cond\");\n HloInstruction* cond_param = cond_comp->parameter_instruction(0);\n EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(cond_param->shape().tuple_shapes()[1].IsToken());\n}\nTEST_F(InfeedTokenPropagationTest, NestedInfeedOutfeed) {\n constexpr std::string_view hlo = R\"(\nHloModule main\ntrue_comp {\n arg.0 = (s32[]) parameter(0)\n token.0 = after-all()\n outfeed.0 = token[] outfeed(arg.0, token.0), outfeed_shape=(s32[])\n ROOT tuple.0 = tuple()\n}\nfalse_comp {\n arg.0 = () parameter(0)\n ROOT tuple.0 = tuple()\n}\ncomp {\n arg.0 = () parameter(0)\n token.0 = after-all()\n infeed.0 = (s32[], token[]) infeed(token.0)\n gte.0 = get-tuple-element(infeed.0), index=0\n pred.0 = pred[] constant(true)\n true_tuple.0 = tuple(gte.0)\n false_tuple.0 = tuple()\n ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp\n}\ncond {\n arg.0 = () parameter(0)\n ROOT true.0 = pred[] constant(true)\n}\nENTRY main {\n while_tuple.0 = tuple()\n ROOT while.0 = () while(while_tuple.0), condition=cond, body=comp\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));\n InfeedTokenPropagation itp;\n TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));\n EXPECT_TRUE(changed);\n HloInstruction* loop = FindInstruction(module.get(), \"while.0\");\n EXPECT_EQ(loop->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(loop->shape().tuple_shapes()[0].IsToken());\n EXPECT_TRUE(loop->shape().tuple_shapes()[1].IsToken());\n HloInstruction* loop_tuple = FindInstruction(module.get(), \"while_tuple.0\");\n EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[0].IsToken());\n EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[1].IsToken());\n HloComputation* body_comp = FindComputation(module.get(), \"comp\");\n EXPECT_THAT(body_comp->root_instruction(),\n op::Tuple(op::GetTupleElement(op::Infeed(), 1),\n op::GetTupleElement(op::Conditional(), 0)));\n HloInstruction* cond = FindInstruction(module.get(), \"cond.0\");\n EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);\n EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());\n HloInstruction* true_tuple = FindInstruction(module.get(), \"true_tuple.0\");\n EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 2);\n EXPECT_TRUE(true_tuple->shape().tuple_shapes()[1].IsToken());\n HloInstruction* false_tuple = FindInstruction(module.get(), \"false_tuple.0\");\n EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);\n HloComputation* true_comp = FindComputation(module.get(), \"true_comp\");\n EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed()));\n HloComputation* false_comp = FindComputation(module.get(), \"false_comp\");\n EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/infeed_token_propagation.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/infeed_token_propagation_test.cc"},"Commit 
Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":220,"cells":{"ID":{"kind":"string","value":"58e77cc6-ed69-45d3-b6b2-27bf9c79df8e"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/tensorstore"},"File Name":{"kind":"string","value":"future_sender"},"File Path in Repository":{"kind":"string","value":"tensorstore/util/execution/future_sender.h"},"File Path for Unit Test":{"kind":"string","value":"tensorstore/util/execution/future_sender_test.cc"},"Code":{"kind":"string","value":"#ifndef TENSORSTORE_UTIL_EXECUTION_FUTURE_SENDER_H_\n#define TENSORSTORE_UTIL_EXECUTION_FUTURE_SENDER_H_\n#include \n#include \n#include \n#include \"absl/status/status.h\"\n#include \"tensorstore/util/execution/execution.h\"\n#include \"tensorstore/util/future.h\"\nnamespace tensorstore {\nnamespace internal_future {\ntemplate \nstruct IsFutureReceiver : public std::false_type {};\ntemplate \nstruct IsFutureReceiver<\n Receiver, T,\n decltype(execution::set_value(std::declval(),\n std::declval())),\n decltype(execution::set_error(std::declval(),\n std::declval())),\n decltype(execution::set_cancel(std::declval()))>\n : public std::true_type {};\n} \ntemplate \nstd::enable_if_t<(!std::is_const_v &&\n std::is_constructible_v::result_type,\n std::in_place_t, V...>)>\nset_value(const Promise& promise, V&&... v) {\n promise.SetResult(std::in_place, std::forward(v)...);\n}\ntemplate \nstd::enable_if_t<(!std::is_const_v &&\n std::is_constructible_v::result_type,\n std::in_place_t, V...>)>\nset_value(std::reference_wrapper> promise, V&&... v) {\n set_value(promise.get(), std::forward(v)...);\n}\ntemplate \nvoid set_error(const Promise& promise, absl::Status error) {\n promise.SetResult(std::move(error));\n}\ntemplate \nvoid set_error(std::reference_wrapper> promise,\n absl::Status error) {\n set_error(promise.get(), std::move(error));\n}\ntemplate \nvoid set_cancel(const Promise& promise) {\n promise.SetResult(absl::CancelledError(\"\"));\n}\ntemplate \nvoid set_cancel(std::reference_wrapper> promise) {\n set_cancel(promise.get());\n}\ntemplate \nstd::enable_if_t::value> \nsubmit(Future& f, Receiver receiver) {\n f.Force();\n f.ExecuteWhenReady([r = std::move(receiver)](ReadyFuture ready) mutable {\n auto& result = ready.result();\n if (result.has_value()) {\n execution::set_value(r, result.value());\n } else {\n auto status = ready.status();\n if (status.code() == absl::StatusCode::kCancelled) {\n execution::set_cancel(r);\n } else {\n execution::set_error(r, std::move(status));\n }\n }\n });\n}\ntemplate \nstd::enable_if_t::value> \nsubmit(std::reference_wrapper> f, Receiver&& receiver) {\n submit(f.get(), std::forward(receiver));\n}\ntemplate \nFuture MakeSenderFuture(Sender sender) {\n auto pair = PromiseFuturePair::Make();\n struct Callback {\n Sender sender;\n void operator()(Promise promise) { execution::submit(sender, promise); }\n };\n pair.promise.ExecuteWhenForced(Callback{std::move(sender)});\n return pair.future;\n}\n} \n#endif "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorstore/util/execution/future_sender.h\"\n#include \n#include \n#include \n#include \n#include \"absl/status/status.h\"\n#include \"tensorstore/util/execution/any_receiver.h\"\n#include \"tensorstore/util/execution/any_sender.h\"\n#include \"tensorstore/util/execution/execution.h\"\n#include \"tensorstore/util/execution/sender.h\"\n#include \"tensorstore/util/execution/sender_testutil.h\"\n#include 
\"tensorstore/util/future.h\"\n#include \"tensorstore/util/result.h\"\nnamespace {\nusing ::tensorstore::Promise;\nusing ::tensorstore::PromiseFuturePair;\nusing ::tensorstore::Result;\nTEST(PromiseReceiverTest, SetCancel) {\n auto pair = PromiseFuturePair::Make();\n tensorstore::execution::set_cancel(pair.promise);\n EXPECT_EQ(pair.future.result(), Result(absl::CancelledError(\"\")));\n}\nTEST(PromiseReceiverTest, AnyReceiverSetCancel) {\n auto pair = PromiseFuturePair::Make();\n tensorstore::execution::set_cancel(\n tensorstore::AnyReceiver(std::cref(pair.promise)));\n EXPECT_EQ(pair.future.result(), Result(absl::CancelledError(\"\")));\n}\nTEST(PromiseReceiverTest, SetValue) {\n auto pair = PromiseFuturePair::Make();\n tensorstore::execution::set_value(pair.promise, 3);\n EXPECT_EQ(pair.future.result(), Result(3));\n}\nTEST(PromiseReceiverTest, SetValueThenSetCancel) {\n auto pair = PromiseFuturePair::Make();\n tensorstore::execution::set_value(pair.promise, 3);\n tensorstore::execution::set_cancel(pair.promise);\n EXPECT_EQ(pair.future.result(), Result(3));\n}\nTEST(PromiseReceiverTest, AnyReceiverSetValue) {\n auto pair = PromiseFuturePair::Make();\n tensorstore::execution::set_value(\n tensorstore::AnyReceiver(std::cref(pair.promise)), 3);\n EXPECT_EQ(pair.future.result(), Result(3));\n}\nTEST(PromiseReceiverTest, SetError) {\n auto pair = PromiseFuturePair::Make();\n tensorstore::execution::set_error(\n tensorstore::AnyReceiver(pair.promise),\n absl::UnknownError(\"message\"));\n EXPECT_EQ(pair.future.result(), Result(absl::UnknownError(\"message\")));\n}\nTEST(PromiseReceiverTest, AnyReceiverSetError) {\n auto pair = PromiseFuturePair::Make();\n tensorstore::execution::set_error(std::cref(pair.promise),\n absl::UnknownError(\"message\"));\n EXPECT_EQ(pair.future.result(), Result(absl::UnknownError(\"message\")));\n}\nTEST(FutureSenderTest, SetValue) {\n auto pair = PromiseFuturePair::Make();\n bool forced = false;\n pair.promise.ExecuteWhenForced([&](Promise) { forced = true; });\n std::vector log1, log2;\n tensorstore::execution::submit(pair.future,\n tensorstore::LoggingReceiver{&log1});\n tensorstore::execution::submit(pair.future,\n tensorstore::LoggingReceiver{&log2});\n EXPECT_THAT(log1, ::testing::ElementsAre());\n EXPECT_THAT(log2, ::testing::ElementsAre());\n EXPECT_TRUE(forced);\n pair.promise.SetResult(3);\n EXPECT_THAT(log1, ::testing::ElementsAre(\"set_value: 3\"));\n EXPECT_THAT(log2, ::testing::ElementsAre(\"set_value: 3\"));\n}\nTEST(FutureSenderTest, AnySenderSetValue) {\n auto pair = PromiseFuturePair::Make();\n bool forced = false;\n pair.promise.ExecuteWhenForced([&](Promise) { forced = true; });\n std::vector log;\n tensorstore::execution::submit(\n tensorstore::AnySender(pair.future),\n tensorstore::LoggingReceiver{&log});\n EXPECT_THAT(log, ::testing::ElementsAre());\n EXPECT_TRUE(forced);\n pair.promise.SetResult(3);\n EXPECT_THAT(log, ::testing::ElementsAre(\"set_value: 3\"));\n}\nTEST(FutureSenderTest, SetError) {\n auto pair = PromiseFuturePair::Make();\n bool forced = false;\n pair.promise.ExecuteWhenForced([&](Promise) { forced = true; });\n std::vector log;\n tensorstore::execution::submit(std::ref(pair.future),\n tensorstore::LoggingReceiver{&log});\n EXPECT_THAT(log, ::testing::ElementsAre());\n EXPECT_TRUE(forced);\n pair.promise.SetResult(absl::UnknownError(\"\"));\n EXPECT_THAT(log, ::testing::ElementsAre(\"set_error: UNKNOWN: \"));\n}\nTEST(FutureSenderTest, AnySenderSetError) {\n auto pair = PromiseFuturePair::Make();\n bool forced = false;\n 
pair.promise.ExecuteWhenForced([&](Promise) { forced = true; });\n std::vector log;\n tensorstore::execution::submit(\n tensorstore::AnySender(pair.future),\n tensorstore::LoggingReceiver{&log});\n EXPECT_THAT(log, ::testing::ElementsAre());\n EXPECT_TRUE(forced);\n pair.promise.SetResult(absl::UnknownError(\"\"));\n EXPECT_THAT(log, ::testing::ElementsAre(\"set_error: UNKNOWN: \"));\n}\nTEST(FutureSenderTest, SetCancel) {\n auto pair = PromiseFuturePair::Make();\n bool forced = false;\n pair.promise.ExecuteWhenForced([&](Promise) { forced = true; });\n std::vector log;\n tensorstore::execution::submit(pair.future,\n tensorstore::LoggingReceiver{&log});\n EXPECT_THAT(log, ::testing::ElementsAre());\n EXPECT_TRUE(forced);\n pair.promise.SetResult(absl::CancelledError(\"\"));\n EXPECT_THAT(log, ::testing::ElementsAre(\"set_cancel\"));\n}\nTEST(FutureSenderTest, AnySenderSetCancel) {\n auto pair = PromiseFuturePair::Make();\n bool forced = false;\n pair.promise.ExecuteWhenForced([&](Promise) { forced = true; });\n std::vector log;\n tensorstore::execution::submit(\n tensorstore::AnySender(std::ref(pair.future)),\n tensorstore::LoggingReceiver{&log});\n EXPECT_THAT(log, ::testing::ElementsAre());\n EXPECT_TRUE(forced);\n pair.promise.SetResult(absl::CancelledError(\"\"));\n EXPECT_THAT(log, ::testing::ElementsAre(\"set_cancel\"));\n}\nTEST(MakeSenderFutureTest, SetValue) {\n auto future =\n tensorstore::MakeSenderFuture(tensorstore::ValueSender{3});\n EXPECT_FALSE(future.ready());\n EXPECT_EQ(future.result(), Result(3));\n}\nTEST(MakeSenderFutureTest, SetError) {\n auto future = tensorstore::MakeSenderFuture(\n tensorstore::ErrorSender{absl::UnknownError(\"\")});\n EXPECT_FALSE(future.ready());\n EXPECT_EQ(future.result(), Result(absl::UnknownError(\"\")));\n}\nTEST(MakeSenderFutureTest, SetCancel) {\n auto future = tensorstore::MakeSenderFuture(tensorstore::CancelSender{});\n EXPECT_FALSE(future.ready());\n EXPECT_EQ(future.result(), Result(absl::CancelledError(\"\")));\n}\n} "},"Code Url":{"kind":"string","value":"https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/future_sender.h"},"Test Code Url":{"kind":"string","value":"https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/future_sender_test.cc"},"Commit Hash":{"kind":"string","value":"4f887a6430414cd6088e1743555015b10f116d50"}}},{"rowIdx":221,"cells":{"ID":{"kind":"string","value":"d3fa6f16-f4b2-4a17-85df-c9db628b7db2"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"tensor_slice_dataset_op"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/kernels/data/tensor_slice_dataset_op.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/kernels/data/tensor_slice_dataset_op_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/kernels/data/tensor_slice_dataset_op.h\"\n#include \n#include \n#include \"absl/status/status.h\"\n#include \"tensorflow/core/data/dataset_utils.h\"\n#include \"tensorflow/core/data/global_shuffle_utils.h\"\n#include \"tensorflow/core/data/name_utils.h\"\n#include \"tensorflow/core/data/split_utils.h\"\n#include \"tensorflow/core/framework/dataset.h\"\n#include \"tensorflow/core/framework/partial_tensor_shape.h\"\n#include \"tensorflow/core/framework/tensor.h\"\n#include \"tensorflow/core/framework/tensor_util.h\"\n#include 
\"tensorflow/core/graph/graph.h\"\n#include \"tensorflow/core/util/batch_util.h\"\n#include \"tsl/platform/mutex.h\"\n#include \"tsl/platform/thread_annotations.h\"\nnamespace tensorflow {\nnamespace data {\n constexpr const char* const TensorSliceDatasetOp::kDatasetType;\n constexpr const char* const TensorSliceDatasetOp::kComponents;\n constexpr const char* const TensorSliceDatasetOp::kToutputTypes;\n constexpr const char* const TensorSliceDatasetOp::kOutputShapes;\n constexpr const char* const TensorSliceDatasetOp::kIsFiles;\n constexpr const char* const\n TensorSliceDatasetOp::kReplicateOnSplit;\nclass TensorSliceDatasetOp::Dataset : public DatasetBase {\n public:\n explicit Dataset(OpKernelContext* ctx, std::vector tensors,\n bool is_files, bool replicate_on_split)\n : DatasetBase(DatasetContext(ctx)),\n tensors_(std::move(tensors)),\n is_files_(is_files),\n replicate_on_split_(replicate_on_split) {\n for (const Tensor& t : tensors_) {\n dtypes_.push_back(t.dtype());\n absl::InlinedVector element_dim_sizes;\n for (int i = 1; i < t.dims(); ++i) {\n element_dim_sizes.push_back(t.dim_size(i));\n }\n partial_shapes_.emplace_back(element_dim_sizes);\n shapes_.emplace_back(std::move(element_dim_sizes));\n }\n }\n std::unique_ptr MakeIteratorInternal(\n const string& prefix) const override {\n return std::make_unique(Iterator::Params{\n this, name_utils::IteratorPrefix(kDatasetType, prefix)});\n }\n Status MakeSplitProviders(std::vector>*\n split_providers) const override {\n split_providers->push_back(\n std::make_unique(tensors_[0].dim_size(0)));\n return absl::OkStatus();\n }\n const DataTypeVector& output_dtypes() const override { return dtypes_; }\n const std::vector& output_shapes() const override {\n return partial_shapes_;\n }\n string DebugString() const override {\n return name_utils::DatasetDebugString(kDatasetType);\n }\n int64_t CardinalityInternal(CardinalityOptions options) const override {\n return tensors_[0].dim_size(0);\n }\n Status InputDatasets(std::vector* inputs) const override {\n return absl::OkStatus();\n }\n Status CheckExternalState() const override { return absl::OkStatus(); }\n Status Get(OpKernelContext* ctx, int64 index,\n std::vector* out_tensors) const override {\n return Get(AnyContext(ctx), index, out_tensors);\n }\n Status Get(AnyContext ctx, int64 index,\n std::vector* out_tensors) const override {\n TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));\n out_tensors->clear();\n out_tensors->reserve(tensors_.size());\n for (int i = 0; i < tensors_.size(); ++i) {\n out_tensors->push_back(MaybeCopySubSlice(tensors_[i], index));\n }\n return absl::OkStatus();\n }\n absl::Status RandomIndexingCompatible() const override {\n return absl::OkStatus();\n }\n protected:\n Status AsGraphDefInternal(SerializationContext* ctx,\n DatasetGraphDefBuilder* b,\n Node** output) const override {\n std::vector components;\n components.reserve(tensors_.size());\n for (const Tensor& t : tensors_) {\n Node* node;\n if (!ctx->is_graph_rewrite()) {\n TF_RETURN_IF_ERROR(b->AddDatasetOrTensor(ctx, t, &node));\n if (is_files_) {\n Node* file_node;\n TF_RETURN_IF_ERROR(\n b->AddIdentity(ctx, \"FileIdentity\", &node, &file_node));\n }\n } else {\n TF_RETURN_IF_ERROR(b->AddPlaceholder(t, &node));\n DCHECK_NE(ctx->input_list(), nullptr);\n ctx->input_list()->emplace_back(node->name(), t);\n }\n components.emplace_back(node);\n }\n AttrValue dtypes;\n b->BuildAttrValue(dtypes_, &dtypes);\n AttrValue is_files;\n b->BuildAttrValue(is_files_, &is_files);\n AttrValue replicate_on_split;\n 
b->BuildAttrValue(replicate_on_split_, &replicate_on_split);\n TF_RETURN_IF_ERROR(b->AddDataset(this, {}, {{0, components}},\n {{kToutputTypes, dtypes},\n {kIsFiles, is_files},\n {kReplicateOnSplit, replicate_on_split}},\n output));\n return absl::OkStatus();\n }\n private:\n class Iterator : public DatasetIterator {\n public:\n explicit Iterator(const Params& params)\n : DatasetIterator(params),\n global_shuffle_iterator_(dataset()) {}\n bool SymbolicCheckpointCompatible() const override { return true; }\n Status Initialize(IteratorContext* ctx) override {\n if (ctx->split_providers().empty() || dataset()->replicate_on_split_) {\n split_provider_ = std::make_shared(\n dataset()->tensors_[0].dim_size(0));\n } else {\n TF_ASSIGN_OR_RETURN(split_provider_,\n GetSingleSplitProvider(ctx, dataset()));\n }\n return absl::OkStatus();\n }\n Status GetNextInternal(IteratorContext* ctx,\n std::vector* out_tensors,\n bool* end_of_sequence) override {\n if (ctx->index_mapper() != nullptr) {\n return global_shuffle_iterator_.GetNext(ctx, out_tensors,\n end_of_sequence);\n }\n Tensor split;\n TF_RETURN_IF_ERROR(split_provider_->GetNext(&split, end_of_sequence));\n if (*end_of_sequence) {\n return absl::OkStatus();\n }\n int64_t index = split.scalar()();\n out_tensors->reserve(dataset()->tensors_.size());\n for (size_t i = 0; i < dataset()->tensors_.size(); ++i) {\n out_tensors->push_back(\n MaybeCopySubSlice(dataset()->tensors_[i], index));\n }\n *end_of_sequence = false;\n return absl::OkStatus();\n }\n protected:\n std::shared_ptr CreateNode(\n IteratorContext* ctx, model::Node::Args args) const override {\n return model::MakeSourceNode(std::move(args));\n }\n Status SaveInternal(SerializationContext* ctx,\n IteratorStateWriter* writer) override {\n TF_RETURN_IF_ERROR(split_provider_->Save(\n [this](const std::string& key) { return full_name(key); }, writer));\n TF_RETURN_IF_ERROR(global_shuffle_iterator_.Save(prefix(), ctx, writer));\n return absl::OkStatus();\n }\n Status RestoreInternal(IteratorContext* ctx,\n IteratorStateReader* reader) override {\n if (ctx->restored_element_count().has_value()) {\n return global_shuffle_iterator_.Restore(prefix(), ctx, reader);\n }\n return split_provider_->Restore(\n [this](const std::string& key) { return full_name(key); }, reader);\n }\n private:\n std::shared_ptr split_provider_;\n GlobalShuffleIterator global_shuffle_iterator_;\n };\n const std::vector tensors_;\n DataTypeVector dtypes_;\n std::vector shapes_;\n std::vector partial_shapes_;\n const bool is_files_;\n const bool replicate_on_split_;\n};\nTensorSliceDatasetOp::TensorSliceDatasetOp(OpKernelConstruction* ctx)\n : DatasetOpKernel(ctx) {\n OP_REQUIRES_OK(ctx, ctx->GetAttr(kToutputTypes, &output_types_));\n OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));\n if (ctx->HasAttr(kIsFiles)) {\n OP_REQUIRES_OK(ctx, ctx->GetAttr(kIsFiles, &is_files_));\n }\n if (ctx->HasAttr(kReplicateOnSplit)) {\n OP_REQUIRES_OK(ctx, ctx->GetAttr(kReplicateOnSplit, &replicate_on_split_));\n }\n}\nvoid TensorSliceDatasetOp::MakeDataset(OpKernelContext* ctx,\n DatasetBase** output) {\n OpInputList inputs;\n OP_REQUIRES_OK(ctx, ctx->input_list(kComponents, &inputs));\n std::vector components;\n components.reserve(inputs.size());\n OP_REQUIRES(\n ctx, inputs[0].dims() > 0,\n errors::InvalidArgument(\"All components must be at least 1-dimensional\"));\n const int64_t num_slices = inputs[0].dim_size(0);\n for (const Tensor& t : inputs) {\n components.push_back(t);\n OP_REQUIRES(ctx, t.dims() > 0,\n 
errors::InvalidArgument(\n \"All components must be at least 1-dimensional\"));\n OP_REQUIRES(\n ctx, t.dim_size(0) == num_slices,\n errors::InvalidArgument(\n \"All components must have the same size in the 0th dimension\"));\n }\n *output =\n new Dataset(ctx, std::move(components), is_files_, replicate_on_split_);\n OP_REQUIRES_OK(ctx,\n VerifyTypesMatch((*output)->output_dtypes(), output_types_));\n OP_REQUIRES_OK(\n ctx, VerifyShapesCompatible((*output)->output_shapes(), output_shapes_));\n}\nnamespace {\nREGISTER_KERNEL_BUILDER(Name(\"TensorSliceDataset\").Device(DEVICE_CPU),\n TensorSliceDatasetOp);\n} \n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/kernels/data/tensor_slice_dataset_op.h\"\n#include \n#include \n#include \"tensorflow/core/data/dataset_test_base.h\"\n#include \"tensorflow/core/data/dataset_utils.h\"\n#include \"tensorflow/core/data/serialization_utils.h\"\nnamespace tensorflow {\nnamespace data {\nnamespace {\nconstexpr char kNodeName[] = \"tensor_slice_dataset\";\nclass TensorSliceDatasetOpTest : public DatasetOpsTestBase {};\nTensorSliceDatasetParams PlainTensorSliceDatasetParams() {\n std::vector components = {\n CreateTensor(TensorShape({2}), {1, 2}),\n CreateTensor(TensorShape({2, 2}), {1, 2, 3, 4}),\n CreateTensor(TensorShape({2}), {2, 3}),\n CreateTensor(TensorShape({2, 2}), {2, 3, 4, 5}),\n CreateTensor(TensorShape({2}), {3, 4}),\n CreateTensor(TensorShape({2, 2}), {3, 4, 5, 6}),\n CreateTensor(TensorShape({2, 1}), {37.0, 38.0}),\n CreateTensor(TensorShape({2, 1}), {\"a\", \"b\"})};\n return {std::move(components), kNodeName};\n}\nTensorSliceDatasetParams NestedTensorSliceDatasetParams() {\n std::vector components = {\n CreateTensor(\n TensorShape({2, 1}),\n {CreateTensor(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0}),\n CreateTensor(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),\n CreateTensor(\n TensorShape({2, 1}),\n {CreateTensor(TensorShape({1, 2}), {\"a\", \"b\"}),\n CreateTensor(TensorShape({1, 2}), {\"c\", \"d\"})}),\n CreateTensor(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6})};\n return {std::move(components), kNodeName};\n}\nstd::vector> GetNextTestCases() {\n return {\n {PlainTensorSliceDatasetParams(),\n {CreateTensor(TensorShape({}), {1}),\n CreateTensor(TensorShape({2}), {1, 2}),\n CreateTensor(TensorShape({}), {2}),\n CreateTensor(TensorShape({2}), {2, 3}),\n CreateTensor(TensorShape({}), {3}),\n CreateTensor(TensorShape({2}), {3, 4}),\n CreateTensor(TensorShape({1}), {37.0}),\n CreateTensor(TensorShape({1}), {\"a\"}),\n CreateTensor(TensorShape({}), {2}),\n CreateTensor(TensorShape({2}), {3, 4}),\n CreateTensor(TensorShape({}), {3}),\n CreateTensor(TensorShape({2}), {4, 5}),\n CreateTensor(TensorShape({}), {4}),\n CreateTensor(TensorShape({2}), {5, 6}),\n CreateTensor(TensorShape({1}), {38.0}),\n CreateTensor(TensorShape({1}), {\"b\"})}},\n {NestedTensorSliceDatasetParams(),\n {CreateTensor(\n TensorShape({1}),\n {CreateTensor(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0})}),\n CreateTensor(\n TensorShape({1}),\n {CreateTensor(TensorShape({1, 2}), {\"a\", \"b\"})}),\n CreateTensor(TensorShape({3}), {1, 2, 3}),\n CreateTensor(\n TensorShape({1}),\n {CreateTensor(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),\n CreateTensor(\n TensorShape({1}),\n {CreateTensor(TensorShape({1, 2}), {\"c\", \"d\"})}),\n CreateTensor(TensorShape({3}), {4, 5, 6})}}};\n}\nclass ParameterizedGetNextTest\n : public TensorSliceDatasetOpTest,\n public ::testing::WithParamInterface<\n GetNextTestCase> {};\nTEST_P(ParameterizedGetNextTest, 
GetNext) {\n auto test_case = GetParam();\n TF_ASSERT_OK(Initialize(test_case.dataset_params));\n std::vector input_names;\n TF_ASSERT_OK(test_case.dataset_params.GetInputNames(&input_names));\n size_t num_tensors_per_slice = input_names.size();\n bool end_of_sequence = false;\n std::vector out_tensors;\n int cur_slice = 0;\n while (true) {\n TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,\n &end_of_sequence));\n if (end_of_sequence) {\n EXPECT_TRUE(out_tensors.empty());\n break;\n }\n for (int i = 0; i < out_tensors.size(); ++i) {\n EXPECT_LT(i + num_tensors_per_slice * cur_slice,\n test_case.expected_outputs.size());\n if (out_tensors[i].dtype() == DT_VARIANT) {\n const Tensor* output = out_tensors[i].scalar()().get();\n const Tensor* expected_output =\n test_case.expected_outputs[i + num_tensors_per_slice * cur_slice]\n .scalar()()\n .get();\n TF_EXPECT_OK(ExpectEqual(*output, *expected_output));\n } else {\n TF_EXPECT_OK(ExpectEqual(\n out_tensors[i],\n test_case.expected_outputs[i + num_tensors_per_slice * cur_slice]));\n }\n }\n cur_slice++;\n }\n}\nINSTANTIATE_TEST_SUITE_P(TensorSliceDatasetOpTest, ParameterizedGetNextTest,\n ::testing::ValuesIn(GetNextTestCases()));\nTEST_F(TensorSliceDatasetOpTest, DatasetNodeName) {\n auto dataset_params = PlainTensorSliceDatasetParams();\n TF_ASSERT_OK(Initialize(dataset_params));\n TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));\n}\nTEST_F(TensorSliceDatasetOpTest, DatasetTypeString) {\n auto dataset_params = PlainTensorSliceDatasetParams();\n TF_ASSERT_OK(Initialize(dataset_params));\n TF_ASSERT_OK(CheckDatasetTypeString(\n name_utils::OpName(TensorSliceDatasetOp::kDatasetType)));\n}\nstd::vector>\nDatasetOutputTypesTestCases() {\n return {{PlainTensorSliceDatasetParams(),\n PlainTensorSliceDatasetParams().output_dtypes()},\n {NestedTensorSliceDatasetParams(),\n NestedTensorSliceDatasetParams().output_dtypes()}};\n}\nDATASET_OUTPUT_DTYPES_TEST_P(TensorSliceDatasetOpTest, TensorSliceDatasetParams,\n DatasetOutputTypesTestCases())\nstd::vector>\nDatasetOutputShapesTestCases() {\n return {{PlainTensorSliceDatasetParams(),\n PlainTensorSliceDatasetParams().output_shapes()},\n {NestedTensorSliceDatasetParams(),\n NestedTensorSliceDatasetParams().output_shapes()}};\n}\nDATASET_OUTPUT_SHAPES_TEST_P(TensorSliceDatasetOpTest, TensorSliceDatasetParams,\n DatasetOutputShapesTestCases())\nstd::vector>\nDatasetCardinalityTestCases() {\n return {{PlainTensorSliceDatasetParams(), 2},\n {NestedTensorSliceDatasetParams(), 2}};\n}\nDATASET_CARDINALITY_TEST_P(TensorSliceDatasetOpTest, TensorSliceDatasetParams,\n DatasetCardinalityTestCases())\nstd::vector>\nIteratorOutputTypesTestCases() {\n return {{PlainTensorSliceDatasetParams(),\n PlainTensorSliceDatasetParams().output_dtypes()},\n {NestedTensorSliceDatasetParams(),\n NestedTensorSliceDatasetParams().output_dtypes()}};\n}\nITERATOR_OUTPUT_DTYPES_TEST_P(TensorSliceDatasetOpTest,\n TensorSliceDatasetParams,\n IteratorOutputTypesTestCases())\nstd::vector>\nIteratorOutputShapesTestCases() {\n return {{PlainTensorSliceDatasetParams(),\n PlainTensorSliceDatasetParams().output_shapes()},\n {NestedTensorSliceDatasetParams(),\n NestedTensorSliceDatasetParams().output_shapes()}};\n}\nITERATOR_OUTPUT_SHAPES_TEST_P(TensorSliceDatasetOpTest,\n TensorSliceDatasetParams,\n IteratorOutputShapesTestCases())\nTEST_F(TensorSliceDatasetOpTest, IteratorOutputPrefix) {\n auto dataset_params = PlainTensorSliceDatasetParams();\n TF_ASSERT_OK(Initialize(dataset_params));\n 
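  // name_utils::IteratorPrefix() composes the dataset type with the params'
  // iterator prefix the same way MakeIteratorInternal() does, so with the
  // default test prefix this is expected to resolve to something like
  // "Iterator::TensorSlice".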
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(\n TensorSliceDatasetOp::kDatasetType, dataset_params.iterator_prefix())));\n}\nstd::vector>\nIteratorSaveAndRestoreTestCases() {\n return {\n {PlainTensorSliceDatasetParams(),\n {0, 1, 2},\n {CreateTensor(TensorShape({}), {1}),\n CreateTensor(TensorShape({2}), {1, 2}),\n CreateTensor(TensorShape({}), {2}),\n CreateTensor(TensorShape({2}), {2, 3}),\n CreateTensor(TensorShape({}), {3}),\n CreateTensor(TensorShape({2}), {3, 4}),\n CreateTensor(TensorShape({1}), {37.0}),\n CreateTensor(TensorShape({1}), {\"a\"}),\n CreateTensor(TensorShape({}), {2}),\n CreateTensor(TensorShape({2}), {3, 4}),\n CreateTensor(TensorShape({}), {3}),\n CreateTensor(TensorShape({2}), {4, 5}),\n CreateTensor(TensorShape({}), {4}),\n CreateTensor(TensorShape({2}), {5, 6}),\n CreateTensor(TensorShape({1}), {38.0}),\n CreateTensor(TensorShape({1}), {\"b\"})}},\n {NestedTensorSliceDatasetParams(),\n {0, 1, 2},\n {CreateTensor(\n TensorShape({1}),\n {CreateTensor(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0})}),\n CreateTensor(\n TensorShape({1}),\n {CreateTensor(TensorShape({1, 2}), {\"a\", \"b\"})}),\n CreateTensor(TensorShape({3}), {1, 2, 3}),\n CreateTensor(\n TensorShape({1}),\n {CreateTensor(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),\n CreateTensor(\n TensorShape({1}),\n {CreateTensor(TensorShape({1, 2}), {\"c\", \"d\"})}),\n CreateTensor(TensorShape({3}), {4, 5, 6})}}};\n}\nclass ParameterizedIteratorSaveAndRestoreTest\n : public TensorSliceDatasetOpTest,\n public ::testing::WithParamInterface<\n IteratorSaveAndRestoreTestCase> {};\nTEST_P(ParameterizedIteratorSaveAndRestoreTest, SaveAndRestore) {\n auto test_case = GetParam();\n TF_ASSERT_OK(Initialize(test_case.dataset_params));\n std::unique_ptr serialization_context;\n TF_ASSERT_OK(CreateSerializationContext(&serialization_context));\n int cur_iteration = 0;\n bool end_of_sequence = false;\n auto params =\n static_cast(test_case.dataset_params);\n int64_t num_slices = params.num_slices();\n size_t num_tensors_per_slice = params.num_tensors_per_slice();\n std::vector out_tensors;\n const std::vector& breakpoints = test_case.breakpoints;\n for (int breakpoint : breakpoints) {\n while (cur_iteration < breakpoint) {\n TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,\n &end_of_sequence));\n cur_iteration++;\n }\n if (breakpoint == 0) {\n EXPECT_FALSE(end_of_sequence);\n } else if (breakpoint <= num_slices) {\n for (int i = 0; i < out_tensors.size(); ++i) {\n if (out_tensors[i].dtype() == DT_VARIANT) {\n const Tensor* output =\n out_tensors[i].scalar()().get();\n const Tensor* expected_output =\n test_case\n .expected_outputs[i +\n num_tensors_per_slice * (cur_iteration - 1)]\n .scalar()()\n .get();\n TF_EXPECT_OK(ExpectEqual(*output, *expected_output));\n } else {\n TF_EXPECT_OK(ExpectEqual(\n out_tensors[i],\n test_case.expected_outputs[i + num_tensors_per_slice *\n (cur_iteration - 1)]));\n }\n }\n } else {\n EXPECT_TRUE(end_of_sequence);\n }\n VariantTensorDataWriter writer;\n TF_ASSERT_OK(iterator_->Save(serialization_context.get(), &writer));\n std::vector data;\n writer.GetData(&data);\n VariantTensorDataReader reader(data);\n TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader, \"Iterator\",\n *dataset_, &iterator_));\n }\n}\nINSTANTIATE_TEST_SUITE_P(\n TensorSliceDatasetOpTest, ParameterizedIteratorSaveAndRestoreTest,\n ::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));\nTEST_F(TensorSliceDatasetOpTest, SplitProvider) {\n auto params = TensorSliceDatasetParams(\n 
CreateTensors(TensorShape({7}), {{6, 2, 3, 8, 7, 0, 10}}),\n kNodeName);\n TF_ASSERT_OK(InitializeRuntime(params));\n TF_EXPECT_OK(CheckSplitProviderFullIteration(\n params, CreateTensors(TensorShape({}),\n {{6}, {2}, {3}, {8}, {7}, {0}, {10}})));\n TF_EXPECT_OK(CheckSplitProviderShardedIteration(\n params, 3, 1,\n CreateTensors(TensorShape({}), {{2}, {7}})));\n}\nTEST_F(TensorSliceDatasetOpTest, SplitProviderEmpty) {\n auto params = TensorSliceDatasetParams(\n CreateTensors(TensorShape({0}), {{}}), kNodeName);\n TF_ASSERT_OK(InitializeRuntime(params));\n TF_EXPECT_OK(CheckSplitProviderFullIteration(\n params, CreateTensors(TensorShape({}), {})));\n TF_EXPECT_OK(CheckSplitProviderShardedIteration(\n params, 3, 1,\n CreateTensors(TensorShape({}), {})));\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/tensor_slice_dataset_op.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/tensor_slice_dataset_op_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":222,"cells":{"ID":{"kind":"string","value":"f845cd84-f662-46bb-a6e6-300d4c18375b"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/quiche"},"File Name":{"kind":"string","value":"quic_lru_cache"},"File Path in Repository":{"kind":"string","value":"quiche/quic/core/quic_lru_cache.h"},"File Path for Unit Test":{"kind":"string","value":"quiche/quic/core/quic_lru_cache_test.cc"},"Code":{"kind":"string","value":"#ifndef QUICHE_QUIC_CORE_QUIC_LRU_CACHE_H_\n#define QUICHE_QUIC_CORE_QUIC_LRU_CACHE_H_\n#include \n#include \"quiche/quic/platform/api/quic_export.h\"\n#include \"quiche/quic/platform/api/quic_flag_utils.h\"\n#include \"quiche/quic/platform/api/quic_flags.h\"\n#include \"quiche/quic/platform/api/quic_logging.h\"\n#include \"quiche/common/quiche_linked_hash_map.h\"\nnamespace quic {\ntemplate ,\n class Eq = std::equal_to>\nclass QUICHE_EXPORT QuicLRUCache {\n private:\n using HashMapType =\n typename quiche::QuicheLinkedHashMap, Hash, Eq>;\n public:\n using iterator = typename HashMapType::iterator;\n using const_iterator = typename HashMapType::const_iterator;\n using reverse_iterator = typename HashMapType::reverse_iterator;\n using const_reverse_iterator = typename HashMapType::const_reverse_iterator;\n explicit QuicLRUCache(size_t capacity) : capacity_(capacity) {}\n QuicLRUCache(const QuicLRUCache&) = delete;\n QuicLRUCache& operator=(const QuicLRUCache&) = delete;\n iterator begin() { return cache_.begin(); }\n const_iterator begin() const { return cache_.begin(); }\n iterator end() { return cache_.end(); }\n const_iterator end() const { return cache_.end(); }\n reverse_iterator rbegin() { return cache_.rbegin(); }\n const_reverse_iterator rbegin() const { return cache_.rbegin(); }\n reverse_iterator rend() { return cache_.rend(); }\n const_reverse_iterator rend() const { return cache_.rend(); }\n void Insert(const K& key, std::unique_ptr value) {\n auto it = cache_.find(key);\n if (it != cache_.end()) {\n cache_.erase(it);\n }\n cache_.emplace(key, std::move(value));\n if (cache_.size() > capacity_) {\n cache_.pop_front();\n }\n QUICHE_DCHECK_LE(cache_.size(), capacity_);\n }\n iterator Lookup(const K& key) {\n auto iter = cache_.find(key);\n if (iter == cache_.end()) {\n return iter;\n }\n std::unique_ptr value 
= std::move(iter->second);\n cache_.erase(iter);\n auto result = cache_.emplace(key, std::move(value));\n QUICHE_DCHECK(result.second);\n return result.first;\n }\n iterator Erase(iterator iter) { return cache_.erase(iter); }\n void Clear() { cache_.clear(); }\n size_t MaxSize() const { return capacity_; }\n size_t Size() const { return cache_.size(); }\n private:\n quiche::QuicheLinkedHashMap, Hash, Eq> cache_;\n const size_t capacity_;\n};\n} \n#endif "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"quiche/quic/core/quic_lru_cache.h\"\n#include \n#include \n#include \"quiche/quic/platform/api/quic_test.h\"\nnamespace quic {\nnamespace test {\nnamespace {\nstruct CachedItem {\n explicit CachedItem(uint32_t new_value) : value(new_value) {}\n uint32_t value;\n};\nTEST(QuicLRUCacheTest, InsertAndLookup) {\n QuicLRUCache cache(5);\n EXPECT_EQ(cache.end(), cache.Lookup(1));\n EXPECT_EQ(0u, cache.Size());\n EXPECT_EQ(5u, cache.MaxSize());\n std::unique_ptr item1(new CachedItem(11));\n cache.Insert(1, std::move(item1));\n EXPECT_EQ(1u, cache.Size());\n EXPECT_EQ(11u, cache.Lookup(1)->second->value);\n std::unique_ptr item2(new CachedItem(12));\n cache.Insert(1, std::move(item2));\n EXPECT_EQ(1u, cache.Size());\n EXPECT_EQ(12u, cache.Lookup(1)->second->value);\n std::unique_ptr item3(new CachedItem(13));\n cache.Insert(3, std::move(item3));\n EXPECT_EQ(2u, cache.Size());\n auto iter = cache.Lookup(3);\n ASSERT_NE(cache.end(), iter);\n EXPECT_EQ(13u, iter->second->value);\n cache.Erase(iter);\n ASSERT_EQ(cache.end(), cache.Lookup(3));\n EXPECT_EQ(1u, cache.Size());\n cache.Clear();\n EXPECT_EQ(0u, cache.Size());\n}\nTEST(QuicLRUCacheTest, Eviction) {\n QuicLRUCache cache(3);\n for (size_t i = 1; i <= 4; ++i) {\n std::unique_ptr item(new CachedItem(10 + i));\n cache.Insert(i, std::move(item));\n }\n EXPECT_EQ(3u, cache.Size());\n EXPECT_EQ(3u, cache.MaxSize());\n EXPECT_EQ(cache.end(), cache.Lookup(1));\n EXPECT_EQ(14u, cache.Lookup(4)->second->value);\n EXPECT_EQ(12u, cache.Lookup(2)->second->value);\n std::unique_ptr item5(new CachedItem(15));\n cache.Insert(5, std::move(item5));\n EXPECT_EQ(cache.end(), cache.Lookup(3));\n EXPECT_EQ(15u, cache.Lookup(5)->second->value);\n cache.Clear();\n EXPECT_EQ(0u, cache.Size());\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_lru_cache.h"},"Test Code Url":{"kind":"string","value":"https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_lru_cache_test.cc"},"Commit Hash":{"kind":"string","value":"6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6"}}},{"rowIdx":223,"cells":{"ID":{"kind":"string","value":"1e57cfe2-0c5a-4707-a50b-7bf3a0666fa5"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"abseil/abseil-cpp"},"File Name":{"kind":"string","value":"uniform_helper"},"File Path in Repository":{"kind":"string","value":"absl/random/internal/uniform_helper.h"},"File Path for Unit Test":{"kind":"string","value":"absl/random/internal/uniform_helper_test.cc"},"Code":{"kind":"string","value":"#ifndef ABSL_RANDOM_INTERNAL_UNIFORM_HELPER_H_\n#define ABSL_RANDOM_INTERNAL_UNIFORM_HELPER_H_\n#include \n#include \n#include \n#include \"absl/base/config.h\"\n#include \"absl/meta/type_traits.h\"\n#include \"absl/random/internal/traits.h\"\nnamespace absl {\nABSL_NAMESPACE_BEGIN\ntemplate \nclass uniform_int_distribution;\ntemplate \nclass uniform_real_distribution;\nnamespace 
random_internal {\ntemplate \nstruct TagTypeCompare {};\ntemplate \nconstexpr bool operator==(TagTypeCompare, TagTypeCompare) {\n return true;\n}\ntemplate \nconstexpr bool operator!=(TagTypeCompare, TagTypeCompare) {\n return false;\n}\n} \nstruct IntervalClosedClosedTag\n : public random_internal::TagTypeCompare {};\nstruct IntervalClosedOpenTag\n : public random_internal::TagTypeCompare {};\nstruct IntervalOpenClosedTag\n : public random_internal::TagTypeCompare {};\nstruct IntervalOpenOpenTag\n : public random_internal::TagTypeCompare {};\nnamespace random_internal {\ntemplate \nusing uniform_inferred_return_t =\n absl::enable_if_t,\n is_widening_convertible>::value,\n typename std::conditional<\n is_widening_convertible::value, B, A>::type>;\ntemplate \ntypename absl::enable_if_t<\n absl::conjunction<\n IsIntegral,\n absl::disjunction,\n std::is_same>>::value,\n IntType>\nuniform_lower_bound(Tag, IntType a, IntType) {\n return a < (std::numeric_limits::max)() ? (a + 1) : a;\n}\ntemplate \ntypename absl::enable_if_t<\n absl::conjunction<\n std::is_floating_point,\n absl::disjunction,\n std::is_same>>::value,\n FloatType>\nuniform_lower_bound(Tag, FloatType a, FloatType b) {\n return std::nextafter(a, b);\n}\ntemplate \ntypename absl::enable_if_t<\n absl::disjunction,\n std::is_same>::value,\n NumType>\nuniform_lower_bound(Tag, NumType a, NumType) {\n return a;\n}\ntemplate \ntypename absl::enable_if_t<\n absl::conjunction<\n IsIntegral,\n absl::disjunction,\n std::is_same>>::value,\n IntType>\nuniform_upper_bound(Tag, IntType, IntType b) {\n return b > (std::numeric_limits::min)() ? (b - 1) : b;\n}\ntemplate \ntypename absl::enable_if_t<\n absl::conjunction<\n std::is_floating_point,\n absl::disjunction,\n std::is_same>>::value,\n FloatType>\nuniform_upper_bound(Tag, FloatType, FloatType b) {\n return b;\n}\ntemplate \ntypename absl::enable_if_t<\n absl::conjunction<\n IsIntegral,\n absl::disjunction,\n std::is_same>>::value,\n IntType>\nuniform_upper_bound(Tag, IntType, IntType b) {\n return b;\n}\ntemplate \ntypename absl::enable_if_t<\n absl::conjunction<\n std::is_floating_point,\n absl::disjunction,\n std::is_same>>::value,\n FloatType>\nuniform_upper_bound(Tag, FloatType, FloatType b) {\n return std::nextafter(b, (std::numeric_limits::max)());\n}\ntemplate \nabsl::enable_if_t::value, bool>\nis_uniform_range_valid(FloatType a, FloatType b) {\n return a <= b && std::isfinite(b - a);\n}\ntemplate \nabsl::enable_if_t::value, bool>\nis_uniform_range_valid(IntType a, IntType b) {\n return a <= b;\n}\ntemplate \nusing UniformDistribution =\n typename std::conditional::value,\n absl::uniform_int_distribution,\n absl::uniform_real_distribution>::type;\ntemplate \nstruct UniformDistributionWrapper : public UniformDistribution {\n template \n explicit UniformDistributionWrapper(TagType, NumType lo, NumType hi)\n : UniformDistribution(\n uniform_lower_bound(TagType{}, lo, hi),\n uniform_upper_bound(TagType{}, lo, hi)) {}\n explicit UniformDistributionWrapper(NumType lo, NumType hi)\n : UniformDistribution(\n uniform_lower_bound(IntervalClosedOpenTag(), lo, hi),\n uniform_upper_bound(IntervalClosedOpenTag(), lo, hi)) {}\n explicit UniformDistributionWrapper()\n : UniformDistribution(std::numeric_limits::lowest(),\n (std::numeric_limits::max)()) {}\n};\n} \nABSL_NAMESPACE_END\n} \n#endif "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"absl/random/internal/uniform_helper.h\"\n#include \n#include \n#include \n#include \"gtest/gtest.h\"\nnamespace {\nusing 
absl::IntervalClosedClosedTag;\nusing absl::IntervalClosedOpenTag;\nusing absl::IntervalOpenClosedTag;\nusing absl::IntervalOpenOpenTag;\nusing absl::random_internal::uniform_inferred_return_t;\nusing absl::random_internal::uniform_lower_bound;\nusing absl::random_internal::uniform_upper_bound;\nclass UniformHelperTest : public testing::Test {};\nTEST_F(UniformHelperTest, UniformBoundFunctionsGeneral) {\n constexpr IntervalClosedClosedTag IntervalClosedClosed;\n constexpr IntervalClosedOpenTag IntervalClosedOpen;\n constexpr IntervalOpenClosedTag IntervalOpenClosed;\n constexpr IntervalOpenOpenTag IntervalOpenOpen;\n EXPECT_EQ(uniform_lower_bound(IntervalOpenClosed, 0, 100), 1);\n EXPECT_EQ(uniform_lower_bound(IntervalOpenOpen, 0, 100), 1);\n EXPECT_GT(uniform_lower_bound(IntervalOpenClosed, 0, 1.0), 0);\n EXPECT_GT(uniform_lower_bound(IntervalOpenOpen, 0, 1.0), 0);\n EXPECT_GT(uniform_lower_bound(IntervalOpenClosed, 0, 1.0), 0);\n EXPECT_GT(uniform_lower_bound(IntervalOpenOpen, 0, 1.0), 0);\n EXPECT_EQ(uniform_lower_bound(IntervalClosedClosed, 0, 100), 0);\n EXPECT_EQ(uniform_lower_bound(IntervalClosedOpen, 0, 100), 0);\n EXPECT_EQ(uniform_lower_bound(IntervalClosedClosed, 0, 1.0), 0);\n EXPECT_EQ(uniform_lower_bound(IntervalClosedOpen, 0, 1.0), 0);\n EXPECT_EQ(uniform_lower_bound(IntervalClosedClosed, 0, 1.0), 0);\n EXPECT_EQ(uniform_lower_bound(IntervalClosedOpen, 0, 1.0), 0);\n EXPECT_EQ(uniform_upper_bound(IntervalOpenOpen, 0, 100), 99);\n EXPECT_EQ(uniform_upper_bound(IntervalClosedOpen, 0, 100), 99);\n EXPECT_EQ(uniform_upper_bound(IntervalOpenOpen, 0, 1.0), 1.0);\n EXPECT_EQ(uniform_upper_bound(IntervalClosedOpen, 0, 1.0), 1.0);\n EXPECT_EQ(uniform_upper_bound(IntervalOpenOpen, 0, 1.0), 1.0);\n EXPECT_EQ(uniform_upper_bound(IntervalClosedOpen, 0, 1.0), 1.0);\n EXPECT_EQ(uniform_upper_bound(IntervalOpenClosed, 0, 100), 100);\n EXPECT_EQ(uniform_upper_bound(IntervalClosedClosed, 0, 100), 100);\n EXPECT_GT(uniform_upper_bound(IntervalOpenClosed, 0, 1.0), 1.0);\n EXPECT_GT(uniform_upper_bound(IntervalClosedClosed, 0, 1.0), 1.0);\n EXPECT_GT(uniform_upper_bound(IntervalOpenClosed, 0, 1.0), 1.0);\n EXPECT_GT(uniform_upper_bound(IntervalClosedClosed, 0, 1.0), 1.0);\n EXPECT_EQ(uniform_lower_bound(IntervalOpenClosed, -100, -1), -99);\n EXPECT_EQ(uniform_lower_bound(IntervalOpenOpen, -100, -1), -99);\n EXPECT_GT(uniform_lower_bound(IntervalOpenClosed, -2.0, -1.0), -2.0);\n EXPECT_GT(uniform_lower_bound(IntervalOpenOpen, -2.0, -1.0), -2.0);\n EXPECT_GT(uniform_lower_bound(IntervalOpenClosed, -2.0, -1.0), -2.0);\n EXPECT_GT(uniform_lower_bound(IntervalOpenOpen, -2.0, -1.0), -2.0);\n EXPECT_EQ(uniform_lower_bound(IntervalClosedClosed, -100, -1), -100);\n EXPECT_EQ(uniform_lower_bound(IntervalClosedOpen, -100, -1), -100);\n EXPECT_EQ(uniform_lower_bound(IntervalClosedClosed, -2.0, -1.0), -2.0);\n EXPECT_EQ(uniform_lower_bound(IntervalClosedOpen, -2.0, -1.0), -2.0);\n EXPECT_EQ(uniform_lower_bound(IntervalClosedClosed, -2.0, -1.0),\n -2.0);\n EXPECT_EQ(uniform_lower_bound(IntervalClosedOpen, -2.0, -1.0), -2.0);\n EXPECT_EQ(uniform_upper_bound(IntervalOpenOpen, -100, -1), -2);\n EXPECT_EQ(uniform_upper_bound(IntervalClosedOpen, -100, -1), -2);\n EXPECT_EQ(uniform_upper_bound(IntervalOpenOpen, -2.0, -1.0), -1.0);\n EXPECT_EQ(uniform_upper_bound(IntervalClosedOpen, -2.0, -1.0), -1.0);\n EXPECT_EQ(uniform_upper_bound(IntervalOpenOpen, -2.0, -1.0), -1.0);\n EXPECT_EQ(uniform_upper_bound(IntervalClosedOpen, -2.0, -1.0), -1.0);\n EXPECT_EQ(uniform_upper_bound(IntervalOpenClosed, -100, -1), -1);\n 
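  // A closed upper bound is the limit itself for integer types, but the
  // floating-point overloads step one representable value past it
  // (std::nextafter(b, max)) so that the half-open real distribution
  // underneath can still emit b itself.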
EXPECT_EQ(uniform_upper_bound(IntervalClosedClosed, -100, -1), -1);\n EXPECT_GT(uniform_upper_bound(IntervalOpenClosed, -2.0, -1.0), -1.0);\n EXPECT_GT(uniform_upper_bound(IntervalClosedClosed, -2.0, -1.0), -1.0);\n EXPECT_GT(uniform_upper_bound(IntervalOpenClosed, -2.0, -1.0), -1.0);\n EXPECT_GT(uniform_upper_bound(IntervalClosedClosed, -2.0, -1.0),\n -1.0);\n EXPECT_GT(uniform_lower_bound(IntervalOpenClosed, 1.0, 2.0), 1.0);\n EXPECT_LT(uniform_lower_bound(IntervalOpenClosed, 1.0, +0.0), 1.0);\n EXPECT_LT(uniform_lower_bound(IntervalOpenClosed, 1.0, -0.0), 1.0);\n EXPECT_LT(uniform_lower_bound(IntervalOpenClosed, 1.0, -1.0), 1.0);\n}\nTEST_F(UniformHelperTest, UniformBoundFunctionsIntBounds) {\n constexpr IntervalOpenOpenTag IntervalOpenOpen;\n constexpr auto m = (std::numeric_limits::max)();\n EXPECT_EQ(1, uniform_lower_bound(IntervalOpenOpen, 0u, 0u));\n EXPECT_EQ(m, uniform_lower_bound(IntervalOpenOpen, m, m));\n EXPECT_EQ(m, uniform_lower_bound(IntervalOpenOpen, m - 1, m - 1));\n EXPECT_EQ(0, uniform_upper_bound(IntervalOpenOpen, 0u, 0u));\n EXPECT_EQ(m - 1, uniform_upper_bound(IntervalOpenOpen, m, m));\n constexpr auto l = (std::numeric_limits::min)();\n constexpr auto r = (std::numeric_limits::max)();\n EXPECT_EQ(1, uniform_lower_bound(IntervalOpenOpen, 0, 0));\n EXPECT_EQ(l + 1, uniform_lower_bound(IntervalOpenOpen, l, l));\n EXPECT_EQ(r, uniform_lower_bound(IntervalOpenOpen, r - 1, r - 1));\n EXPECT_EQ(r, uniform_lower_bound(IntervalOpenOpen, r, r));\n EXPECT_EQ(-1, uniform_upper_bound(IntervalOpenOpen, 0, 0));\n EXPECT_EQ(l, uniform_upper_bound(IntervalOpenOpen, l, l));\n EXPECT_EQ(r - 1, uniform_upper_bound(IntervalOpenOpen, r, r));\n}\nTEST_F(UniformHelperTest, UniformBoundFunctionsRealBounds) {\n constexpr IntervalOpenClosedTag IntervalOpenClosed;\n EXPECT_EQ(1.0, uniform_lower_bound(IntervalOpenClosed, 1.0, 1.0));\n EXPECT_EQ(1.0f, uniform_lower_bound(IntervalOpenClosed, 1.0f, 1.0f));\n constexpr auto r = (std::numeric_limits::max)();\n const auto re = std::nexttoward(r, 0.0);\n constexpr auto l = -r;\n const auto le = std::nexttoward(l, 0.0);\n EXPECT_EQ(l, uniform_lower_bound(IntervalOpenClosed, l, l)); \n EXPECT_EQ(r, uniform_lower_bound(IntervalOpenClosed, r, r)); \n EXPECT_EQ(le, uniform_lower_bound(IntervalOpenClosed, l, r)); \n EXPECT_EQ(le, uniform_lower_bound(IntervalOpenClosed, l, 0.0)); \n EXPECT_EQ(le, uniform_lower_bound(IntervalOpenClosed, l, le)); \n EXPECT_EQ(r, uniform_lower_bound(IntervalOpenClosed, re, r)); \n EXPECT_EQ(le, uniform_upper_bound(IntervalOpenClosed, l, l)); \n EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, r, r)); \n EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, l, r)); \n EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, l, re)); \n EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, 0.0, r)); \n EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, re, r)); \n EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, le, re)); \n const double e = std::nextafter(1.0, 2.0); \n const double f = std::nextafter(1.0, 0.0); \n EXPECT_EQ(e, uniform_lower_bound(IntervalOpenClosed, 1.0, e));\n EXPECT_EQ(std::nextafter(e, 2.0),\n uniform_upper_bound(IntervalOpenClosed, 1.0, e));\n EXPECT_EQ(1.0, uniform_lower_bound(IntervalOpenClosed, f, 1.0));\n EXPECT_EQ(e, uniform_upper_bound(IntervalOpenClosed, f, 1.0));\n const double g = std::numeric_limits::denorm_min();\n const double h = std::nextafter(g, 1.0);\n EXPECT_EQ(g, uniform_lower_bound(IntervalOpenClosed, 0.0, g));\n EXPECT_EQ(h, uniform_upper_bound(IntervalOpenClosed, 0.0, g));\n 
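  // g is std::numeric_limits<double>::denorm_min(), the smallest positive
  // subnormal, so these expectations pin nextafter() behavior at the very
  // bottom of the representable range, where each step is exactly one
  // representable value.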
EXPECT_EQ(h, uniform_lower_bound(IntervalOpenClosed, g, 1.0));\n EXPECT_EQ(e, uniform_upper_bound(IntervalOpenClosed, g, 1.0));\n EXPECT_EQ(f, uniform_lower_bound(IntervalOpenClosed, 1.0, -1.0));\n}\nstruct Invalid {};\ntemplate \nauto InferredUniformReturnT(int) -> uniform_inferred_return_t;\ntemplate \nInvalid InferredUniformReturnT(...);\ntemplate \nvoid CheckArgsInferType() {\n static_assert(\n absl::conjunction<\n std::is_same(0))>,\n std::is_same(0))>>::value,\n \"\");\n}\nTEST_F(UniformHelperTest, UniformTypeInference) {\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n CheckArgsInferType();\n}\n} "},"Code Url":{"kind":"string","value":"https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/uniform_helper.h"},"Test Code Url":{"kind":"string","value":"https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/uniform_helper_test.cc"},"Commit Hash":{"kind":"string","value":"03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4"}}},{"rowIdx":224,"cells":{"ID":{"kind":"string","value":"d9e7938a-62ab-44dd-9d1c-26c951320ef7"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"abseil/abseil-cpp"},"File Name":{"kind":"string","value":"crc32c"},"File Path in Repository":{"kind":"string","value":"absl/crc/crc32c.cc"},"File Path for Unit Test":{"kind":"string","value":"absl/crc/crc32c_test.cc"},"Code":{"kind":"string","value":"#include \"absl/crc/crc32c.h\"\n#include \n#include \"absl/crc/internal/crc.h\"\n#include \"absl/crc/internal/crc32c.h\"\n#include \"absl/crc/internal/crc_memcpy.h\"\n#include \"absl/strings/string_view.h\"\nnamespace absl {\nABSL_NAMESPACE_BEGIN\nnamespace {\nconst crc_internal::CRC* CrcEngine() {\n static const crc_internal::CRC* engine = crc_internal::CRC::Crc32c();\n return engine;\n}\nconstexpr uint32_t kCRC32Xor = 0xffffffffU;\n} \nnamespace crc_internal {\ncrc32c_t UnextendCrc32cByZeroes(crc32c_t initial_crc, size_t length) {\n uint32_t crc = static_cast(initial_crc) ^ kCRC32Xor;\n CrcEngine()->UnextendByZeroes(&crc, length);\n return static_cast(crc ^ kCRC32Xor);\n}\ncrc32c_t ExtendCrc32cInternal(crc32c_t initial_crc,\n absl::string_view buf_to_add) {\n uint32_t crc = static_cast(initial_crc) ^ kCRC32Xor;\n CrcEngine()->Extend(&crc, buf_to_add.data(), buf_to_add.size());\n return static_cast(crc ^ kCRC32Xor);\n}\n} \ncrc32c_t ComputeCrc32c(absl::string_view buf) {\n return ExtendCrc32c(crc32c_t{0}, buf);\n}\ncrc32c_t ExtendCrc32cByZeroes(crc32c_t initial_crc, size_t length) {\n uint32_t crc = static_cast(initial_crc) ^ kCRC32Xor;\n CrcEngine()->ExtendByZeroes(&crc, length);\n return static_cast(crc ^ kCRC32Xor);\n}\ncrc32c_t ConcatCrc32c(crc32c_t lhs_crc, crc32c_t rhs_crc, size_t rhs_len) {\n uint32_t result = 
static_cast<uint32_t>(lhs_crc);\n CrcEngine()->ExtendByZeroes(&result, rhs_len);\n return crc32c_t{result ^ static_cast<uint32_t>(rhs_crc)};\n}\ncrc32c_t RemoveCrc32cPrefix(crc32c_t crc_a, crc32c_t crc_ab, size_t length_b) {\n return ConcatCrc32c(crc_a, crc_ab, length_b);\n}\ncrc32c_t MemcpyCrc32c(void* dest, const void* src, size_t count,\n crc32c_t initial_crc) {\n return static_cast<crc32c_t>(\n crc_internal::Crc32CAndCopy(dest, src, count, initial_crc, false));\n}\ncrc32c_t RemoveCrc32cSuffix(crc32c_t full_string_crc, crc32c_t suffix_crc,\n size_t suffix_len) {\n uint32_t result = static_cast<uint32_t>(full_string_crc) ^\n static_cast<uint32_t>(suffix_crc);\n CrcEngine()->UnextendByZeroes(&result, suffix_len);\n return crc32c_t{result};\n}\nABSL_NAMESPACE_END\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"absl/crc/crc32c.h\"\n#include <algorithm>\n#include <cstddef>\n#include <cstdint>\n#include <cstring>\n#include <sstream>\n#include <string>\n#include \"gtest/gtest.h\"\n#include \"absl/crc/internal/crc32c.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_format.h\"\n#include \"absl/strings/string_view.h\"\nnamespace {\nTEST(CRC32C, RFC3720) {\n char data[32];\n memset(data, 0, sizeof(data));\n EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(data, sizeof(data))),\n absl::crc32c_t{0x8a9136aa});\n memset(data, 0xff, sizeof(data));\n EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(data, sizeof(data))),\n absl::crc32c_t{0x62a8ab43});\n for (int i = 0; i < 32; ++i) data[i] = static_cast<char>(i);\n EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(data, sizeof(data))),\n absl::crc32c_t{0x46dd794e});\n for (int i = 0; i < 32; ++i) data[i] = static_cast<char>(31 - i);\n EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(data, sizeof(data))),\n absl::crc32c_t{0x113fdb5c});\n constexpr uint8_t cmd[48] = {\n 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,\n 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,\n 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n };\n EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(\n reinterpret_cast<const char*>(cmd), sizeof(cmd))),\n absl::crc32c_t{0xd9963a56});\n}\nstd::string TestString(size_t len) {\n std::string result;\n result.reserve(len);\n for (size_t i = 0; i < len; ++i) {\n result.push_back(static_cast<char>(i % 256));\n }\n return result;\n}\nTEST(CRC32C, Compute) {\n EXPECT_EQ(absl::ComputeCrc32c(\"\"), absl::crc32c_t{0});\n EXPECT_EQ(absl::ComputeCrc32c(\"hello world\"), absl::crc32c_t{0xc99465aa});\n}\nTEST(CRC32C, Extend) {\n uint32_t base = 0xC99465AA;  // CRC32C of \"hello world\".\n std::string extension = \"Extension String\";\n EXPECT_EQ(\n absl::ExtendCrc32c(absl::crc32c_t{base}, extension),\n absl::crc32c_t{0xD2F65090});  // CRC32C of \"hello worldExtension String\".\n}\nTEST(CRC32C, ExtendByZeroes) {\n std::string base = \"hello world\";\n absl::crc32c_t base_crc = absl::crc32c_t{0xc99465aa};\n constexpr size_t kExtendByValues[] = {100, 10000, 100000};\n for (const size_t extend_by : kExtendByValues) {\n SCOPED_TRACE(extend_by);\n absl::crc32c_t crc2 = absl::ExtendCrc32cByZeroes(base_crc, extend_by);\n EXPECT_EQ(crc2, absl::ComputeCrc32c(base + std::string(extend_by, '\\0')));\n }\n}\nTEST(CRC32C, UnextendByZeroes) {\n constexpr size_t kExtendByValues[] = {2, 200, 20000, 200000, 20000000};\n constexpr size_t kUnextendByValues[] = {0, 100, 10000, 100000, 10000000};\n for (auto seed_crc : {absl::crc32c_t{0}, absl::crc32c_t{0xc99465aa}}) {\n SCOPED_TRACE(seed_crc);\n for (const size_t size_1 : kExtendByValues) {\n for (const size_t size_2 : kUnextendByValues) {\n size_t 
extend_size = std::max(size_1, size_2);\n size_t unextend_size = std::min(size_1, size_2);\n SCOPED_TRACE(extend_size);\n SCOPED_TRACE(unextend_size);\n absl::crc32c_t crc1 = seed_crc;\n crc1 = absl::ExtendCrc32cByZeroes(crc1, extend_size);\n crc1 = absl::crc_internal::UnextendCrc32cByZeroes(crc1, unextend_size);\n absl::crc32c_t crc2 = seed_crc;\n crc2 = absl::ExtendCrc32cByZeroes(crc2, extend_size - unextend_size);\n EXPECT_EQ(crc1, crc2);\n }\n }\n }\n constexpr size_t kSizes[] = {0, 1, 100, 10000};\n for (const size_t size : kSizes) {\n SCOPED_TRACE(size);\n std::string string_before = TestString(size);\n std::string string_after = string_before + std::string(size, '\\0');\n absl::crc32c_t crc_before = absl::ComputeCrc32c(string_before);\n absl::crc32c_t crc_after = absl::ComputeCrc32c(string_after);\n EXPECT_EQ(crc_before,\n absl::crc_internal::UnextendCrc32cByZeroes(crc_after, size));\n }\n}\nTEST(CRC32C, Concat) {\n std::string hello = \"Hello, \";\n std::string world = \"world!\";\n std::string hello_world = absl::StrCat(hello, world);\n absl::crc32c_t crc_a = absl::ComputeCrc32c(hello);\n absl::crc32c_t crc_b = absl::ComputeCrc32c(world);\n absl::crc32c_t crc_ab = absl::ComputeCrc32c(hello_world);\n EXPECT_EQ(absl::ConcatCrc32c(crc_a, crc_b, world.size()), crc_ab);\n}\nTEST(CRC32C, Memcpy) {\n constexpr size_t kBytesSize[] = {0, 1, 20, 500, 100000};\n for (size_t bytes : kBytesSize) {\n SCOPED_TRACE(bytes);\n std::string sample_string = TestString(bytes);\n std::string target_buffer = std::string(bytes, '\\0');\n absl::crc32c_t memcpy_crc =\n absl::MemcpyCrc32c(&(target_buffer[0]), sample_string.data(), bytes);\n absl::crc32c_t compute_crc = absl::ComputeCrc32c(sample_string);\n EXPECT_EQ(memcpy_crc, compute_crc);\n EXPECT_EQ(sample_string, target_buffer);\n }\n}\nTEST(CRC32C, RemovePrefix) {\n std::string hello = \"Hello, \";\n std::string world = \"world!\";\n std::string hello_world = absl::StrCat(hello, world);\n absl::crc32c_t crc_a = absl::ComputeCrc32c(hello);\n absl::crc32c_t crc_b = absl::ComputeCrc32c(world);\n absl::crc32c_t crc_ab = absl::ComputeCrc32c(hello_world);\n EXPECT_EQ(absl::RemoveCrc32cPrefix(crc_a, crc_ab, world.size()), crc_b);\n}\nTEST(CRC32C, RemoveSuffix) {\n std::string hello = \"Hello, \";\n std::string world = \"world!\";\n std::string hello_world = absl::StrCat(hello, world);\n absl::crc32c_t crc_a = absl::ComputeCrc32c(hello);\n absl::crc32c_t crc_b = absl::ComputeCrc32c(world);\n absl::crc32c_t crc_ab = absl::ComputeCrc32c(hello_world);\n EXPECT_EQ(absl::RemoveCrc32cSuffix(crc_ab, crc_b, world.size()), crc_a);\n}\nTEST(CRC32C, InsertionOperator) {\n {\n std::ostringstream buf;\n buf << absl::crc32c_t{0xc99465aa};\n EXPECT_EQ(buf.str(), \"c99465aa\");\n }\n {\n std::ostringstream buf;\n buf << absl::crc32c_t{0};\n EXPECT_EQ(buf.str(), \"00000000\");\n }\n {\n std::ostringstream buf;\n buf << absl::crc32c_t{17};\n EXPECT_EQ(buf.str(), \"00000011\");\n }\n}\nTEST(CRC32C, AbslStringify) {\n EXPECT_EQ(absl::StrFormat(\"%v\", absl::crc32c_t{0xc99465aa}), \"c99465aa\");\n EXPECT_EQ(absl::StrFormat(\"%v\", absl::crc32c_t{0}), \"00000000\");\n EXPECT_EQ(absl::StrFormat(\"%v\", absl::crc32c_t{17}), \"00000011\");\n EXPECT_EQ(absl::StrCat(absl::crc32c_t{0xc99465aa}), \"c99465aa\");\n EXPECT_EQ(absl::StrCat(absl::crc32c_t{0}), \"00000000\");\n EXPECT_EQ(absl::StrCat(absl::crc32c_t{17}), \"00000011\");\n}\n} "},"Code Url":{"kind":"string","value":"https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/crc/crc32c.cc"},"Test Code 
Url":{"kind":"string","value":"https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/crc/crc32c_test.cc"},"Commit Hash":{"kind":"string","value":"03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4"}}},{"rowIdx":225,"cells":{"ID":{"kind":"string","value":"ffc07e61-07c7-4b48-aa1e-c9a582db6910"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/cel-cpp"},"File Name":{"kind":"string","value":"standard_library"},"File Path in Repository":{"kind":"string","value":"checker/standard_library.cc"},"File Path for Unit Test":{"kind":"string","value":"checker/standard_library_test.cc"},"Code":{"kind":"string","value":"#include \"checker/standard_library.h\"\n#include \n#include \n#include \"absl/base/no_destructor.h\"\n#include \"absl/status/status.h\"\n#include \"base/builtins.h\"\n#include \"checker/internal/builtins_arena.h\"\n#include \"checker/type_checker_builder.h\"\n#include \"common/constant.h\"\n#include \"common/decl.h\"\n#include \"common/type.h\"\n#include \"internal/status_macros.h\"\nnamespace cel {\nnamespace {\nusing ::cel::checker_internal::BuiltinsArena;\nTypeParamType TypeParamA() { return TypeParamType(\"A\"); }\nTypeParamType TypeParamB() { return TypeParamType(\"B\"); }\nType ListOfA() {\n static absl::NoDestructor kInstance(\n ListType(BuiltinsArena(), TypeParamA()));\n return *kInstance;\n}\nType MapOfAB() {\n static absl::NoDestructor kInstance(\n MapType(BuiltinsArena(), TypeParamA(), TypeParamB()));\n return *kInstance;\n}\nType TypeOfA() {\n static absl::NoDestructor kInstance(\n TypeType(BuiltinsArena(), TypeParamA()));\n return *kInstance;\n}\nType TypeNullType() {\n static absl::NoDestructor kInstance(\n TypeType(BuiltinsArena(), NullType()));\n return *kInstance;\n}\nType TypeBoolType() {\n static absl::NoDestructor kInstance(\n TypeType(BuiltinsArena(), BoolType()));\n return *kInstance;\n}\nType TypeIntType() {\n static absl::NoDestructor kInstance(\n TypeType(BuiltinsArena(), IntType()));\n return *kInstance;\n}\nType TypeUintType() {\n static absl::NoDestructor kInstance(\n TypeType(BuiltinsArena(), UintType()));\n return *kInstance;\n}\nType TypeDoubleType() {\n static absl::NoDestructor kInstance(\n TypeType(BuiltinsArena(), DoubleType()));\n return *kInstance;\n}\nType TypeStringType() {\n static absl::NoDestructor kInstance(\n TypeType(BuiltinsArena(), StringType()));\n return *kInstance;\n}\nType TypeBytesType() {\n static absl::NoDestructor kInstance(\n TypeType(BuiltinsArena(), BytesType()));\n return *kInstance;\n}\nType TypeDurationType() {\n static absl::NoDestructor kInstance(\n TypeType(BuiltinsArena(), DurationType()));\n return *kInstance;\n}\nType TypeTimestampType() {\n static absl::NoDestructor kInstance(\n TypeType(BuiltinsArena(), TimestampType()));\n return *kInstance;\n}\nType TypeListType() {\n static absl::NoDestructor kInstance(\n TypeType(BuiltinsArena(), ListOfA()));\n return *kInstance;\n}\nType TypeMapType() {\n static absl::NoDestructor kInstance(\n TypeType(BuiltinsArena(), MapOfAB()));\n return *kInstance;\n}\nclass StandardOverloads {\n public:\n static constexpr char kAddInt[] = \"add_int64\";\n static constexpr char kAddUint[] = \"add_uint64\";\n static constexpr char kAddDouble[] = \"add_double\";\n static constexpr char kAddDurationDuration[] = \"add_duration_duration\";\n static constexpr char kAddDurationTimestamp[] = \"add_duration_timestamp\";\n static constexpr char kAddTimestampDuration[] = \"add_timestamp_duration\";\n static constexpr char kAddString[] = 
\"add_string\";\n static constexpr char kAddBytes[] = \"add_bytes\";\n static constexpr char kAddList[] = \"add_list\";\n static constexpr char kSubtractInt[] = \"subtract_int64\";\n static constexpr char kSubtractUint[] = \"subtract_uint64\";\n static constexpr char kSubtractDouble[] = \"subtract_double\";\n static constexpr char kSubtractDurationDuration[] =\n \"subtract_duration_duration\";\n static constexpr char kSubtractTimestampDuration[] =\n \"subtract_timestamp_duration\";\n static constexpr char kSubtractTimestampTimestamp[] =\n \"subtract_timestamp_timestamp\";\n static constexpr char kMultiplyInt[] = \"multiply_int64\";\n static constexpr char kMultiplyUint[] = \"multiply_uint64\";\n static constexpr char kMultiplyDouble[] = \"multiply_double\";\n static constexpr char kDivideInt[] = \"divide_int64\";\n static constexpr char kDivideUint[] = \"divide_uint64\";\n static constexpr char kDivideDouble[] = \"divide_double\";\n static constexpr char kModuloInt[] = \"modulo_int64\";\n static constexpr char kModuloUint[] = \"modulo_uint64\";\n static constexpr char kNegateInt[] = \"negate_int64\";\n static constexpr char kNegateDouble[] = \"negate_double\";\n static constexpr char kNot[] = \"logical_not\";\n static constexpr char kAnd[] = \"logical_and\";\n static constexpr char kOr[] = \"logical_or\";\n static constexpr char kConditional[] = \"conditional\";\n static constexpr char kNotStrictlyFalse[] = \"not_strictly_false\";\n static constexpr char kNotStrictlyFalseDeprecated[] =\n \"__not_strictly_false__\";\n static constexpr char kEquals[] = \"equals\";\n static constexpr char kNotEquals[] = \"not_equals\";\n static constexpr char kLessBool[] = \"less_bool\";\n static constexpr char kLessString[] = \"less_string\";\n static constexpr char kLessBytes[] = \"less_bytes\";\n static constexpr char kLessDuration[] = \"less_duration\";\n static constexpr char kLessTimestamp[] = \"less_timestamp\";\n static constexpr char kLessInt[] = \"less_int64\";\n static constexpr char kLessIntUint[] = \"less_int64_uint64\";\n static constexpr char kLessIntDouble[] = \"less_int64_double\";\n static constexpr char kLessDouble[] = \"less_double\";\n static constexpr char kLessDoubleInt[] = \"less_double_int64\";\n static constexpr char kLessDoubleUint[] = \"less_double_uint64\";\n static constexpr char kLessUint[] = \"less_uint64\";\n static constexpr char kLessUintInt[] = \"less_uint64_int64\";\n static constexpr char kLessUintDouble[] = \"less_uint64_double\";\n static constexpr char kGreaterBool[] = \"greater_bool\";\n static constexpr char kGreaterString[] = \"greater_string\";\n static constexpr char kGreaterBytes[] = \"greater_bytes\";\n static constexpr char kGreaterDuration[] = \"greater_duration\";\n static constexpr char kGreaterTimestamp[] = \"greater_timestamp\";\n static constexpr char kGreaterInt[] = \"greater_int64\";\n static constexpr char kGreaterIntUint[] = \"greater_int64_uint64\";\n static constexpr char kGreaterIntDouble[] = \"greater_int64_double\";\n static constexpr char kGreaterDouble[] = \"greater_double\";\n static constexpr char kGreaterDoubleInt[] = \"greater_double_int64\";\n static constexpr char kGreaterDoubleUint[] = \"greater_double_uint64\";\n static constexpr char kGreaterUint[] = \"greater_uint64\";\n static constexpr char kGreaterUintInt[] = \"greater_uint64_int64\";\n static constexpr char kGreaterUintDouble[] = \"greater_uint64_double\";\n static constexpr char kGreaterEqualsBool[] = \"greater_equals_bool\";\n static constexpr char kGreaterEqualsString[] = 
\"greater_equals_string\";\n static constexpr char kGreaterEqualsBytes[] = \"greater_equals_bytes\";\n static constexpr char kGreaterEqualsDuration[] = \"greater_equals_duration\";\n static constexpr char kGreaterEqualsTimestamp[] = \"greater_equals_timestamp\";\n static constexpr char kGreaterEqualsInt[] = \"greater_equals_int64\";\n static constexpr char kGreaterEqualsIntUint[] = \"greater_equals_int64_uint64\";\n static constexpr char kGreaterEqualsIntDouble[] =\n \"greater_equals_int64_double\";\n static constexpr char kGreaterEqualsDouble[] = \"greater_equals_double\";\n static constexpr char kGreaterEqualsDoubleInt[] =\n \"greater_equals_double_int64\";\n static constexpr char kGreaterEqualsDoubleUint[] =\n \"greater_equals_double_uint64\";\n static constexpr char kGreaterEqualsUint[] = \"greater_equals_uint64\";\n static constexpr char kGreaterEqualsUintInt[] = \"greater_equals_uint64_int64\";\n static constexpr char kGreaterEqualsUintDouble[] =\n \"greater_equals_uint_double\";\n static constexpr char kLessEqualsBool[] = \"less_equals_bool\";\n static constexpr char kLessEqualsString[] = \"less_equals_string\";\n static constexpr char kLessEqualsBytes[] = \"less_equals_bytes\";\n static constexpr char kLessEqualsDuration[] = \"less_equals_duration\";\n static constexpr char kLessEqualsTimestamp[] = \"less_equals_timestamp\";\n static constexpr char kLessEqualsInt[] = \"less_equals_int64\";\n static constexpr char kLessEqualsIntUint[] = \"less_equals_int64_uint64\";\n static constexpr char kLessEqualsIntDouble[] = \"less_equals_int64_double\";\n static constexpr char kLessEqualsDouble[] = \"less_equals_double\";\n static constexpr char kLessEqualsDoubleInt[] = \"less_equals_double_int64\";\n static constexpr char kLessEqualsDoubleUint[] = \"less_equals_double_uint64\";\n static constexpr char kLessEqualsUint[] = \"less_equals_uint64\";\n static constexpr char kLessEqualsUintInt[] = \"less_equals_uint64_int64\";\n static constexpr char kLessEqualsUintDouble[] = \"less_equals_uint64_double\";\n static constexpr char kIndexList[] = \"index_list\";\n static constexpr char kIndexMap[] = \"index_map\";\n static constexpr char kInList[] = \"in_list\";\n static constexpr char kInMap[] = \"in_map\";\n static constexpr char kSizeBytes[] = \"size_bytes\";\n static constexpr char kSizeList[] = \"size_list\";\n static constexpr char kSizeMap[] = \"size_map\";\n static constexpr char kSizeString[] = \"size_string\";\n static constexpr char kSizeBytesMember[] = \"bytes_size\";\n static constexpr char kSizeListMember[] = \"list_size\";\n static constexpr char kSizeMapMember[] = \"map_size\";\n static constexpr char kSizeStringMember[] = \"string_size\";\n static constexpr char kContainsString[] = \"contains_string\";\n static constexpr char kEndsWithString[] = \"ends_with_string\";\n static constexpr char kStartsWithString[] = \"starts_with_string\";\n static constexpr char kMatches[] = \"matches\";\n static constexpr char kMatchesMember[] = \"matches_string\";\n static constexpr char kTimestampToYear[] = \"timestamp_to_year\";\n static constexpr char kTimestampToYearWithTz[] = \"timestamp_to_year_with_tz\";\n static constexpr char kTimestampToMonth[] = \"timestamp_to_month\";\n static constexpr char kTimestampToMonthWithTz[] =\n \"timestamp_to_month_with_tz\";\n static constexpr char kTimestampToDayOfYear[] = \"timestamp_to_day_of_year\";\n static constexpr char kTimestampToDayOfYearWithTz[] =\n \"timestamp_to_day_of_year_with_tz\";\n static constexpr char kTimestampToDayOfMonth[] = 
\"timestamp_to_day_of_month\";\n static constexpr char kTimestampToDayOfMonthWithTz[] =\n \"timestamp_to_day_of_month_with_tz\";\n static constexpr char kTimestampToDayOfWeek[] = \"timestamp_to_day_of_week\";\n static constexpr char kTimestampToDayOfWeekWithTz[] =\n \"timestamp_to_day_of_week_with_tz\";\n static constexpr char kTimestampToDate[] =\n \"timestamp_to_day_of_month_1_based\";\n static constexpr char kTimestampToDateWithTz[] =\n \"timestamp_to_day_of_month_1_based_with_tz\";\n static constexpr char kTimestampToHours[] = \"timestamp_to_hours\";\n static constexpr char kTimestampToHoursWithTz[] =\n \"timestamp_to_hours_with_tz\";\n static constexpr char kDurationToHours[] = \"duration_to_hours\";\n static constexpr char kTimestampToMinutes[] = \"timestamp_to_minutes\";\n static constexpr char kTimestampToMinutesWithTz[] =\n \"timestamp_to_minutes_with_tz\";\n static constexpr char kDurationToMinutes[] = \"duration_to_minutes\";\n static constexpr char kTimestampToSeconds[] = \"timestamp_to_seconds\";\n static constexpr char kTimestampToSecondsWithTz[] = \"timestamp_to_seconds_tz\";\n static constexpr char kDurationToSeconds[] = \"duration_to_seconds\";\n static constexpr char kTimestampToMilliseconds[] =\n \"timestamp_to_milliseconds\";\n static constexpr char kTimestampToMillisecondsWithTz[] =\n \"timestamp_to_milliseconds_with_tz\";\n static constexpr char kDurationToMilliseconds[] = \"duration_to_milliseconds\";\n static constexpr char kToDyn[] = \"to_dyn\";\n static constexpr char kUintToUint[] = \"uint64_to_uint64\";\n static constexpr char kDoubleToUint[] = \"double_to_uint64\";\n static constexpr char kIntToUint[] = \"int64_to_uint64\";\n static constexpr char kStringToUint[] = \"string_to_uint64\";\n static constexpr char kUintToInt[] = \"uint64_to_int64\";\n static constexpr char kDoubleToInt[] = \"double_to_int64\";\n static constexpr char kIntToInt[] = \"int64_to_int64\";\n static constexpr char kStringToInt[] = \"string_to_int64\";\n static constexpr char kTimestampToInt[] = \"timestamp_to_int64\";\n static constexpr char kDurationToInt[] = \"duration_to_int64\";\n static constexpr char kDoubleToDouble[] = \"double_to_double\";\n static constexpr char kUintToDouble[] = \"uint64_to_double\";\n static constexpr char kIntToDouble[] = \"int64_to_double\";\n static constexpr char kStringToDouble[] = \"string_to_double\";\n static constexpr char kBoolToBool[] = \"bool_to_bool\";\n static constexpr char kStringToBool[] = \"string_to_bool\";\n static constexpr char kBytesToBytes[] = \"bytes_to_bytes\";\n static constexpr char kStringToBytes[] = \"string_to_bytes\";\n static constexpr char kStringToString[] = \"string_to_string\";\n static constexpr char kBytesToString[] = \"bytes_to_string\";\n static constexpr char kBoolToString[] = \"bool_to_string\";\n static constexpr char kDoubleToString[] = \"double_to_string\";\n static constexpr char kIntToString[] = \"int64_to_string\";\n static constexpr char kUintToString[] = \"uint64_to_string\";\n static constexpr char kDurationToString[] = \"duration_to_string\";\n static constexpr char kTimestampToString[] = \"timestamp_to_string\";\n static constexpr char kTimestampToTimestamp[] = \"timestamp_to_timestamp\";\n static constexpr char kIntToTimestamp[] = \"int64_to_timestamp\";\n static constexpr char kStringToTimestamp[] = \"string_to_timestamp\";\n static constexpr char kDurationToDuration[] = \"duration_to_duration\";\n static constexpr char kIntToDuration[] = \"int64_to_duration\";\n static constexpr char kStringToDuration[] = 
\"string_to_duration\";\n static constexpr char kToType[] = \"type\";\n};\nabsl::Status AddArithmeticOps(TypeCheckerBuilder& builder) {\n FunctionDecl add_op;\n add_op.set_name(builtin::kAdd);\n CEL_RETURN_IF_ERROR(add_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kAddInt, IntType(), IntType(), IntType())));\n CEL_RETURN_IF_ERROR(add_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kAddDouble, DoubleType(),\n DoubleType(), DoubleType())));\n CEL_RETURN_IF_ERROR(add_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kAddUint, UintType(), UintType(), UintType())));\n CEL_RETURN_IF_ERROR(add_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kAddDurationDuration, DurationType(),\n DurationType(), DurationType())));\n CEL_RETURN_IF_ERROR(add_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kAddDurationTimestamp,\n TimestampType(), DurationType(), TimestampType())));\n CEL_RETURN_IF_ERROR(add_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kAddTimestampDuration,\n TimestampType(), TimestampType(), DurationType())));\n CEL_RETURN_IF_ERROR(add_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kAddBytes, BytesType(), BytesType(), BytesType())));\n CEL_RETURN_IF_ERROR(add_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kAddString, StringType(),\n StringType(), StringType())));\n CEL_RETURN_IF_ERROR(add_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kAddList, ListOfA(), ListOfA(), ListOfA())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(add_op)));\n FunctionDecl subtract_op;\n subtract_op.set_name(builtin::kSubtract);\n CEL_RETURN_IF_ERROR(subtract_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kSubtractInt, IntType(), IntType(), IntType())));\n CEL_RETURN_IF_ERROR(subtract_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kSubtractUint, UintType(), UintType(), UintType())));\n CEL_RETURN_IF_ERROR(subtract_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kSubtractDouble, DoubleType(),\n DoubleType(), DoubleType())));\n CEL_RETURN_IF_ERROR(subtract_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kSubtractDurationDuration,\n DurationType(), DurationType(), DurationType())));\n CEL_RETURN_IF_ERROR(subtract_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kSubtractTimestampDuration,\n TimestampType(), TimestampType(), DurationType())));\n CEL_RETURN_IF_ERROR(subtract_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kSubtractTimestampTimestamp,\n DurationType(), TimestampType(), TimestampType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(subtract_op)));\n FunctionDecl multiply_op;\n multiply_op.set_name(builtin::kMultiply);\n CEL_RETURN_IF_ERROR(multiply_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kMultiplyInt, IntType(), IntType(), IntType())));\n CEL_RETURN_IF_ERROR(multiply_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kMultiplyUint, UintType(), UintType(), UintType())));\n CEL_RETURN_IF_ERROR(multiply_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kMultiplyDouble, DoubleType(),\n DoubleType(), DoubleType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(multiply_op)));\n FunctionDecl division_op;\n division_op.set_name(builtin::kDivide);\n CEL_RETURN_IF_ERROR(division_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kDivideInt, IntType(), IntType(), IntType())));\n CEL_RETURN_IF_ERROR(division_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kDivideUint, UintType(), UintType(), UintType())));\n CEL_RETURN_IF_ERROR(division_op.AddOverload(\n 
MakeOverloadDecl(StandardOverloads::kDivideDouble, DoubleType(),\n DoubleType(), DoubleType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(division_op)));\n FunctionDecl modulo_op;\n modulo_op.set_name(builtin::kModulo);\n CEL_RETURN_IF_ERROR(modulo_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kModuloInt, IntType(), IntType(), IntType())));\n CEL_RETURN_IF_ERROR(modulo_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kModuloUint, UintType(), UintType(), UintType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(modulo_op)));\n FunctionDecl negate_op;\n negate_op.set_name(builtin::kNeg);\n CEL_RETURN_IF_ERROR(negate_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kNegateInt, IntType(), IntType())));\n CEL_RETURN_IF_ERROR(negate_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kNegateDouble, DoubleType(), DoubleType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(negate_op)));\n return absl::OkStatus();\n}\nabsl::Status AddLogicalOps(TypeCheckerBuilder& builder) {\n FunctionDecl not_op;\n not_op.set_name(builtin::kNot);\n CEL_RETURN_IF_ERROR(not_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kNot, BoolType(), BoolType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(not_op)));\n FunctionDecl and_op;\n and_op.set_name(builtin::kAnd);\n CEL_RETURN_IF_ERROR(and_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kAnd, BoolType(), BoolType(), BoolType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(and_op)));\n FunctionDecl or_op;\n or_op.set_name(builtin::kOr);\n CEL_RETURN_IF_ERROR(or_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kOr, BoolType(), BoolType(), BoolType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(or_op)));\n FunctionDecl conditional_op;\n conditional_op.set_name(builtin::kTernary);\n CEL_RETURN_IF_ERROR(conditional_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kConditional, TypeParamA(),\n BoolType(), TypeParamA(), TypeParamA())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(conditional_op)));\n FunctionDecl not_strictly_false;\n not_strictly_false.set_name(builtin::kNotStrictlyFalse);\n CEL_RETURN_IF_ERROR(not_strictly_false.AddOverload(MakeOverloadDecl(\n StandardOverloads::kNotStrictlyFalse, BoolType(), BoolType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(not_strictly_false)));\n FunctionDecl not_strictly_false_deprecated;\n not_strictly_false_deprecated.set_name(builtin::kNotStrictlyFalseDeprecated);\n CEL_RETURN_IF_ERROR(not_strictly_false_deprecated.AddOverload(\n MakeOverloadDecl(StandardOverloads::kNotStrictlyFalseDeprecated,\n BoolType(), BoolType())));\n CEL_RETURN_IF_ERROR(\n builder.AddFunction(std::move(not_strictly_false_deprecated)));\n return absl::OkStatus();\n}\nabsl::Status AddTypeConversions(TypeCheckerBuilder& builder) {\n FunctionDecl to_dyn;\n to_dyn.set_name(builtin::kDyn);\n CEL_RETURN_IF_ERROR(to_dyn.AddOverload(\n MakeOverloadDecl(StandardOverloads::kToDyn, DynType(), TypeParamA())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_dyn)));\n FunctionDecl to_uint;\n to_uint.set_name(builtin::kUint);\n CEL_RETURN_IF_ERROR(to_uint.AddOverload(MakeOverloadDecl(\n StandardOverloads::kUintToUint, UintType(), UintType())));\n CEL_RETURN_IF_ERROR(to_uint.AddOverload(\n MakeOverloadDecl(StandardOverloads::kIntToUint, UintType(), IntType())));\n CEL_RETURN_IF_ERROR(to_uint.AddOverload(MakeOverloadDecl(\n StandardOverloads::kDoubleToUint, UintType(), DoubleType())));\n 
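// Note the declaration pattern used for every conversion function in
// AddTypeConversions(): one FunctionDecl per CEL function ("dyn", "uint",
// "int", ...), one MakeOverloadDecl per accepted operand (overload id first,
// then the result type, then the parameter types), and a final
// builder.AddFunction(std::move(decl)) once all overloads are attached.
// Overload ids follow the <operand>_to_<result> convention, e.g.
// "string_to_uint64".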
CEL_RETURN_IF_ERROR(to_uint.AddOverload(MakeOverloadDecl(\n StandardOverloads::kStringToUint, UintType(), StringType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_uint)));\n FunctionDecl to_int;\n to_int.set_name(builtin::kInt);\n CEL_RETURN_IF_ERROR(to_int.AddOverload(\n MakeOverloadDecl(StandardOverloads::kIntToInt, IntType(), IntType())));\n CEL_RETURN_IF_ERROR(to_int.AddOverload(\n MakeOverloadDecl(StandardOverloads::kUintToInt, IntType(), UintType())));\n CEL_RETURN_IF_ERROR(to_int.AddOverload(MakeOverloadDecl(\n StandardOverloads::kDoubleToInt, IntType(), DoubleType())));\n CEL_RETURN_IF_ERROR(to_int.AddOverload(MakeOverloadDecl(\n StandardOverloads::kStringToInt, IntType(), StringType())));\n CEL_RETURN_IF_ERROR(to_int.AddOverload(MakeOverloadDecl(\n StandardOverloads::kTimestampToInt, IntType(), TimestampType())));\n CEL_RETURN_IF_ERROR(to_int.AddOverload(MakeOverloadDecl(\n StandardOverloads::kDurationToInt, IntType(), DurationType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_int)));\n FunctionDecl to_double;\n to_double.set_name(builtin::kDouble);\n CEL_RETURN_IF_ERROR(to_double.AddOverload(MakeOverloadDecl(\n StandardOverloads::kDoubleToDouble, DoubleType(), DoubleType())));\n CEL_RETURN_IF_ERROR(to_double.AddOverload(MakeOverloadDecl(\n StandardOverloads::kIntToDouble, DoubleType(), IntType())));\n CEL_RETURN_IF_ERROR(to_double.AddOverload(MakeOverloadDecl(\n StandardOverloads::kUintToDouble, DoubleType(), UintType())));\n CEL_RETURN_IF_ERROR(to_double.AddOverload(MakeOverloadDecl(\n StandardOverloads::kStringToDouble, DoubleType(), StringType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_double)));\n FunctionDecl to_bool;\n to_bool.set_name(\"bool\");\n CEL_RETURN_IF_ERROR(to_bool.AddOverload(MakeOverloadDecl(\n StandardOverloads::kBoolToBool, BoolType(), BoolType())));\n CEL_RETURN_IF_ERROR(to_bool.AddOverload(MakeOverloadDecl(\n StandardOverloads::kStringToBool, BoolType(), StringType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_bool)));\n FunctionDecl to_string;\n to_string.set_name(builtin::kString);\n CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl(\n StandardOverloads::kStringToString, StringType(), StringType())));\n CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl(\n StandardOverloads::kBytesToString, StringType(), BytesType())));\n CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl(\n StandardOverloads::kBoolToString, StringType(), BoolType())));\n CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl(\n StandardOverloads::kDoubleToString, StringType(), DoubleType())));\n CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl(\n StandardOverloads::kIntToString, StringType(), IntType())));\n CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl(\n StandardOverloads::kUintToString, StringType(), UintType())));\n CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl(\n StandardOverloads::kTimestampToString, StringType(), TimestampType())));\n CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl(\n StandardOverloads::kDurationToString, StringType(), DurationType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_string)));\n FunctionDecl to_bytes;\n to_bytes.set_name(builtin::kBytes);\n CEL_RETURN_IF_ERROR(to_bytes.AddOverload(MakeOverloadDecl(\n StandardOverloads::kBytesToBytes, BytesType(), BytesType())));\n CEL_RETURN_IF_ERROR(to_bytes.AddOverload(MakeOverloadDecl(\n StandardOverloads::kStringToBytes, BytesType(), StringType())));\n 
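// AddFunction consumes the declaration, so it must come after every
// AddOverload call for that FunctionDecl; each decl is registered exactly
// once via std::move below. Also note that the bool() conversion above is
// named with a string literal ("bool") rather than a builtin:: constant,
// unlike its siblings.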
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_bytes)));\n FunctionDecl to_timestamp;\n to_timestamp.set_name(builtin::kTimestamp);\n CEL_RETURN_IF_ERROR(to_timestamp.AddOverload(\n MakeOverloadDecl(StandardOverloads::kTimestampToTimestamp,\n TimestampType(), TimestampType())));\n CEL_RETURN_IF_ERROR(to_timestamp.AddOverload(MakeOverloadDecl(\n StandardOverloads::kStringToTimestamp, TimestampType(), StringType())));\n CEL_RETURN_IF_ERROR(to_timestamp.AddOverload(MakeOverloadDecl(\n StandardOverloads::kIntToTimestamp, TimestampType(), IntType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_timestamp)));\n FunctionDecl to_duration;\n to_duration.set_name(builtin::kDuration);\n CEL_RETURN_IF_ERROR(to_duration.AddOverload(MakeOverloadDecl(\n StandardOverloads::kDurationToDuration, DurationType(), DurationType())));\n CEL_RETURN_IF_ERROR(to_duration.AddOverload(MakeOverloadDecl(\n StandardOverloads::kStringToDuration, DurationType(), StringType())));\n CEL_RETURN_IF_ERROR(to_duration.AddOverload(MakeOverloadDecl(\n StandardOverloads::kIntToDuration, DurationType(), IntType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_duration)));\n FunctionDecl to_type;\n to_type.set_name(builtin::kType);\n CEL_RETURN_IF_ERROR(to_type.AddOverload(MakeOverloadDecl(\n StandardOverloads::kToType, Type(TypeOfA()), TypeParamA())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_type)));\n return absl::OkStatus();\n}\nabsl::Status AddEqualityOps(TypeCheckerBuilder& builder) {\n FunctionDecl equals_op;\n equals_op.set_name(builtin::kEqual);\n CEL_RETURN_IF_ERROR(equals_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kEquals, BoolType(), TypeParamA(), TypeParamA())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(equals_op)));\n FunctionDecl not_equals_op;\n not_equals_op.set_name(builtin::kInequal);\n CEL_RETURN_IF_ERROR(not_equals_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kNotEquals, BoolType(), TypeParamA(), TypeParamA())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(not_equals_op)));\n return absl::OkStatus();\n}\nabsl::Status AddConatainerOps(TypeCheckerBuilder& builder) {\n FunctionDecl index;\n index.set_name(builtin::kIndex);\n CEL_RETURN_IF_ERROR(index.AddOverload(MakeOverloadDecl(\n StandardOverloads::kIndexList, TypeParamA(), ListOfA(), IntType())));\n CEL_RETURN_IF_ERROR(index.AddOverload(MakeOverloadDecl(\n StandardOverloads::kIndexMap, TypeParamB(), MapOfAB(), TypeParamA())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(index)));\n FunctionDecl in_op;\n in_op.set_name(builtin::kIn);\n CEL_RETURN_IF_ERROR(in_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kInList, BoolType(), TypeParamA(), ListOfA())));\n CEL_RETURN_IF_ERROR(in_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kInMap, BoolType(), TypeParamA(), MapOfAB())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(in_op)));\n FunctionDecl in_function_deprecated;\n in_function_deprecated.set_name(builtin::kInFunction);\n CEL_RETURN_IF_ERROR(in_function_deprecated.AddOverload(MakeOverloadDecl(\n StandardOverloads::kInList, BoolType(), TypeParamA(), ListOfA())));\n CEL_RETURN_IF_ERROR(in_function_deprecated.AddOverload(MakeOverloadDecl(\n StandardOverloads::kInMap, BoolType(), TypeParamA(), MapOfAB())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(in_function_deprecated)));\n FunctionDecl in_op_deprecated;\n in_op_deprecated.set_name(builtin::kInDeprecated);\n CEL_RETURN_IF_ERROR(in_op_deprecated.AddOverload(MakeOverloadDecl(\n 
StandardOverloads::kInList, BoolType(), TypeParamA(), ListOfA())));\n CEL_RETURN_IF_ERROR(in_op_deprecated.AddOverload(MakeOverloadDecl(\n StandardOverloads::kInMap, BoolType(), TypeParamA(), MapOfAB())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(in_op_deprecated)));\n FunctionDecl size;\n size.set_name(builtin::kSize);\n CEL_RETURN_IF_ERROR(size.AddOverload(\n MakeOverloadDecl(StandardOverloads::kSizeList, IntType(), ListOfA())));\n CEL_RETURN_IF_ERROR(size.AddOverload(MakeMemberOverloadDecl(\n StandardOverloads::kSizeListMember, IntType(), ListOfA())));\n CEL_RETURN_IF_ERROR(size.AddOverload(\n MakeOverloadDecl(StandardOverloads::kSizeMap, IntType(), MapOfAB())));\n CEL_RETURN_IF_ERROR(size.AddOverload(MakeMemberOverloadDecl(\n StandardOverloads::kSizeMapMember, IntType(), MapOfAB())));\n CEL_RETURN_IF_ERROR(size.AddOverload(\n MakeOverloadDecl(StandardOverloads::kSizeBytes, IntType(), BytesType())));\n CEL_RETURN_IF_ERROR(size.AddOverload(MakeMemberOverloadDecl(\n StandardOverloads::kSizeBytesMember, IntType(), BytesType())));\n CEL_RETURN_IF_ERROR(size.AddOverload(MakeOverloadDecl(\n StandardOverloads::kSizeString, IntType(), StringType())));\n CEL_RETURN_IF_ERROR(size.AddOverload(MakeMemberOverloadDecl(\n StandardOverloads::kSizeStringMember, IntType(), StringType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(size)));\n return absl::OkStatus();\n}\nabsl::Status AddRelationOps(TypeCheckerBuilder& builder) {\n FunctionDecl less_op;\n less_op.set_name(builtin::kLess);\n CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kLessInt, BoolType(), IntType(), IntType())));\n CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kLessUint, BoolType(), UintType(), UintType())));\n CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kLessDouble, BoolType(), DoubleType(), DoubleType())));\n CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kLessBool, BoolType(), BoolType(), BoolType())));\n CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kLessString, BoolType(), StringType(), StringType())));\n CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kLessBytes, BoolType(), BytesType(), BytesType())));\n CEL_RETURN_IF_ERROR(less_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kLessDuration, BoolType(),\n DurationType(), DurationType())));\n CEL_RETURN_IF_ERROR(less_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kLessTimestamp, BoolType(),\n TimestampType(), TimestampType())));\n FunctionDecl greater_op;\n greater_op.set_name(builtin::kGreater);\n CEL_RETURN_IF_ERROR(greater_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kGreaterInt, BoolType(), IntType(), IntType())));\n CEL_RETURN_IF_ERROR(greater_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kGreaterUint, BoolType(), UintType(), UintType())));\n CEL_RETURN_IF_ERROR(greater_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterDouble, BoolType(),\n DoubleType(), DoubleType())));\n CEL_RETURN_IF_ERROR(greater_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kGreaterBool, BoolType(), BoolType(), BoolType())));\n CEL_RETURN_IF_ERROR(greater_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterString, BoolType(),\n StringType(), StringType())));\n CEL_RETURN_IF_ERROR(greater_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kGreaterBytes, BoolType(), BytesType(), BytesType())));\n 
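// Each relational operator (<, >, <=, >=) receives the same eight
// homogeneous overloads -- bool, int, uint, double, string, bytes,
// duration, timestamp -- before any of the four decls is registered;
// cross-numeric (int/uint/double) variants are appended further down only
// when options().enable_cross_numeric_comparisons is set.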
CEL_RETURN_IF_ERROR(greater_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterDuration, BoolType(),\n DurationType(), DurationType())));\n CEL_RETURN_IF_ERROR(greater_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterTimestamp, BoolType(),\n TimestampType(), TimestampType())));\n FunctionDecl less_equals_op;\n less_equals_op.set_name(builtin::kLessOrEqual);\n CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kLessEqualsInt, BoolType(), IntType(), IntType())));\n CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kLessEqualsUint, BoolType(), UintType(), UintType())));\n CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kLessEqualsDouble, BoolType(),\n DoubleType(), DoubleType())));\n CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kLessEqualsBool, BoolType(), BoolType(), BoolType())));\n CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kLessEqualsString, BoolType(),\n StringType(), StringType())));\n CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kLessEqualsBytes, BoolType(),\n BytesType(), BytesType())));\n CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kLessEqualsDuration, BoolType(),\n DurationType(), DurationType())));\n CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kLessEqualsTimestamp, BoolType(),\n TimestampType(), TimestampType())));\n FunctionDecl greater_equals_op;\n greater_equals_op.set_name(builtin::kGreaterOrEqual);\n CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kGreaterEqualsInt, BoolType(), IntType(), IntType())));\n CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterEqualsUint, BoolType(),\n UintType(), UintType())));\n CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterEqualsDouble, BoolType(),\n DoubleType(), DoubleType())));\n CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterEqualsBool, BoolType(),\n BoolType(), BoolType())));\n CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterEqualsString, BoolType(),\n StringType(), StringType())));\n CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterEqualsBytes, BoolType(),\n BytesType(), BytesType())));\n CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterEqualsDuration, BoolType(),\n DurationType(), DurationType())));\n CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterEqualsTimestamp, BoolType(),\n TimestampType(), TimestampType())));\n if (builder.options().enable_cross_numeric_comparisons) {\n CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kLessIntUint, BoolType(), IntType(), UintType())));\n CEL_RETURN_IF_ERROR(less_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kLessIntDouble, BoolType(),\n IntType(), DoubleType())));\n CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl(\n StandardOverloads::kLessUintInt, BoolType(), UintType(), IntType())));\n CEL_RETURN_IF_ERROR(less_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kLessUintDouble, BoolType(),\n UintType(), DoubleType())));\n 
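// Still inside the enable_cross_numeric_comparisons block: every mixed
// ordered pair drawn from {int64, uint64, double} gets its own overload,
// six per comparison operator (e.g. "less_int64_uint64",
// "less_double_uint64").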
CEL_RETURN_IF_ERROR(less_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kLessDoubleInt, BoolType(),\n DoubleType(), IntType())));\n CEL_RETURN_IF_ERROR(less_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kLessDoubleUint, BoolType(),\n DoubleType(), UintType())));\n CEL_RETURN_IF_ERROR(greater_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterIntUint, BoolType(),\n IntType(), UintType())));\n CEL_RETURN_IF_ERROR(greater_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterIntDouble, BoolType(),\n IntType(), DoubleType())));\n CEL_RETURN_IF_ERROR(greater_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterUintInt, BoolType(),\n UintType(), IntType())));\n CEL_RETURN_IF_ERROR(greater_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterUintDouble, BoolType(),\n UintType(), DoubleType())));\n CEL_RETURN_IF_ERROR(greater_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterDoubleInt, BoolType(),\n DoubleType(), IntType())));\n CEL_RETURN_IF_ERROR(greater_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterDoubleUint, BoolType(),\n DoubleType(), UintType())));\n CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kLessEqualsIntUint, BoolType(),\n IntType(), UintType())));\n CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kLessEqualsIntDouble, BoolType(),\n IntType(), DoubleType())));\n CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kLessEqualsUintInt, BoolType(),\n UintType(), IntType())));\n CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kLessEqualsUintDouble, BoolType(),\n UintType(), DoubleType())));\n CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kLessEqualsDoubleInt, BoolType(),\n DoubleType(), IntType())));\n CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kLessEqualsDoubleUint, BoolType(),\n DoubleType(), UintType())));\n CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterEqualsIntUint, BoolType(),\n IntType(), UintType())));\n CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterEqualsIntDouble, BoolType(),\n IntType(), DoubleType())));\n CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterEqualsUintInt, BoolType(),\n UintType(), IntType())));\n CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterEqualsUintDouble,\n BoolType(), UintType(), DoubleType())));\n CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterEqualsDoubleInt, BoolType(),\n DoubleType(), IntType())));\n CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(\n MakeOverloadDecl(StandardOverloads::kGreaterEqualsDoubleUint,\n BoolType(), DoubleType(), UintType())));\n }\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(less_op)));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(greater_op)));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(less_equals_op)));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(greater_equals_op)));\n return absl::OkStatus();\n}\nabsl::Status AddStringFunctions(TypeCheckerBuilder& builder) {\n FunctionDecl contains;\n contains.set_name(builtin::kStringContains);\n CEL_RETURN_IF_ERROR(contains.AddOverload(\n MakeMemberOverloadDecl(StandardOverloads::kContainsString, 
BoolType(),\n StringType(), StringType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(contains)));\n FunctionDecl starts_with;\n starts_with.set_name(builtin::kStringStartsWith);\n CEL_RETURN_IF_ERROR(starts_with.AddOverload(\n MakeMemberOverloadDecl(StandardOverloads::kStartsWithString, BoolType(),\n StringType(), StringType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(starts_with)));\n FunctionDecl ends_with;\n ends_with.set_name(builtin::kStringEndsWith);\n CEL_RETURN_IF_ERROR(ends_with.AddOverload(\n MakeMemberOverloadDecl(StandardOverloads::kEndsWithString, BoolType(),\n StringType(), StringType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(ends_with)));\n return absl::OkStatus();\n}\nabsl::Status AddRegexFunctions(TypeCheckerBuilder& builder) {\n FunctionDecl matches;\n matches.set_name(builtin::kRegexMatch);\n CEL_RETURN_IF_ERROR(matches.AddOverload(\n MakeMemberOverloadDecl(StandardOverloads::kMatchesMember, BoolType(),\n StringType(), StringType())));\n CEL_RETURN_IF_ERROR(matches.AddOverload(MakeOverloadDecl(\n StandardOverloads::kMatches, BoolType(), StringType(), StringType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(matches)));\n return absl::OkStatus();\n}\nabsl::Status AddTimeFunctions(TypeCheckerBuilder& builder) {\n FunctionDecl get_full_year;\n get_full_year.set_name(builtin::kFullYear);\n CEL_RETURN_IF_ERROR(get_full_year.AddOverload(MakeMemberOverloadDecl(\n StandardOverloads::kTimestampToYear, IntType(), TimestampType())));\n CEL_RETURN_IF_ERROR(get_full_year.AddOverload(\n MakeMemberOverloadDecl(StandardOverloads::kTimestampToYearWithTz,\n IntType(), TimestampType(), StringType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_full_year)));\n FunctionDecl get_month;\n get_month.set_name(builtin::kMonth);\n CEL_RETURN_IF_ERROR(get_month.AddOverload(MakeMemberOverloadDecl(\n StandardOverloads::kTimestampToMonth, IntType(), TimestampType())));\n CEL_RETURN_IF_ERROR(get_month.AddOverload(\n MakeMemberOverloadDecl(StandardOverloads::kTimestampToMonthWithTz,\n IntType(), TimestampType(), StringType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_month)));\n FunctionDecl get_day_of_year;\n get_day_of_year.set_name(builtin::kDayOfYear);\n CEL_RETURN_IF_ERROR(get_day_of_year.AddOverload(MakeMemberOverloadDecl(\n StandardOverloads::kTimestampToDayOfYear, IntType(), TimestampType())));\n CEL_RETURN_IF_ERROR(get_day_of_year.AddOverload(\n MakeMemberOverloadDecl(StandardOverloads::kTimestampToDayOfYearWithTz,\n IntType(), TimestampType(), StringType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_day_of_year)));\n FunctionDecl get_day_of_month;\n get_day_of_month.set_name(builtin::kDayOfMonth);\n CEL_RETURN_IF_ERROR(get_day_of_month.AddOverload(MakeMemberOverloadDecl(\n StandardOverloads::kTimestampToDayOfMonth, IntType(), TimestampType())));\n CEL_RETURN_IF_ERROR(get_day_of_month.AddOverload(\n MakeMemberOverloadDecl(StandardOverloads::kTimestampToDayOfMonthWithTz,\n IntType(), TimestampType(), StringType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_day_of_month)));\n FunctionDecl get_date;\n get_date.set_name(builtin::kDate);\n CEL_RETURN_IF_ERROR(get_date.AddOverload(MakeMemberOverloadDecl(\n StandardOverloads::kTimestampToDate, IntType(), TimestampType())));\n CEL_RETURN_IF_ERROR(get_date.AddOverload(\n MakeMemberOverloadDecl(StandardOverloads::kTimestampToDateWithTz,\n IntType(), TimestampType(), StringType())));\n 
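// getDate() is the 1-based day-of-month accessor, hence the overload id
// "timestamp_to_day_of_month_1_based" (getDayOfMonth() above is the
// zero-based variant). Every timestamp accessor in this section also
// carries a *_with_tz member overload taking a time-zone string
// (e.g. "-08:00").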
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_date)));\n FunctionDecl get_day_of_week;\n get_day_of_week.set_name(builtin::kDayOfWeek);\n CEL_RETURN_IF_ERROR(get_day_of_week.AddOverload(MakeMemberOverloadDecl(\n StandardOverloads::kTimestampToDayOfWeek, IntType(), TimestampType())));\n CEL_RETURN_IF_ERROR(get_day_of_week.AddOverload(\n MakeMemberOverloadDecl(StandardOverloads::kTimestampToDayOfWeekWithTz,\n IntType(), TimestampType(), StringType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_day_of_week)));\n FunctionDecl get_hours;\n get_hours.set_name(builtin::kHours);\n CEL_RETURN_IF_ERROR(get_hours.AddOverload(MakeMemberOverloadDecl(\n StandardOverloads::kTimestampToHours, IntType(), TimestampType())));\n CEL_RETURN_IF_ERROR(get_hours.AddOverload(\n MakeMemberOverloadDecl(StandardOverloads::kTimestampToHoursWithTz,\n IntType(), TimestampType(), StringType())));\n CEL_RETURN_IF_ERROR(get_hours.AddOverload(MakeMemberOverloadDecl(\n StandardOverloads::kDurationToHours, IntType(), DurationType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_hours)));\n FunctionDecl get_minutes;\n get_minutes.set_name(builtin::kMinutes);\n CEL_RETURN_IF_ERROR(get_minutes.AddOverload(MakeMemberOverloadDecl(\n StandardOverloads::kTimestampToMinutes, IntType(), TimestampType())));\n CEL_RETURN_IF_ERROR(get_minutes.AddOverload(\n MakeMemberOverloadDecl(StandardOverloads::kTimestampToMinutesWithTz,\n IntType(), TimestampType(), StringType())));\n CEL_RETURN_IF_ERROR(get_minutes.AddOverload(MakeMemberOverloadDecl(\n StandardOverloads::kDurationToMinutes, IntType(), DurationType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_minutes)));\n FunctionDecl get_seconds;\n get_seconds.set_name(builtin::kSeconds);\n CEL_RETURN_IF_ERROR(get_seconds.AddOverload(MakeMemberOverloadDecl(\n StandardOverloads::kTimestampToSeconds, IntType(), TimestampType())));\n CEL_RETURN_IF_ERROR(get_seconds.AddOverload(\n MakeMemberOverloadDecl(StandardOverloads::kTimestampToSecondsWithTz,\n IntType(), TimestampType(), StringType())));\n CEL_RETURN_IF_ERROR(get_seconds.AddOverload(MakeMemberOverloadDecl(\n StandardOverloads::kDurationToSeconds, IntType(), DurationType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_seconds)));\n FunctionDecl get_milliseconds;\n get_milliseconds.set_name(builtin::kMilliseconds);\n CEL_RETURN_IF_ERROR(get_milliseconds.AddOverload(\n MakeMemberOverloadDecl(StandardOverloads::kTimestampToMilliseconds,\n IntType(), TimestampType())));\n CEL_RETURN_IF_ERROR(get_milliseconds.AddOverload(\n MakeMemberOverloadDecl(StandardOverloads::kTimestampToMillisecondsWithTz,\n IntType(), TimestampType(), StringType())));\n CEL_RETURN_IF_ERROR(get_milliseconds.AddOverload(MakeMemberOverloadDecl(\n StandardOverloads::kDurationToMilliseconds, IntType(), DurationType())));\n CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_milliseconds)));\n return absl::OkStatus();\n}\nabsl::Status AddTypeConstantVariables(TypeCheckerBuilder& builder) {\n CEL_RETURN_IF_ERROR(\n builder.AddVariable(MakeVariableDecl(\"bool\", TypeBoolType())));\n CEL_RETURN_IF_ERROR(\n builder.AddVariable(MakeVariableDecl(\"null_type\", TypeNullType())));\n CEL_RETURN_IF_ERROR(\n builder.AddVariable(MakeVariableDecl(builtin::kInt, TypeIntType())));\n CEL_RETURN_IF_ERROR(\n builder.AddVariable(MakeVariableDecl(builtin::kUint, TypeUintType())));\n CEL_RETURN_IF_ERROR(builder.AddVariable(\n MakeVariableDecl(builtin::kDouble, TypeDoubleType())));\n CEL_RETURN_IF_ERROR(builder.AddVariable(\n 
MakeVariableDecl(builtin::kString, TypeStringType())));\n CEL_RETURN_IF_ERROR(\n builder.AddVariable(MakeVariableDecl(builtin::kBytes, TypeBytesType())));\n CEL_RETURN_IF_ERROR(builder.AddVariable(\n MakeVariableDecl(builtin::kDuration, TypeDurationType())));\n CEL_RETURN_IF_ERROR(builder.AddVariable(\n MakeVariableDecl(builtin::kTimestamp, TypeTimestampType())));\n CEL_RETURN_IF_ERROR(\n builder.AddVariable(MakeVariableDecl(\"list\", TypeListType())));\n CEL_RETURN_IF_ERROR(\n builder.AddVariable(MakeVariableDecl(\"map\", TypeMapType())));\n CEL_RETURN_IF_ERROR(builder.AddVariable(MakeVariableDecl(\"type\", TypeOfA())));\n return absl::OkStatus();\n}\nabsl::Status AddEnumConstants(TypeCheckerBuilder& builder) {\n VariableDecl pb_null;\n pb_null.set_name(\"google.protobuf.NullValue.NULL_VALUE\");\n pb_null.set_type(NullType());\n pb_null.set_value(Constant(nullptr));\n CEL_RETURN_IF_ERROR(builder.AddVariable(std::move(pb_null)));\n return absl::OkStatus();\n}\nabsl::Status AddStandardLibraryDecls(TypeCheckerBuilder& builder) {\n CEL_RETURN_IF_ERROR(AddLogicalOps(builder));\n CEL_RETURN_IF_ERROR(AddArithmeticOps(builder));\n CEL_RETURN_IF_ERROR(AddTypeConversions(builder));\n CEL_RETURN_IF_ERROR(AddEqualityOps(builder));\n CEL_RETURN_IF_ERROR(AddConatainerOps(builder));\n CEL_RETURN_IF_ERROR(AddRelationOps(builder));\n CEL_RETURN_IF_ERROR(AddStringFunctions(builder));\n CEL_RETURN_IF_ERROR(AddRegexFunctions(builder));\n CEL_RETURN_IF_ERROR(AddTimeFunctions(builder));\n CEL_RETURN_IF_ERROR(AddTypeConstantVariables(builder));\n CEL_RETURN_IF_ERROR(AddEnumConstants(builder));\n return absl::OkStatus();\n}\n} \nCheckerLibrary StandardLibrary() { return {\"stdlib\", AddStandardLibraryDecls}; }\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"checker/standard_library.h\"\n#include \n#include \n#include \n#include \"absl/status/status.h\"\n#include \"absl/status/status_matchers.h\"\n#include \"base/ast_internal/ast_impl.h\"\n#include \"base/ast_internal/expr.h\"\n#include \"checker/internal/test_ast_helpers.h\"\n#include \"checker/type_checker.h\"\n#include \"checker/type_checker_builder.h\"\n#include \"checker/validation_result.h\"\n#include \"common/ast.h\"\n#include \"common/constant.h\"\n#include \"internal/testing.h\"\nnamespace cel {\nnamespace {\nusing ::absl_testing::IsOk;\nusing ::absl_testing::StatusIs;\nusing ::cel::ast_internal::AstImpl;\nusing ::cel::ast_internal::Reference;\nusing ::testing::IsEmpty;\nusing ::testing::Pointee;\nusing ::testing::Property;\nTEST(StandardLibraryTest, StandardLibraryAddsDecls) {\n TypeCheckerBuilder builder;\n EXPECT_THAT(builder.AddLibrary(StandardLibrary()), IsOk());\n EXPECT_THAT(std::move(builder).Build(), IsOk());\n}\nTEST(StandardLibraryTest, StandardLibraryErrorsIfAddedTwice) {\n TypeCheckerBuilder builder;\n EXPECT_THAT(builder.AddLibrary(StandardLibrary()), IsOk());\n EXPECT_THAT(builder.AddLibrary(StandardLibrary()),\n StatusIs(absl::StatusCode::kAlreadyExists));\n}\nclass StandardLibraryDefinitionsTest : public ::testing::Test {\n public:\n void SetUp() override {\n TypeCheckerBuilder builder;\n ASSERT_THAT(builder.AddLibrary(StandardLibrary()), IsOk());\n ASSERT_OK_AND_ASSIGN(stdlib_type_checker_, std::move(builder).Build());\n }\n protected:\n std::unique_ptr stdlib_type_checker_;\n};\nclass StdlibTypeVarDefinitionTest\n : public StandardLibraryDefinitionsTest,\n public testing::WithParamInterface {};\nTEST_P(StdlibTypeVarDefinitionTest, DefinesTypeConstants) {\n auto ast = std::make_unique();\n 
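// The test builds a one-node AST by hand (no parser involved): a bare
// identifier whose name is the test parameter ("bool", "int", "list", ...),
// then asserts the checker resolves it to a stdlib type constant by
// inspecting the reference recorded for expr id 1.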
ast->root_expr().mutable_ident_expr().set_name(GetParam());\n ast->root_expr().set_id(1);\n ASSERT_OK_AND_ASSIGN(ValidationResult result,\n stdlib_type_checker_->Check(std::move(ast)));\n EXPECT_THAT(result.GetIssues(), IsEmpty());\n ASSERT_OK_AND_ASSIGN(std::unique_ptr checked_ast, result.ReleaseAst());\n const auto& checked_impl = AstImpl::CastFromPublicAst(*checked_ast);\n EXPECT_THAT(checked_impl.GetReference(1),\n Pointee(Property(&Reference::name, GetParam())));\n}\nINSTANTIATE_TEST_SUITE_P(\n StdlibTypeVarDefinitions, StdlibTypeVarDefinitionTest,\n ::testing::Values(\"bool\", \"int\", \"uint\", \"double\", \"string\", \"bytes\",\n \"list\", \"map\", \"duration\", \"timestamp\", \"null_type\"),\n [](const auto& info) -> std::string { return info.param; });\nTEST_F(StandardLibraryDefinitionsTest, DefinesProtoStructNull) {\n auto ast = std::make_unique();\n auto& enumerator = ast->root_expr();\n enumerator.set_id(4);\n enumerator.mutable_select_expr().set_field(\"NULL_VALUE\");\n auto& enumeration = enumerator.mutable_select_expr().mutable_operand();\n enumeration.set_id(3);\n enumeration.mutable_select_expr().set_field(\"NullValue\");\n auto& protobuf = enumeration.mutable_select_expr().mutable_operand();\n protobuf.set_id(2);\n protobuf.mutable_select_expr().set_field(\"protobuf\");\n auto& google = protobuf.mutable_select_expr().mutable_operand();\n google.set_id(1);\n google.mutable_ident_expr().set_name(\"google\");\n ASSERT_OK_AND_ASSIGN(ValidationResult result,\n stdlib_type_checker_->Check(std::move(ast)));\n EXPECT_THAT(result.GetIssues(), IsEmpty());\n ASSERT_OK_AND_ASSIGN(std::unique_ptr checked_ast, result.ReleaseAst());\n const auto& checked_impl = AstImpl::CastFromPublicAst(*checked_ast);\n EXPECT_THAT(checked_impl.GetReference(4),\n Pointee(Property(&Reference::name,\n \"google.protobuf.NullValue.NULL_VALUE\")));\n}\nstruct DefinitionsTestCase {\n std::string expr;\n bool type_check_success = true;\n CheckerOptions options;\n};\nclass StdLibDefinitionsTest\n : public ::testing::TestWithParam {\n public:\n void SetUp() override {\n TypeCheckerBuilder builder;\n ASSERT_THAT(builder.AddLibrary(StandardLibrary()), IsOk());\n ASSERT_OK_AND_ASSIGN(stdlib_type_checker_, std::move(builder).Build());\n }\n protected:\n std::unique_ptr stdlib_type_checker_;\n};\nTEST_P(StdLibDefinitionsTest, Runner) {\n TypeCheckerBuilder builder(GetParam().options);\n ASSERT_THAT(builder.AddLibrary(StandardLibrary()), IsOk());\n ASSERT_OK_AND_ASSIGN(std::unique_ptr type_checker,\n std::move(builder).Build());\n ASSERT_OK_AND_ASSIGN(std::unique_ptr ast,\n checker_internal::MakeTestParsedAst(GetParam().expr));\n ASSERT_OK_AND_ASSIGN(auto result, type_checker->Check(std::move(ast)));\n EXPECT_EQ(result.IsValid(), GetParam().type_check_success);\n}\nINSTANTIATE_TEST_SUITE_P(\n Strings, StdLibDefinitionsTest,\n ::testing::Values(DefinitionsTestCase{\n \"'123'.size()\",\n },\n DefinitionsTestCase{\n \"size('123')\",\n },\n DefinitionsTestCase{\n \"'123' + '123'\",\n },\n DefinitionsTestCase{\n \"'123' + '123'\",\n },\n DefinitionsTestCase{\n \"'123' + '123'\",\n },\n DefinitionsTestCase{\n \"'123'.endsWith('123')\",\n },\n DefinitionsTestCase{\n \"'123'.startsWith('123')\",\n },\n DefinitionsTestCase{\n \"'123'.contains('123')\",\n },\n DefinitionsTestCase{\n \"'123'.matches(r'123')\",\n },\n DefinitionsTestCase{\n \"matches('123', r'123')\",\n }));\nINSTANTIATE_TEST_SUITE_P(TypeCasts, StdLibDefinitionsTest,\n ::testing::Values(DefinitionsTestCase{\n \"int(1)\",\n },\n DefinitionsTestCase{\n 
\"uint(1)\",\n },\n DefinitionsTestCase{\n \"double(1)\",\n },\n DefinitionsTestCase{\n \"string(1)\",\n },\n DefinitionsTestCase{\n \"bool('true')\",\n },\n DefinitionsTestCase{\n \"timestamp(0)\",\n },\n DefinitionsTestCase{\n \"duration('1s')\",\n }));\nINSTANTIATE_TEST_SUITE_P(Arithmetic, StdLibDefinitionsTest,\n ::testing::Values(DefinitionsTestCase{\n \"1 + 2\",\n },\n DefinitionsTestCase{\n \"1 - 2\",\n },\n DefinitionsTestCase{\n \"1 / 2\",\n },\n DefinitionsTestCase{\n \"1 * 2\",\n },\n DefinitionsTestCase{\n \"2 % 1\",\n },\n DefinitionsTestCase{\n \"-1\",\n }));\nINSTANTIATE_TEST_SUITE_P(\n TimeArithmetic, StdLibDefinitionsTest,\n ::testing::Values(DefinitionsTestCase{\n \"timestamp(0) + duration('1s')\",\n },\n DefinitionsTestCase{\n \"timestamp(0) - duration('1s')\",\n },\n DefinitionsTestCase{\n \"timestamp(0) - timestamp(0)\",\n },\n DefinitionsTestCase{\n \"duration('1s') + duration('1s')\",\n },\n DefinitionsTestCase{\n \"duration('1s') - duration('1s')\",\n }));\nINSTANTIATE_TEST_SUITE_P(NumericComparisons, StdLibDefinitionsTest,\n ::testing::Values(DefinitionsTestCase{\n \"1 > 2\",\n },\n DefinitionsTestCase{\n \"1 < 2\",\n },\n DefinitionsTestCase{\n \"1 >= 2\",\n },\n DefinitionsTestCase{\n \"1 <= 2\",\n }));\nINSTANTIATE_TEST_SUITE_P(\n CrossNumericComparisons, StdLibDefinitionsTest,\n ::testing::Values(\n DefinitionsTestCase{\n \"1u < 2\",\n true,\n {.enable_cross_numeric_comparisons = true}},\n DefinitionsTestCase{\n \"1u > 2\",\n true,\n {.enable_cross_numeric_comparisons = true}},\n DefinitionsTestCase{\n \"1u <= 2\",\n true,\n {.enable_cross_numeric_comparisons = true}},\n DefinitionsTestCase{\n \"1u >= 2\",\n true,\n {.enable_cross_numeric_comparisons = true}}));\nINSTANTIATE_TEST_SUITE_P(\n TimeComparisons, StdLibDefinitionsTest,\n ::testing::Values(DefinitionsTestCase{\n \"duration('1s') < duration('1s')\",\n },\n DefinitionsTestCase{\n \"duration('1s') > duration('1s')\",\n },\n DefinitionsTestCase{\n \"duration('1s') <= duration('1s')\",\n },\n DefinitionsTestCase{\n \"duration('1s') >= duration('1s')\",\n },\n DefinitionsTestCase{\n \"timestamp(0) < timestamp(0)\",\n },\n DefinitionsTestCase{\n \"timestamp(0) > timestamp(0)\",\n },\n DefinitionsTestCase{\n \"timestamp(0) <= timestamp(0)\",\n },\n DefinitionsTestCase{\n \"timestamp(0) >= timestamp(0)\",\n }));\nINSTANTIATE_TEST_SUITE_P(\n TimeAccessors, StdLibDefinitionsTest,\n ::testing::Values(\n DefinitionsTestCase{\n \"timestamp(0).getFullYear()\",\n },\n DefinitionsTestCase{\n \"timestamp(0).getFullYear('-08:00')\",\n },\n DefinitionsTestCase{\n \"timestamp(0).getMonth()\",\n },\n DefinitionsTestCase{\n \"timestamp(0).getMonth('-08:00')\",\n },\n DefinitionsTestCase{\n \"timestamp(0).getDayOfYear()\",\n },\n DefinitionsTestCase{\n \"timestamp(0).getDayOfYear('-08:00')\",\n },\n DefinitionsTestCase{\n \"timestamp(0).getDate()\",\n },\n DefinitionsTestCase{\n \"timestamp(0).getDate('-08:00')\",\n },\n DefinitionsTestCase{\n \"timestamp(0).getDayOfWeek()\",\n },\n DefinitionsTestCase{\n \"timestamp(0).getDayOfWeek('-08:00')\",\n },\n DefinitionsTestCase{\n \"timestamp(0).getHours()\",\n },\n DefinitionsTestCase{\n \"duration('1s').getHours()\",\n },\n DefinitionsTestCase{\n \"timestamp(0).getHours('-08:00')\",\n },\n DefinitionsTestCase{\n \"timestamp(0).getMinutes()\",\n },\n DefinitionsTestCase{\n \"duration('1s').getMinutes()\",\n },\n DefinitionsTestCase{\n \"timestamp(0).getMinutes('-08:00')\",\n },\n DefinitionsTestCase{\n \"timestamp(0).getSeconds()\",\n },\n DefinitionsTestCase{\n 
\"duration('1s').getSeconds()\",\n },\n DefinitionsTestCase{\n \"timestamp(0).getSeconds('-08:00')\",\n },\n DefinitionsTestCase{\n \"timestamp(0).getMilliseconds()\",\n },\n DefinitionsTestCase{\n \"duration('1s').getMilliseconds()\",\n },\n DefinitionsTestCase{\n \"timestamp(0).getMilliseconds('-08:00')\",\n }));\nINSTANTIATE_TEST_SUITE_P(Logic, StdLibDefinitionsTest,\n ::testing::Values(DefinitionsTestCase{\n \"true || false\",\n },\n DefinitionsTestCase{\n \"true && false\",\n },\n DefinitionsTestCase{\n \"!true\",\n }));\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/checker/standard_library.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/checker/standard_library_test.cc"},"Commit Hash":{"kind":"string","value":"4552db5798fb0853b131b783d8875794334fae7f"}}},{"rowIdx":226,"cells":{"ID":{"kind":"string","value":"c51d0205-c7bd-4898-90c7-d1f12f00a9d1"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"non_max_suppression_op"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/kernels/image/non_max_suppression_op.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/kernels/image/non_max_suppression_op_test.cc"},"Code":{"kind":"string","value":"#define EIGEN_USE_THREADS\n#include \"tensorflow/core/kernels/image/non_max_suppression_op.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"unsupported/Eigen/CXX11/Tensor\" \n#include \"tensorflow/core/framework/bounds_check.h\"\n#include \"tensorflow/core/framework/op_kernel.h\"\n#include \"tensorflow/core/framework/register_types.h\"\n#include \"tensorflow/core/framework/tensor.h\"\n#include \"tensorflow/core/framework/tensor_shape.h\"\n#include \"tensorflow/core/framework/types.h\"\n#include \"tensorflow/core/lib/core/status.h\"\n#include \"tensorflow/core/platform/logging.h\"\nnamespace tensorflow {\nnamespace {\ntypedef Eigen::ThreadPoolDevice CPUDevice;\nstatic inline void CheckScoreSizes(OpKernelContext* context, int num_boxes,\n const Tensor& scores) {\n OP_REQUIRES(context, scores.dims() == 1,\n errors::InvalidArgument(\n \"scores must be 1-D\", scores.shape().DebugString(),\n \" (Shape must be rank 1 but is rank \", scores.dims(), \")\"));\n OP_REQUIRES(\n context, scores.dim_size(0) == num_boxes,\n errors::InvalidArgument(\"scores has incompatible shape (Dimensions must \"\n \"be equal, but are \",\n num_boxes, \" and \", scores.dim_size(0), \")\"));\n}\nstatic inline void ParseAndCheckOverlapSizes(OpKernelContext* context,\n const Tensor& overlaps,\n int* num_boxes) {\n OP_REQUIRES(context, overlaps.dims() == 2,\n errors::InvalidArgument(\"overlaps must be 2-D\",\n overlaps.shape().DebugString()));\n *num_boxes = overlaps.dim_size(0);\n OP_REQUIRES(context, overlaps.dim_size(1) == *num_boxes,\n errors::InvalidArgument(\"overlaps must be square\",\n overlaps.shape().DebugString()));\n}\nstatic inline void ParseAndCheckBoxSizes(OpKernelContext* context,\n const Tensor& boxes, int* num_boxes) {\n OP_REQUIRES(context, boxes.dims() == 2,\n errors::InvalidArgument(\n \"boxes must be 2-D\", boxes.shape().DebugString(),\n \" (Shape must be rank 2 but is rank \", boxes.dims(), \")\"));\n *num_boxes = boxes.dim_size(0);\n OP_REQUIRES(context, boxes.dim_size(1) == 4,\n errors::InvalidArgument(\"boxes must have 4 columns (Dimension \"\n 
\"must be 4 but is \",\n boxes.dim_size(1), \")\"));\n}\nstatic inline void CheckCombinedNMSScoreSizes(OpKernelContext* context,\n int num_boxes,\n const Tensor& scores) {\n OP_REQUIRES(context, scores.dims() == 3,\n errors::InvalidArgument(\"scores must be 3-D\",\n scores.shape().DebugString()));\n OP_REQUIRES(context, scores.dim_size(1) == num_boxes,\n errors::InvalidArgument(\"scores has incompatible shape\"));\n}\nstatic inline void ParseAndCheckCombinedNMSBoxSizes(OpKernelContext* context,\n const Tensor& boxes,\n int* num_boxes,\n const int num_classes) {\n OP_REQUIRES(context, boxes.dims() == 4,\n errors::InvalidArgument(\"boxes must be 4-D\",\n boxes.shape().DebugString()));\n bool box_check = boxes.dim_size(2) == 1 || boxes.dim_size(2) == num_classes;\n OP_REQUIRES(context, box_check,\n errors::InvalidArgument(\n \"third dimension of boxes must be either 1 or num classes\"));\n *num_boxes = boxes.dim_size(1);\n OP_REQUIRES(context, boxes.dim_size(3) == 4,\n errors::InvalidArgument(\"boxes must have 4 columns\"));\n}\ntemplate \nstatic inline float IOU(typename TTypes::ConstTensor boxes, int i,\n int j) {\n const float ymin_i = Eigen::numext::mini(\n static_cast(boxes(i, 0)), static_cast(boxes(i, 2)));\n const float xmin_i = Eigen::numext::mini(\n static_cast(boxes(i, 1)), static_cast(boxes(i, 3)));\n const float ymax_i = Eigen::numext::maxi(\n static_cast(boxes(i, 0)), static_cast(boxes(i, 2)));\n const float xmax_i = Eigen::numext::maxi(\n static_cast(boxes(i, 1)), static_cast(boxes(i, 3)));\n const float ymin_j = Eigen::numext::mini(\n static_cast(boxes(j, 0)), static_cast(boxes(j, 2)));\n const float xmin_j = Eigen::numext::mini(\n static_cast(boxes(j, 1)), static_cast(boxes(j, 3)));\n const float ymax_j = Eigen::numext::maxi(\n static_cast(boxes(j, 0)), static_cast(boxes(j, 2)));\n const float xmax_j = Eigen::numext::maxi(\n static_cast(boxes(j, 1)), static_cast(boxes(j, 3)));\n const float area_i = (ymax_i - ymin_i) * (xmax_i - xmin_i);\n const float area_j = (ymax_j - ymin_j) * (xmax_j - xmin_j);\n if (area_i <= 0 || area_j <= 0) {\n return 0.0;\n }\n const float intersection_ymin = Eigen::numext::maxi(ymin_i, ymin_j);\n const float intersection_xmin = Eigen::numext::maxi(xmin_i, xmin_j);\n const float intersection_ymax = Eigen::numext::mini(ymax_i, ymax_j);\n const float intersection_xmax = Eigen::numext::mini(xmax_i, xmax_j);\n const float intersection_area =\n Eigen::numext::maxi(intersection_ymax - intersection_ymin, 0.0) *\n Eigen::numext::maxi(intersection_xmax - intersection_xmin, 0.0);\n return intersection_area / (area_i + area_j - intersection_area);\n}\nstatic inline float IOU(const float* boxes, int i, int j) {\n const float ymin_i = Eigen::numext::mini(boxes[i], boxes[i + 2]);\n const float xmin_i = Eigen::numext::mini(boxes[i + 1], boxes[i + 3]);\n const float ymax_i = Eigen::numext::maxi(boxes[i], boxes[i + 2]);\n const float xmax_i = Eigen::numext::maxi(boxes[i + 1], boxes[i + 3]);\n const float ymin_j = Eigen::numext::mini(boxes[j], boxes[j + 2]);\n const float xmin_j = Eigen::numext::mini(boxes[j + 1], boxes[j + 3]);\n const float ymax_j = Eigen::numext::maxi(boxes[j], boxes[j + 2]);\n const float xmax_j = Eigen::numext::maxi(boxes[j + 1], boxes[j + 3]);\n const float area_i = (ymax_i - ymin_i) * (xmax_i - xmin_i);\n const float area_j = (ymax_j - ymin_j) * (xmax_j - xmin_j);\n if (area_i <= 0 || area_j <= 0) {\n return 0.0;\n }\n const float intersection_ymin = Eigen::numext::maxi(ymin_i, ymin_j);\n const float intersection_xmin = 
// Overload of IOU() for raw float box data laid out as [num_boxes * 4], where
// i and j are element offsets of the first coordinate of each box.
static inline float IOU(const float* boxes, int i, int j) {
  const float ymin_i = Eigen::numext::mini<float>(boxes[i], boxes[i + 2]);
  const float xmin_i = Eigen::numext::mini<float>(boxes[i + 1], boxes[i + 3]);
  const float ymax_i = Eigen::numext::maxi<float>(boxes[i], boxes[i + 2]);
  const float xmax_i = Eigen::numext::maxi<float>(boxes[i + 1], boxes[i + 3]);
  const float ymin_j = Eigen::numext::mini<float>(boxes[j], boxes[j + 2]);
  const float xmin_j = Eigen::numext::mini<float>(boxes[j + 1], boxes[j + 3]);
  const float ymax_j = Eigen::numext::maxi<float>(boxes[j], boxes[j + 2]);
  const float xmax_j = Eigen::numext::maxi<float>(boxes[j + 1], boxes[j + 3]);
  const float area_i = (ymax_i - ymin_i) * (xmax_i - xmin_i);
  const float area_j = (ymax_j - ymin_j) * (xmax_j - xmin_j);
  if (area_i <= 0 || area_j <= 0) {
    return 0.0;
  }
  const float intersection_ymin = Eigen::numext::maxi<float>(ymin_i, ymin_j);
  const float intersection_xmin = Eigen::numext::maxi<float>(xmin_i, xmin_j);
  const float intersection_ymax = Eigen::numext::mini<float>(ymax_i, ymax_j);
  const float intersection_xmax = Eigen::numext::mini<float>(xmax_i, xmax_j);
  const float intersection_area =
      Eigen::numext::maxi<float>(intersection_ymax - intersection_ymin, 0.0) *
      Eigen::numext::maxi<float>(intersection_xmax - intersection_xmin, 0.0);
  return intersection_area / (area_i + area_j - intersection_area);
}

template <typename T>
static inline T Overlap(typename TTypes<T, 2>::ConstTensor overlaps, int i,
                        int j) {
  return overlaps(i, j);
}

template <typename T>
static inline std::function<float(int, int)> CreateIOUSimilarityFn(
    const Tensor& boxes) {
  typename TTypes<T, 2>::ConstTensor boxes_data = boxes.tensor<T, 2>();
  return std::bind(&IOU<T>, boxes_data, std::placeholders::_1,
                   std::placeholders::_2);
}

template <typename T>
static inline std::function<T(int, int)> CreateOverlapSimilarityFn(
    const Tensor& overlaps) {
  typename TTypes<T, 2>::ConstTensor overlaps_data = overlaps.tensor<T, 2>();
  return std::bind(&Overlap<T>, overlaps_data, std::placeholders::_1,
                   std::placeholders::_2);
}
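// Illustrative sketch of the IoU arithmetic implemented by the IOU() helpers
// above, using the two highest-scoring "cluster 1" boxes from the unit tests
// further below. The main() wrapper is hypothetical; compile it separately.
#if 0
#include <algorithm>
#include <cstdio>

int main() {
  // Boxes in [ymin, xmin, ymax, xmax] form.
  const float a[4] = {0.f, 0.f, 1.f, 1.f};
  const float b[4] = {0.f, 0.1f, 1.f, 1.1f};
  const float area_a = (a[2] - a[0]) * (a[3] - a[1]);  // 1.0
  const float area_b = (b[2] - b[0]) * (b[3] - b[1]);  // 1.0
  const float ih = std::max(std::min(a[2], b[2]) - std::max(a[0], b[0]), 0.f);
  const float iw = std::max(std::min(a[3], b[3]) - std::max(a[1], b[1]), 0.f);
  const float inter = ih * iw;                          // 0.9
  const float iou = inter / (area_a + area_b - inter);  // 0.9 / 1.1 ~ 0.818
  std::printf("IoU = %.3f\n", iou);  // above 0.5, so hard NMS suppresses b
  return 0;
}
#endif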
template <typename T>
void DoNonMaxSuppressionOp(OpKernelContext* context, const Tensor& scores,
                           int num_boxes, const Tensor& max_output_size,
                           const T similarity_threshold,
                           const T score_threshold, const T soft_nms_sigma,
                           const std::function<float(int, int)>& similarity_fn,
                           bool return_scores_tensor = false,
                           bool pad_to_max_output_size = false,
                           int* ptr_num_valid_outputs = nullptr) {
  const int output_size = max_output_size.scalar<int>()();
  OP_REQUIRES(context, output_size >= 0,
              errors::InvalidArgument("output size must be non-negative"));

  std::vector<T> scores_data(num_boxes);
  std::copy_n(scores.flat<T>().data(), num_boxes, scores_data.begin());

  // Data structure for a selection candidate in NMS.
  struct Candidate {
    int box_index;
    T score;
    int suppress_begin_index;
  };

  auto cmp = [](const Candidate bs_i, const Candidate bs_j) {
    return ((bs_i.score == bs_j.score) && (bs_i.box_index > bs_j.box_index)) ||
           bs_i.score < bs_j.score;
  };
  std::priority_queue<Candidate, std::deque<Candidate>, decltype(cmp)>
      candidate_priority_queue(cmp);
  for (int i = 0; i < scores_data.size(); ++i) {
    if (scores_data[i] > score_threshold) {
      candidate_priority_queue.emplace(Candidate({i, scores_data[i], 0}));
    }
  }

  T scale = static_cast<T>(0.0);
  bool is_soft_nms = soft_nms_sigma > static_cast<T>(0.0);
  if (is_soft_nms) {
    scale = static_cast<T>(-0.5) / soft_nms_sigma;
  }

  auto suppress_weight = [similarity_threshold, scale,
                          is_soft_nms](const T sim) {
    const T weight = Eigen::numext::exp(scale * sim * sim);
    // Without soft NMS the weight is binary: overlap above the threshold
    // suppresses the candidate outright.
    return is_soft_nms || sim <= similarity_threshold ? weight
                                                      : static_cast<T>(0.0);
  };

  std::vector<int> selected;
  std::vector<T> selected_scores;
  float similarity;
  T original_score;
  Candidate next_candidate;

  while (selected.size() < output_size && !candidate_priority_queue.empty()) {
    next_candidate = candidate_priority_queue.top();
    original_score = next_candidate.score;
    candidate_priority_queue.pop();

    // Overlapping boxes tend to have similar scores, so iterate through the
    // previously selected boxes backwards, starting from where this
    // candidate's last suppression pass left off.
    bool should_hard_suppress = false;
    for (int j = static_cast<int>(selected.size()) - 1;
         j >= next_candidate.suppress_begin_index; --j) {
      similarity = similarity_fn(next_candidate.box_index, selected[j]);
      next_candidate.score *= suppress_weight(static_cast<T>(similarity));
      if (!is_soft_nms && static_cast<T>(similarity) > similarity_threshold) {
        should_hard_suppress = true;
        break;
      }
      if (next_candidate.score <= score_threshold) break;
    }

    next_candidate.suppress_begin_index = selected.size();

    if (!should_hard_suppress) {
      if (next_candidate.score == original_score) {
        // The score was untouched; select this box.
        selected.push_back(next_candidate.box_index);
        selected_scores.push_back(next_candidate.score);
        continue;
      }
      if (next_candidate.score > score_threshold) {
        // Soft suppression occurred; requeue with the decayed score.
        candidate_priority_queue.push(next_candidate);
      }
    }
  }

  int num_valid_outputs = selected.size();
  if (pad_to_max_output_size) {
    selected.resize(output_size, 0);
    selected_scores.resize(output_size, static_cast<T>(0));
  }
  if (ptr_num_valid_outputs) {
    *ptr_num_valid_outputs = num_valid_outputs;
  }

  Tensor* output_indices = nullptr;
  TensorShape output_shape({static_cast<int>(selected.size())});
  OP_REQUIRES_OK(context,
                 context->allocate_output(0, output_shape, &output_indices));
  TTypes<int, 1>::Tensor output_indices_data =
      output_indices->tensor<int, 1>();
  std::copy_n(selected.begin(), selected.size(), output_indices_data.data());

  if (return_scores_tensor) {
    Tensor* output_scores = nullptr;
    OP_REQUIRES_OK(context,
                   context->allocate_output(1, output_shape, &output_scores));
    typename TTypes<T, 1>::Tensor output_scores_data =
        output_scores->tensor<T, 1>();
    std::copy_n(selected_scores.begin(), selected_scores.size(),
                output_scores_data.data());
  }
}
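// Numeric sketch of the Gaussian soft-NMS rescoring performed by
// suppress_weight() in DoNonMaxSuppressionOp above:
//   weight = exp(-0.5 / sigma * sim^2).
// With sigma = 0.5 and sim ~ 0.818 (the IoU of the two top cluster-1 test
// boxes), a 0.75 score decays to ~0.384, matching the expected value in
// TestSelectFromThreeClustersWithSoftNMS in the unit tests below. Standalone
// and hypothetical; compile separately.
#if 0
#include <cmath>
#include <cstdio>

int main() {
  const float sigma = 0.5f;
  const float sim = 0.8181f;          // IoU between test boxes 0 and 1
  const float scale = -0.5f / sigma;  // same scale the kernel computes
  const float weight = std::exp(scale * sim * sim);  // ~0.512
  std::printf("rescored = %.3f\n", 0.75f * weight);  // ~0.384
  return 0;
}
#endif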
struct ResultCandidate {
  int box_index;
  float score;
  int class_idx;
  float box_coord[4];
};

// Runs hard NMS for a single (batch, class) pair, writing survivors into the
// class's slice of result_candidate_vec.
void DoNMSPerClass(int batch_idx, int class_idx, const float* boxes_data,
                   const float* scores_data, int num_boxes, int q,
                   int num_classes, const int size_per_class,
                   const float score_threshold, const float iou_threshold,
                   std::vector<ResultCandidate>& result_candidate_vec) {
  // Data structure for a selection candidate in NMS.
  struct Candidate {
    int box_index;
    float score;
  };
  auto cmp = [](const Candidate bs_i, const Candidate bs_j) {
    return bs_i.score < bs_j.score;
  };
  std::priority_queue<Candidate, std::deque<Candidate>, decltype(cmp)>
      candidate_priority_queue(cmp);
  float temp_score;
  for (int i = 0; i < num_boxes; ++i) {
    temp_score = scores_data[i * num_classes + class_idx];
    if (temp_score > score_threshold) {
      candidate_priority_queue.emplace(Candidate({i, temp_score}));
    }
  }

  std::vector<int> selected;
  Candidate next_candidate;

  // Boxes may be shared across classes (q == 1) or given per class.
  int candidate_box_data_idx, selected_box_data_idx, class_box_idx;
  class_box_idx = (q > 1) ? class_idx : 0;

  float iou;
  while (selected.size() < size_per_class &&
         !candidate_priority_queue.empty()) {
    next_candidate = candidate_priority_queue.top();
    candidate_priority_queue.pop();

    candidate_box_data_idx = (next_candidate.box_index * q + class_box_idx) * 4;

    // Overlapping boxes tend to have similar scores, so iterate through the
    // previously selected boxes backwards.
    bool should_select = true;
    for (int j = selected.size() - 1; j >= 0; --j) {
      selected_box_data_idx = (selected[j] * q + class_box_idx) * 4;
      iou = IOU(boxes_data, candidate_box_data_idx, selected_box_data_idx);
      if (iou > iou_threshold) {
        should_select = false;
        break;
      }
    }

    if (should_select) {
      // Survivors are appended in score order within this class's slice.
      result_candidate_vec[selected.size() + size_per_class * class_idx] = {
          next_candidate.box_index,
          next_candidate.score,
          class_idx,
          {boxes_data[candidate_box_data_idx],
           boxes_data[candidate_box_data_idx + 1],
           boxes_data[candidate_box_data_idx + 2],
           boxes_data[candidate_box_data_idx + 3]}};
      selected.push_back(next_candidate.box_index);
    }
  }
}

// Merges the per-class survivors for one batch element into score-sorted
// outputs, optionally clipping coordinates to [0, 1].
void SelectResultPerBatch(std::vector<float>& nmsed_boxes,
                          std::vector<float>& nmsed_scores,
                          std::vector<float>& nmsed_classes,
                          std::vector<ResultCandidate>& result_candidate_vec,
                          std::vector<int>& final_valid_detections,
                          const int batch_idx, int total_size_per_batch,
                          bool pad_per_class, int max_size_per_batch,
                          bool clip_boxes, int per_batch_size) {
  auto rc_cmp = [](const ResultCandidate rc_i, const ResultCandidate rc_j) {
    return rc_i.score > rc_j.score;
  };
  std::sort(result_candidate_vec.begin(), result_candidate_vec.end(), rc_cmp);

  int max_detections = 0;
  int result_candidate_size =
      std::count_if(result_candidate_vec.begin(), result_candidate_vec.end(),
                    [](ResultCandidate rc) { return rc.box_index > -1; });
  if (!pad_per_class) {
    max_detections = std::min(result_candidate_size, total_size_per_batch);
  } else {
    max_detections = std::min(per_batch_size, result_candidate_size);
  }

  final_valid_detections[batch_idx] = max_detections;

  int curr_total_size = max_detections;
  int result_idx = 0;
  // Take the top max_detections candidates across all classes.
  while (curr_total_size > 0 && result_idx < result_candidate_vec.size()) {
    ResultCandidate next_candidate = result_candidate_vec[result_idx++];
    if (clip_boxes) {
      const float box_min = 0.0;
      const float box_max = 1.0;
      nmsed_boxes.push_back(
          std::max(std::min(next_candidate.box_coord[0], box_max), box_min));
      nmsed_boxes.push_back(
          std::max(std::min(next_candidate.box_coord[1], box_max), box_min));
      nmsed_boxes.push_back(
          std::max(std::min(next_candidate.box_coord[2], box_max), box_min));
      nmsed_boxes.push_back(
          std::max(std::min(next_candidate.box_coord[3], box_max), box_min));
    } else {
      nmsed_boxes.push_back(next_candidate.box_coord[0]);
      nmsed_boxes.push_back(next_candidate.box_coord[1]);
      nmsed_boxes.push_back(next_candidate.box_coord[2]);
      nmsed_boxes.push_back(next_candidate.box_coord[3]);
    }
    nmsed_scores.push_back(next_candidate.score);
    nmsed_classes.push_back(next_candidate.class_idx);
    curr_total_size--;
  }

  nmsed_boxes.resize(per_batch_size * 4, 0);
  nmsed_scores.resize(per_batch_size, 0);
  nmsed_classes.resize(per_batch_size, 0);
}
void BatchedNonMaxSuppressionOp(
    OpKernelContext* context, const Tensor& inp_boxes, const Tensor& inp_scores,
    int num_boxes, const int max_size_per_class, const int total_size_per_batch,
    const float score_threshold, const float iou_threshold,
    bool pad_per_class = false, bool clip_boxes = true) {
  const int num_batches = inp_boxes.dim_size(0);
  int num_classes = inp_scores.dim_size(2);
  int q = inp_boxes.dim_size(2);

  const float* scores_data =
      const_cast<float*>(inp_scores.flat<float>().data());
  const float* boxes_data = const_cast<float*>(inp_boxes.flat<float>().data());

  int boxes_per_batch = num_boxes * q * 4;
  int scores_per_batch = num_boxes * num_classes;
  const int size_per_class = std::min(max_size_per_class, num_boxes);
  // Per-batch slots, pre-filled with sentinel candidates (box_index == -1).
  std::vector<std::vector<ResultCandidate>> result_candidate_vec(
      num_batches,
      std::vector<ResultCandidate>(size_per_class * num_classes,
                                   {-1, -1.0, -1, {0.0, 0.0, 0.0, 0.0}}));

  std::vector<std::vector<float>> nmsed_boxes(num_batches);
  std::vector<std::vector<float>> nmsed_scores(num_batches);
  std::vector<std::vector<float>> nmsed_classes(num_batches);
  std::vector<int> final_valid_detections(num_batches);

  auto shard_nms = [&](int begin, int end) {
    for (int idx = begin; idx < end; ++idx) {
      int batch_idx = idx / num_classes;
      int class_idx = idx % num_classes;
      DoNMSPerClass(batch_idx, class_idx,
                    boxes_data + boxes_per_batch * batch_idx,
                    scores_data + scores_per_batch * batch_idx, num_boxes, q,
                    num_classes, size_per_class, score_threshold, iou_threshold,
                    result_candidate_vec[batch_idx]);
    }
  };

  int length = num_batches * num_classes;
  int input_bytes = num_boxes * 10 * sizeof(float);
  int output_bytes = num_boxes * 10 * sizeof(float);
  int compute_cycles = Eigen::TensorOpCost::AddCost<int>() * num_boxes * 14 +
                       Eigen::TensorOpCost::MulCost<int>() * num_boxes * 9 +
                       Eigen::TensorOpCost::MulCost<float>() * num_boxes * 9 +
                       Eigen::TensorOpCost::AddCost<float>() * num_boxes * 8;
  const Eigen::TensorOpCost cost(input_bytes, output_bytes, compute_cycles);
  const CPUDevice& d = context->eigen_device<CPUDevice>();
  d.parallelFor(length, cost, shard_nms);

  int per_batch_size = total_size_per_batch;
  // Clamp the product to avoid int overflow.
  int max_total_size = static_cast<int>(
      std::min(static_cast<int64_t>(std::numeric_limits<int>::max()),
               static_cast<int64_t>(max_size_per_class) * num_classes));
  if (pad_per_class) {
    per_batch_size = std::min(total_size_per_batch, max_total_size);
  }

  Tensor* valid_detections_t = nullptr;
  TensorShape valid_detections_shape({num_batches});
  OP_REQUIRES_OK(context, context->allocate_output(3, valid_detections_shape,
                                                   &valid_detections_t));
  auto valid_detections_flat = valid_detections_t->template flat<int>();

  auto shard_result = [&](int begin, int end) {
    for (int batch_idx = begin; batch_idx < end; ++batch_idx) {
      SelectResultPerBatch(
          nmsed_boxes[batch_idx], nmsed_scores[batch_idx],
          nmsed_classes[batch_idx], result_candidate_vec[batch_idx],
          final_valid_detections, batch_idx, total_size_per_batch,
          pad_per_class, max_total_size, clip_boxes, per_batch_size);
      valid_detections_flat(batch_idx) = final_valid_detections[batch_idx];
    }
  };
  length = num_batches;
  input_bytes =
      num_boxes * 10 * sizeof(float) + per_batch_size * 6 * sizeof(float);
  output_bytes =
      num_boxes * 5 * sizeof(float) + per_batch_size * 6 * sizeof(float);
  compute_cycles = Eigen::TensorOpCost::AddCost<int>() * num_boxes * 5 +
                   Eigen::TensorOpCost::AddCost<float>() * num_boxes * 5;
  const Eigen::TensorOpCost cost_result(input_bytes, output_bytes,
                                        compute_cycles);
  d.parallelFor(length, cost_result, shard_result);

  Tensor* nmsed_boxes_t = nullptr;
  TensorShape boxes_shape({num_batches, per_batch_size, 4});
  OP_REQUIRES_OK(context,
                 context->allocate_output(0, boxes_shape, &nmsed_boxes_t));
  auto nmsed_boxes_flat = nmsed_boxes_t->template flat<float>();

  Tensor* nmsed_scores_t = nullptr;
  TensorShape scores_shape({num_batches, per_batch_size});
  OP_REQUIRES_OK(context,
                 context->allocate_output(1, scores_shape, &nmsed_scores_t));
  auto nmsed_scores_flat = nmsed_scores_t->template flat<float>();

  Tensor* nmsed_classes_t = nullptr;
  OP_REQUIRES_OK(context,
                 context->allocate_output(2, scores_shape, &nmsed_classes_t));
  auto nmsed_classes_flat = nmsed_classes_t->template flat<float>();

  auto shard_copy_result = [&](int begin, int end) {
    for (int idx = begin; idx < end; ++idx) {
      int batch_idx = idx / per_batch_size;
      int j = idx % per_batch_size;
      nmsed_scores_flat(idx) = nmsed_scores[batch_idx][j];
      nmsed_classes_flat(idx) = nmsed_classes[batch_idx][j];
      for (int k = 0; k < 4; ++k) {
        nmsed_boxes_flat(idx * 4 + k) = nmsed_boxes[batch_idx][j * 4 + k];
      }
    }
  };
  length = num_batches * per_batch_size;
  input_bytes = 6 * sizeof(float);
  output_bytes = 6 * sizeof(float);
  compute_cycles = Eigen::TensorOpCost::AddCost<int>() * 2 +
                   Eigen::TensorOpCost::MulCost<int>() * 2 +
                   Eigen::TensorOpCost::DivCost<int>() * 2;
  const Eigen::TensorOpCost cost_copy_result(input_bytes, output_bytes,
                                             compute_cycles);
  d.parallelFor(length, cost_copy_result, shard_copy_result);
}
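// Illustrative sketch of the output-width arithmetic in
// BatchedNonMaxSuppressionOp above. The concrete values are hypothetical and
// chosen only to show the pad_per_class branch; compile separately.
#if 0
#include <algorithm>
#include <cstdio>

int main() {
  const int max_size_per_class = 5;
  const int num_classes = 3;
  const int total_size_per_batch = 10;  // the op's max_total_size input
  // Mirrors the kernel: per-class capacity times class count (the kernel
  // additionally clamps the product to INT_MAX via int64_t arithmetic).
  const int max_total_size = max_size_per_class * num_classes;  // 15
  // pad_per_class == false: second output dim is total_size_per_batch (10).
  // pad_per_class == true : it is min(total_size_per_batch, max_total_size).
  std::printf("per_batch_size = %d\n",
              std::min(total_size_per_batch, max_total_size));  // 10
  return 0;
}
#endif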
// Extracts a scalar of type T from a tensor whose dtype may be any of the
// supported floating-point types.
template <typename T>
T GetScalar(const Tensor& tensor) {
  switch (tensor.dtype()) {
    case DT_FLOAT:
      return static_cast<T>(tensor.scalar<float>()());
    case DT_DOUBLE:
      return static_cast<T>(tensor.scalar<double>()());
    case DT_BFLOAT16:
      return static_cast<T>(tensor.scalar<Eigen::bfloat16>()());
    case DT_HALF:
      return static_cast<T>(tensor.scalar<Eigen::half>()());
    default:
      DCHECK(false) << "Unsupported type " << tensor.dtype();
      break;
  }
  return static_cast<T>(0);
}

}  // namespace

template <typename Device>
class NonMaxSuppressionOp : public OpKernel {
 public:
  explicit NonMaxSuppressionOp(OpKernelConstruction* context)
      : OpKernel(context) {
    OP_REQUIRES_OK(context, context->GetAttr("iou_threshold", &iou_threshold_));
  }

  void Compute(OpKernelContext* context) override {
    // boxes: [num_boxes, 4]
    const Tensor& boxes = context->input(0);
    // scores: [num_boxes]
    const Tensor& scores = context->input(1);
    // max_output_size: scalar
    const Tensor& max_output_size = context->input(2);
    OP_REQUIRES(
        context, TensorShapeUtils::IsScalar(max_output_size.shape()),
        errors::InvalidArgument("max_output_size must be 0-D, got shape ",
                                max_output_size.shape().DebugString()));

    OP_REQUIRES(context, iou_threshold_ >= 0 && iou_threshold_ <= 1,
                errors::InvalidArgument("iou_threshold must be in [0, 1]"));
    int num_boxes = 0;
    ParseAndCheckBoxSizes(context, boxes, &num_boxes);
    CheckScoreSizes(context, num_boxes, scores);
    if (!context->status().ok()) {
      return;
    }
    auto similarity_fn = CreateIOUSimilarityFn<float>(boxes);

    const float score_threshold_val = std::numeric_limits<float>::lowest();
    const float dummy_soft_nms_sigma = static_cast<float>(0.0);
    DoNonMaxSuppressionOp(context, scores, num_boxes, max_output_size,
                          iou_threshold_, score_threshold_val,
                          dummy_soft_nms_sigma, similarity_fn);
  }

 private:
  float iou_threshold_;
};
errors::InvalidArgument(\"iou_threshold must be in [0, 1]\"));\n int num_boxes = 0;\n ParseAndCheckBoxSizes(context, boxes, &num_boxes);\n CheckScoreSizes(context, num_boxes, scores);\n if (!context->status().ok()) {\n return;\n }\n auto similarity_fn = CreateIOUSimilarityFn(boxes);\n const T score_threshold_val = std::numeric_limits::lowest();\n const T dummy_soft_nms_sigma = static_cast(0.0);\n DoNonMaxSuppressionOp(context, scores, num_boxes, max_output_size,\n iou_threshold_val, score_threshold_val,\n dummy_soft_nms_sigma, similarity_fn);\n }\n};\ntemplate \nclass NonMaxSuppressionV3Op : public OpKernel {\n public:\n explicit NonMaxSuppressionV3Op(OpKernelConstruction* context)\n : OpKernel(context) {}\n void Compute(OpKernelContext* context) override {\n const Tensor& boxes = context->input(0);\n const Tensor& scores = context->input(1);\n const Tensor& max_output_size = context->input(2);\n OP_REQUIRES(\n context, TensorShapeUtils::IsScalar(max_output_size.shape()),\n errors::InvalidArgument(\"max_output_size must be 0-D, got shape \",\n max_output_size.shape().DebugString(),\n \" (Shape must be rank 0 but is \", \"rank \",\n max_output_size.dims(), \")\"));\n const Tensor& iou_threshold = context->input(3);\n OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()),\n errors::InvalidArgument(\"iou_threshold must be 0-D, got shape \",\n iou_threshold.shape().DebugString(),\n \" (Shape must be rank 0 but is rank \",\n iou_threshold.dims(), \")\"));\n const T iou_threshold_val = GetScalar(iou_threshold);\n OP_REQUIRES(context,\n iou_threshold_val >= static_cast(0.0) &&\n iou_threshold_val <= static_cast(1.0),\n errors::InvalidArgument(\"iou_threshold must be in [0, 1]\"));\n const Tensor& score_threshold = context->input(4);\n OP_REQUIRES(\n context, TensorShapeUtils::IsScalar(score_threshold.shape()),\n errors::InvalidArgument(\"score_threshold must be 0-D, got shape \",\n score_threshold.shape().DebugString()));\n const T score_threshold_val = GetScalar(score_threshold);\n int num_boxes = 0;\n ParseAndCheckBoxSizes(context, boxes, &num_boxes);\n CheckScoreSizes(context, num_boxes, scores);\n if (!context->status().ok()) {\n return;\n }\n auto similarity_fn = CreateIOUSimilarityFn(boxes);\n const T dummy_soft_nms_sigma = static_cast(0.0);\n DoNonMaxSuppressionOp(context, scores, num_boxes, max_output_size,\n iou_threshold_val, score_threshold_val,\n dummy_soft_nms_sigma, similarity_fn);\n }\n};\ntemplate \nclass NonMaxSuppressionV4Op : public OpKernel {\n public:\n explicit NonMaxSuppressionV4Op(OpKernelConstruction* context)\n : OpKernel(context) {\n OP_REQUIRES_OK(context, context->GetAttr(\"pad_to_max_output_size\",\n &pad_to_max_output_size_));\n }\n void Compute(OpKernelContext* context) override {\n const Tensor& boxes = context->input(0);\n const Tensor& scores = context->input(1);\n const Tensor& max_output_size = context->input(2);\n OP_REQUIRES(\n context, TensorShapeUtils::IsScalar(max_output_size.shape()),\n errors::InvalidArgument(\"max_output_size must be 0-D, got shape \",\n max_output_size.shape().DebugString()));\n const Tensor& iou_threshold = context->input(3);\n OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()),\n errors::InvalidArgument(\"iou_threshold must be 0-D, got shape \",\n iou_threshold.shape().DebugString()));\n const T iou_threshold_val = GetScalar(iou_threshold);\n OP_REQUIRES(context,\n iou_threshold_val >= static_cast(0.0) &&\n iou_threshold_val <= static_cast(1.0),\n errors::InvalidArgument(\"iou_threshold must be 
in [0, 1]\"));\n const Tensor& score_threshold = context->input(4);\n OP_REQUIRES(\n context, TensorShapeUtils::IsScalar(score_threshold.shape()),\n errors::InvalidArgument(\"score_threshold must be 0-D, got shape \",\n score_threshold.shape().DebugString()));\n const T score_threshold_val = GetScalar(score_threshold);\n int num_boxes = 0;\n ParseAndCheckBoxSizes(context, boxes, &num_boxes);\n CheckScoreSizes(context, num_boxes, scores);\n if (!context->status().ok()) {\n return;\n }\n auto similarity_fn = CreateIOUSimilarityFn(boxes);\n int num_valid_outputs;\n bool return_scores_tensor_ = false;\n const T dummy_soft_nms_sigma = static_cast(0.0);\n DoNonMaxSuppressionOp(\n context, scores, num_boxes, max_output_size, iou_threshold_val,\n score_threshold_val, dummy_soft_nms_sigma, similarity_fn,\n return_scores_tensor_, pad_to_max_output_size_, &num_valid_outputs);\n if (!context->status().ok()) {\n return;\n }\n Tensor* num_outputs_t = nullptr;\n OP_REQUIRES_OK(context, context->allocate_output(\n 1, tensorflow::TensorShape{}, &num_outputs_t));\n num_outputs_t->scalar().setConstant(num_valid_outputs);\n }\n private:\n bool pad_to_max_output_size_;\n};\ntemplate \nclass NonMaxSuppressionV5Op : public OpKernel {\n public:\n explicit NonMaxSuppressionV5Op(OpKernelConstruction* context)\n : OpKernel(context) {\n OP_REQUIRES_OK(context, context->GetAttr(\"pad_to_max_output_size\",\n &pad_to_max_output_size_));\n }\n void Compute(OpKernelContext* context) override {\n const Tensor& boxes = context->input(0);\n const Tensor& scores = context->input(1);\n const Tensor& max_output_size = context->input(2);\n OP_REQUIRES(\n context, TensorShapeUtils::IsScalar(max_output_size.shape()),\n errors::InvalidArgument(\"max_output_size must be 0-D, got shape \",\n max_output_size.shape().DebugString()));\n const Tensor& iou_threshold = context->input(3);\n OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()),\n errors::InvalidArgument(\"iou_threshold must be 0-D, got shape \",\n iou_threshold.shape().DebugString()));\n const T iou_threshold_val = iou_threshold.scalar()();\n OP_REQUIRES(context,\n iou_threshold_val >= static_cast(0.0) &&\n iou_threshold_val <= static_cast(1.0),\n errors::InvalidArgument(\"iou_threshold must be in [0, 1]\"));\n const Tensor& score_threshold = context->input(4);\n OP_REQUIRES(\n context, TensorShapeUtils::IsScalar(score_threshold.shape()),\n errors::InvalidArgument(\"score_threshold must be 0-D, got shape \",\n score_threshold.shape().DebugString()));\n const T score_threshold_val = score_threshold.scalar()();\n const Tensor& soft_nms_sigma = context->input(5);\n OP_REQUIRES(\n context, TensorShapeUtils::IsScalar(soft_nms_sigma.shape()),\n errors::InvalidArgument(\"soft_nms_sigma must be 0-D, got shape \",\n soft_nms_sigma.shape().DebugString()));\n const T soft_nms_sigma_val = soft_nms_sigma.scalar()();\n OP_REQUIRES(context, soft_nms_sigma_val >= static_cast(0.0),\n errors::InvalidArgument(\"soft_nms_sigma_val must be >= 0\"));\n int num_boxes = 0;\n ParseAndCheckBoxSizes(context, boxes, &num_boxes);\n CheckScoreSizes(context, num_boxes, scores);\n if (!context->status().ok()) {\n return;\n }\n auto similarity_fn = CreateIOUSimilarityFn(boxes);\n int num_valid_outputs;\n const bool return_scores_tensor_ = true;\n DoNonMaxSuppressionOp(\n context, scores, num_boxes, max_output_size, iou_threshold_val,\n score_threshold_val, soft_nms_sigma_val, similarity_fn,\n return_scores_tensor_, pad_to_max_output_size_, &num_valid_outputs);\n if 
template <typename Device, typename T>
class NonMaxSuppressionV5Op : public OpKernel {
 public:
  explicit NonMaxSuppressionV5Op(OpKernelConstruction* context)
      : OpKernel(context) {
    OP_REQUIRES_OK(context, context->GetAttr("pad_to_max_output_size",
                                             &pad_to_max_output_size_));
  }

  void Compute(OpKernelContext* context) override {
    const Tensor& boxes = context->input(0);
    const Tensor& scores = context->input(1);
    const Tensor& max_output_size = context->input(2);
    OP_REQUIRES(
        context, TensorShapeUtils::IsScalar(max_output_size.shape()),
        errors::InvalidArgument("max_output_size must be 0-D, got shape ",
                                max_output_size.shape().DebugString()));
    const Tensor& iou_threshold = context->input(3);
    OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()),
                errors::InvalidArgument("iou_threshold must be 0-D, got shape ",
                                        iou_threshold.shape().DebugString()));
    const T iou_threshold_val = iou_threshold.scalar<T>()();
    OP_REQUIRES(context,
                iou_threshold_val >= static_cast<T>(0.0) &&
                    iou_threshold_val <= static_cast<T>(1.0),
                errors::InvalidArgument("iou_threshold must be in [0, 1]"));
    const Tensor& score_threshold = context->input(4);
    OP_REQUIRES(
        context, TensorShapeUtils::IsScalar(score_threshold.shape()),
        errors::InvalidArgument("score_threshold must be 0-D, got shape ",
                                score_threshold.shape().DebugString()));
    const T score_threshold_val = score_threshold.scalar<T>()();
    const Tensor& soft_nms_sigma = context->input(5);
    OP_REQUIRES(
        context, TensorShapeUtils::IsScalar(soft_nms_sigma.shape()),
        errors::InvalidArgument("soft_nms_sigma must be 0-D, got shape ",
                                soft_nms_sigma.shape().DebugString()));
    const T soft_nms_sigma_val = soft_nms_sigma.scalar<T>()();
    OP_REQUIRES(context, soft_nms_sigma_val >= static_cast<T>(0.0),
                errors::InvalidArgument("soft_nms_sigma_val must be >= 0"));

    int num_boxes = 0;
    ParseAndCheckBoxSizes(context, boxes, &num_boxes);
    CheckScoreSizes(context, num_boxes, scores);
    if (!context->status().ok()) {
      return;
    }

    auto similarity_fn = CreateIOUSimilarityFn<T>(boxes);

    int num_valid_outputs;
    // V5 always returns a second output with the (possibly soft-NMS-rescored)
    // scores of the selected boxes.
    const bool return_scores_tensor_ = true;
    DoNonMaxSuppressionOp(
        context, scores, num_boxes, max_output_size, iou_threshold_val,
        score_threshold_val, soft_nms_sigma_val, similarity_fn,
        return_scores_tensor_, pad_to_max_output_size_, &num_valid_outputs);
    if (!context->status().ok()) {
      return;
    }

    // Allocate scalar output tensor for the number of valid indices computed.
    Tensor* num_outputs_t = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output(
                                2, tensorflow::TensorShape{}, &num_outputs_t));
    num_outputs_t->scalar<int32>().setConstant(num_valid_outputs);
  }

 private:
  bool pad_to_max_output_size_;
};

template <typename Device>
class NonMaxSuppressionWithOverlapsOp : public OpKernel {
 public:
  explicit NonMaxSuppressionWithOverlapsOp(OpKernelConstruction* context)
      : OpKernel(context) {}

  void Compute(OpKernelContext* context) override {
    // overlaps: [num_boxes, num_boxes]
    const Tensor& overlaps = context->input(0);
    const Tensor& scores = context->input(1);
    const Tensor& max_output_size = context->input(2);
    OP_REQUIRES(
        context, TensorShapeUtils::IsScalar(max_output_size.shape()),
        errors::InvalidArgument("max_output_size must be 0-D, got shape ",
                                max_output_size.shape().DebugString()));
    const Tensor& overlap_threshold = context->input(3);
    OP_REQUIRES(
        context, TensorShapeUtils::IsScalar(overlap_threshold.shape()),
        errors::InvalidArgument("overlap_threshold must be 0-D, got shape ",
                                overlap_threshold.shape().DebugString()));
    const float overlap_threshold_val = overlap_threshold.scalar<float>()();
    const Tensor& score_threshold = context->input(4);
    OP_REQUIRES(
        context, TensorShapeUtils::IsScalar(score_threshold.shape()),
        errors::InvalidArgument("score_threshold must be 0-D, got shape ",
                                score_threshold.shape().DebugString()));
    const float score_threshold_val = score_threshold.scalar<float>()();

    int num_boxes = 0;
    ParseAndCheckOverlapSizes(context, overlaps, &num_boxes);
    CheckScoreSizes(context, num_boxes, scores);
    if (!context->status().ok()) {
      return;
    }
    auto similarity_fn = CreateOverlapSimilarityFn<float>(overlaps);

    const float dummy_soft_nms_sigma = static_cast<float>(0.0);
    DoNonMaxSuppressionOp(context, scores, num_boxes, max_output_size,
                          overlap_threshold_val, score_threshold_val,
                          dummy_soft_nms_sigma, similarity_fn);
  }
};
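// Illustrative sketch of the precomputed [num_boxes, num_boxes] overlaps
// input consumed by NonMaxSuppressionWithOverlapsOp above. The entries are
// hypothetical; any pairwise similarity (IoU, intersection-over-area, ...)
// may be supplied, which is what the AddIoUInput() helper in the unit tests
// below does with exact IoU values. Compile separately.
#if 0
#include <vector>

int main() {
  // overlaps(i, j), stored row-major; the diagonal is self-similarity.
  std::vector<float> overlaps = {
      1.0f, 0.8f, 0.0f,  // box 0 vs boxes {0, 1, 2}
      0.8f, 1.0f, 0.1f,  // box 1 vs boxes {0, 1, 2}
      0.0f, 0.1f, 1.0f,  // box 2 vs boxes {0, 1, 2}
  };
  (void)overlaps;  // Fed to the op as a [3, 3] float tensor.
  return 0;
}
#endif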
(max_total_size: \"\n << max_total_size.scalar()() << \")\";\n }\n const Tensor& iou_threshold = context->input(4);\n OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()),\n errors::InvalidArgument(\"iou_threshold must be 0-D, got shape \",\n iou_threshold.shape().DebugString()));\n const float iou_threshold_val = iou_threshold.scalar()();\n const Tensor& score_threshold = context->input(5);\n OP_REQUIRES(\n context, TensorShapeUtils::IsScalar(score_threshold.shape()),\n errors::InvalidArgument(\"score_threshold must be 0-D, got shape \",\n score_threshold.shape().DebugString()));\n const float score_threshold_val = score_threshold.scalar()();\n OP_REQUIRES(context, iou_threshold_val >= 0 && iou_threshold_val <= 1,\n errors::InvalidArgument(\"iou_threshold must be in [0, 1]\"));\n int num_boxes = 0;\n const int num_classes = scores.dim_size(2);\n ParseAndCheckCombinedNMSBoxSizes(context, boxes, &num_boxes, num_classes);\n CheckCombinedNMSScoreSizes(context, num_boxes, scores);\n if (!context->status().ok()) {\n return;\n }\n BatchedNonMaxSuppressionOp(context, boxes, scores, num_boxes,\n max_size_per_class, max_total_size_per_batch,\n score_threshold_val, iou_threshold_val,\n pad_per_class_, clip_boxes_);\n }\n private:\n bool pad_per_class_;\n bool clip_boxes_;\n};\nREGISTER_KERNEL_BUILDER(Name(\"NonMaxSuppression\").Device(DEVICE_CPU),\n NonMaxSuppressionOp);\nREGISTER_KERNEL_BUILDER(\n Name(\"NonMaxSuppressionV2\").TypeConstraint(\"T\").Device(DEVICE_CPU),\n NonMaxSuppressionV2Op);\nREGISTER_KERNEL_BUILDER(Name(\"NonMaxSuppressionV2\")\n .TypeConstraint(\"T\")\n .Device(DEVICE_CPU),\n NonMaxSuppressionV2Op);\nREGISTER_KERNEL_BUILDER(\n Name(\"NonMaxSuppressionV3\").TypeConstraint(\"T\").Device(DEVICE_CPU),\n NonMaxSuppressionV3Op);\nREGISTER_KERNEL_BUILDER(Name(\"NonMaxSuppressionV3\")\n .TypeConstraint(\"T\")\n .Device(DEVICE_CPU),\n NonMaxSuppressionV3Op);\nREGISTER_KERNEL_BUILDER(\n Name(\"NonMaxSuppressionV4\").TypeConstraint(\"T\").Device(DEVICE_CPU),\n NonMaxSuppressionV4Op);\nREGISTER_KERNEL_BUILDER(Name(\"NonMaxSuppressionV4\")\n .TypeConstraint(\"T\")\n .Device(DEVICE_CPU),\n NonMaxSuppressionV4Op);\nREGISTER_KERNEL_BUILDER(\n Name(\"NonMaxSuppressionV5\").TypeConstraint(\"T\").Device(DEVICE_CPU),\n NonMaxSuppressionV5Op);\nREGISTER_KERNEL_BUILDER(Name(\"NonMaxSuppressionV5\")\n .TypeConstraint(\"T\")\n .Device(DEVICE_CPU),\n NonMaxSuppressionV5Op);\nREGISTER_KERNEL_BUILDER(\n Name(\"NonMaxSuppressionWithOverlaps\").Device(DEVICE_CPU),\n NonMaxSuppressionWithOverlapsOp);\nREGISTER_KERNEL_BUILDER(Name(\"CombinedNonMaxSuppression\").Device(DEVICE_CPU),\n CombinedNonMaxSuppressionOp);\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/framework/allocator.h\"\n#include \"tensorflow/core/framework/fake_input.h\"\n#include \"tensorflow/core/framework/node_def_builder.h\"\n#include \"tensorflow/core/framework/op_kernel.h\"\n#include \"tensorflow/core/framework/tensor.h\"\n#include \"tensorflow/core/framework/tensor_testutil.h\"\n#include \"tensorflow/core/framework/types.h\"\n#include \"tensorflow/core/framework/types.pb.h\"\n#include \"tensorflow/core/kernels/ops_testutil.h\"\n#include \"tensorflow/core/kernels/ops_util.h\"\n#include \"tensorflow/core/lib/core/status_test_util.h\"\n#include \"tensorflow/core/lib/strings/str_util.h\"\n#include \"tensorflow/core/platform/test.h\"\nnamespace tensorflow {\nclass NonMaxSuppressionOpTest : public OpsTestBase {\n protected:\n void MakeOp(float iou_threshold) {\n 
TEST_F(NonMaxSuppressionOpTest, TestSelectFromThreeClusters) {
  MakeOp(.5);
  AddInputFromArray<float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  AddInputFromArray<int>(TensorShape({}), {3});
  TF_ASSERT_OK(RunOpKernel());

  Tensor expected(allocator(), DT_INT32, TensorShape({3}));
  test::FillValues<int>(&expected, {3, 0, 5});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

TEST_F(NonMaxSuppressionOpTest, TestSelectFromThreeClustersFlippedCoordinates) {
  MakeOp(.5);
  AddInputFromArray<float>(TensorShape({6, 4}),
                           {1, 1, 0, 0, 0, 0.1f, 1, 1.1f, 0, .9f, 1, -0.1f,
                            0, 10, 1, 11, 1, 10.1f, 0, 11.1f, 1, 101, 0, 100});
  AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  AddInputFromArray<int>(TensorShape({}), {3});
  TF_ASSERT_OK(RunOpKernel());

  Tensor expected(allocator(), DT_INT32, TensorShape({3}));
  test::FillValues<int>(&expected, {3, 0, 5});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

TEST_F(NonMaxSuppressionOpTest, TestSelectAtMostTwoBoxesFromThreeClusters) {
  MakeOp(.5);
  AddInputFromArray<float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  AddInputFromArray<int>(TensorShape({}), {2});
  TF_ASSERT_OK(RunOpKernel());

  Tensor expected(allocator(), DT_INT32, TensorShape({2}));
  test::FillValues<int>(&expected, {3, 0});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

TEST_F(NonMaxSuppressionOpTest, TestSelectWithNegativeScores) {
  MakeOp(.5);
  AddInputFromArray<float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  AddInputFromArray<float>(
      TensorShape({6}), {.9f - 10.0f, .75f - 10.0f, .6f - 10.0f, .95f - 10.0f,
                         .5f - 10.0f, .3f - 10.0f});
  AddInputFromArray<int>(TensorShape({}), {6});
  TF_ASSERT_OK(RunOpKernel());

  Tensor expected(allocator(), DT_INT32, TensorShape({3}));
  test::FillValues<int>(&expected, {3, 0, 5});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

TEST_F(NonMaxSuppressionOpTest, TestFirstBoxDegenerate) {
  MakeOp(.5);
  AddInputFromArray<float>(TensorShape({3, 4}),
                           {0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 3});
  AddInputFromArray<float>(TensorShape({3}), {.9f, .75f, .6f});
  AddInputFromArray<int>(TensorShape({}), {3});
  TF_ASSERT_OK(RunOpKernel());

  Tensor expected(allocator(), DT_INT32, TensorShape({3}));
  test::FillValues<int>(&expected, {0, 1, 2});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

TEST_F(NonMaxSuppressionOpTest, TestSelectAtMostThirtyBoxesFromThreeClusters) {
  MakeOp(.5);
  AddInputFromArray<float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  AddInputFromArray<int>(TensorShape({}), {30});
  TF_ASSERT_OK(RunOpKernel());

  Tensor expected(allocator(), DT_INT32, TensorShape({3}));
  test::FillValues<int>(&expected, {3, 0, 5});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestSelectSingleBox) {
  MakeOp(.5);
  AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
  AddInputFromArray<float>(TensorShape({1}), {.9f});
  AddInputFromArray<int>(TensorShape({}), {3});
  TF_ASSERT_OK(RunOpKernel());

  Tensor expected(allocator(), DT_INT32, TensorShape({1}));
  test::FillValues<int>(&expected, {0});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

TEST_F(NonMaxSuppressionOpTest, TestSelectFromTenIdenticalBoxes) {
  MakeOp(.5);
  int num_boxes = 10;
  std::vector<float> corners(num_boxes * 4);
  std::vector<float> scores(num_boxes);
  for (int i = 0; i < num_boxes; ++i) {
    corners[i * 4 + 0] = 0;
    corners[i * 4 + 1] = 0;
    corners[i * 4 + 2] = 1;
    corners[i * 4 + 3] = 1;
    scores[i] = .9;
  }
  AddInputFromArray<float>(TensorShape({num_boxes, 4}), corners);
  AddInputFromArray<float>(TensorShape({num_boxes}), scores);
  AddInputFromArray<int>(TensorShape({}), {3});
  TF_ASSERT_OK(RunOpKernel());

  Tensor expected(allocator(), DT_INT32, TensorShape({1}));
  test::FillValues<int>(&expected, {0});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

TEST_F(NonMaxSuppressionOpTest, TestInconsistentBoxAndScoreShapes) {
  MakeOp(.5);
  AddInputFromArray<float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  AddInputFromArray<float>(TensorShape({5}), {.9f, .75f, .6f, .95f, .5f});
  AddInputFromArray<int>(TensorShape({}), {30});

  Status s = RunOpKernel();
  ASSERT_FALSE(s.ok());
  EXPECT_TRUE(absl::StrContains(s.ToString(), "scores has incompatible shape"))
      << s;
}

TEST_F(NonMaxSuppressionOpTest, TestInvalidIOUThreshold) {
  MakeOp(1.2);
  AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
  AddInputFromArray<float>(TensorShape({1}), {.9f});
  AddInputFromArray<int>(TensorShape({}), {3});

  Status s = RunOpKernel();
  ASSERT_FALSE(s.ok());
  EXPECT_TRUE(
      absl::StrContains(s.ToString(), "iou_threshold must be in [0, 1]"))
      << s;
}

TEST_F(NonMaxSuppressionOpTest, TestEmptyInput) {
  MakeOp(.5);
  AddInputFromArray<float>(TensorShape({0, 4}), {});
  AddInputFromArray<float>(TensorShape({0}), {});
  AddInputFromArray<int>(TensorShape({}), {30});
  TF_ASSERT_OK(RunOpKernel());

  Tensor expected(allocator(), DT_INT32, TensorShape({0}));
  test::FillValues<int>(&expected, {});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

class NonMaxSuppressionV2OpTest : public OpsTestBase {
 protected:
  void MakeOp() {
    TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppressionV2")
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_INT32))
                     .Input(FakeInput(DT_FLOAT))
                     .Finalize(node_def()));
    TF_EXPECT_OK(InitOp());
  }
};

TEST_F(NonMaxSuppressionV2OpTest, TestSelectFromThreeClusters) {
  MakeOp();
  AddInputFromArray<float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  TF_ASSERT_OK(RunOpKernel());

  Tensor expected(allocator(), DT_INT32, TensorShape({3}));
  test::FillValues<int>(&expected, {3, 0, 5});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionV2OpTest,
       TestSelectFromThreeClustersFlippedCoordinates) {
  MakeOp();
  AddInputFromArray<float>(TensorShape({6, 4}),
                           {1, 1, 0, 0, 0, 0.1f, 1, 1.1f, 0, .9f, 1, -0.1f,
                            0, 10, 1, 11, 1, 10.1f, 0, 11.1f, 1, 101, 0, 100});
  AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  TF_ASSERT_OK(RunOpKernel());

  Tensor expected(allocator(), DT_INT32, TensorShape({3}));
  test::FillValues<int>(&expected, {3, 0, 5});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

TEST_F(NonMaxSuppressionV2OpTest, TestSelectAtMostTwoBoxesFromThreeClusters) {
  MakeOp();
  AddInputFromArray<float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  AddInputFromArray<int>(TensorShape({}), {2});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  TF_ASSERT_OK(RunOpKernel());

  Tensor expected(allocator(), DT_INT32, TensorShape({2}));
  test::FillValues<int>(&expected, {3, 0});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

TEST_F(NonMaxSuppressionV2OpTest,
       TestSelectAtMostThirtyBoxesFromThreeClusters) {
  MakeOp();
  AddInputFromArray<float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  AddInputFromArray<int>(TensorShape({}), {30});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  TF_ASSERT_OK(RunOpKernel());

  Tensor expected(allocator(), DT_INT32, TensorShape({3}));
  test::FillValues<int>(&expected, {3, 0, 5});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

TEST_F(NonMaxSuppressionV2OpTest, TestSelectSingleBox) {
  MakeOp();
  AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
  AddInputFromArray<float>(TensorShape({1}), {.9f});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  TF_ASSERT_OK(RunOpKernel());

  Tensor expected(allocator(), DT_INT32, TensorShape({1}));
  test::FillValues<int>(&expected, {0});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

TEST_F(NonMaxSuppressionV2OpTest, TestSelectFromTenIdenticalBoxes) {
  MakeOp();
  int num_boxes = 10;
  std::vector<float> corners(num_boxes * 4);
  std::vector<float> scores(num_boxes);
  for (int i = 0; i < num_boxes; ++i) {
    corners[i * 4 + 0] = 0;
    corners[i * 4 + 1] = 0;
    corners[i * 4 + 2] = 1;
    corners[i * 4 + 3] = 1;
    scores[i] = .9;
  }
  AddInputFromArray<float>(TensorShape({num_boxes, 4}), corners);
  AddInputFromArray<float>(TensorShape({num_boxes}), scores);
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  TF_ASSERT_OK(RunOpKernel());

  Tensor expected(allocator(), DT_INT32, TensorShape({1}));
  test::FillValues<int>(&expected, {0});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

TEST_F(NonMaxSuppressionV2OpTest, TestInconsistentBoxAndScoreShapes) {
  MakeOp();
  AddInputFromArray<float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  AddInputFromArray<float>(TensorShape({5}), {.9f, .75f, .6f, .95f, .5f});
  AddInputFromArray<int>(TensorShape({}), {30});
  AddInputFromArray<float>(TensorShape({}), {.5f});

  Status s = RunOpKernel();
  ASSERT_FALSE(s.ok());
  EXPECT_TRUE(absl::StrContains(s.ToString(), "scores has incompatible shape"))
      << s;
}
TEST_F(NonMaxSuppressionV2OpTest, TestInvalidIOUThreshold) {
  MakeOp();
  AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
  AddInputFromArray<float>(TensorShape({1}), {.9f});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<float>(TensorShape({}), {1.2f});

  Status s = RunOpKernel();
  ASSERT_FALSE(s.ok());
  EXPECT_TRUE(
      absl::StrContains(s.ToString(), "iou_threshold must be in [0, 1]"))
      << s;
}

TEST_F(NonMaxSuppressionV2OpTest, TestEmptyInput) {
  MakeOp();
  AddInputFromArray<float>(TensorShape({0, 4}), {});
  AddInputFromArray<float>(TensorShape({0}), {});
  AddInputFromArray<int>(TensorShape({}), {30});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  TF_ASSERT_OK(RunOpKernel());

  Tensor expected(allocator(), DT_INT32, TensorShape({0}));
  test::FillValues<int>(&expected, {});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

using NmsValidTypes =
    ::testing::Types<std::pair<float, float>, std::pair<float, Eigen::half>,
                     std::pair<Eigen::half, float>,
                     std::pair<Eigen::half, Eigen::half> >;

template <typename InputAndThresholdTypes>
class NonMaxSuppressionV3OpTest : public OpsTestBase {
 protected:
  using InputType = typename InputAndThresholdTypes::first_type;
  using ThresholdType = typename InputAndThresholdTypes::second_type;

  void MakeOp() {
    constexpr DataType kInputDataType = DataTypeToEnum<InputType>::value;
    constexpr DataType kThresholdDataType =
        DataTypeToEnum<ThresholdType>::value;

    TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppressionV3")
                     .Input(FakeInput(kInputDataType))
                     .Input(FakeInput(kInputDataType))
                     .Input(FakeInput(DT_INT32))
                     .Input(FakeInput(kThresholdDataType))
                     .Input(FakeInput(kThresholdDataType))
                     .Finalize(node_def()));
    TF_EXPECT_OK(InitOp());
  }
};

TYPED_TEST_SUITE(NonMaxSuppressionV3OpTest, NmsValidTypes);

TYPED_TEST(NonMaxSuppressionV3OpTest, TestSelectFromThreeClusters) {
  using InputType = typename TestFixture::InputType;
  using ThresholdType = typename TestFixture::ThresholdType;

  this->MakeOp();
  this->template AddInputFromList<InputType, float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  this->template AddInputFromList<InputType, float>(
      TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  this->template AddInputFromList<int>(TensorShape({}), {3});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(this->RunOpKernel());

  Tensor expected(this->allocator(), DT_INT32, TensorShape({3}));
  test::FillValues<int>(&expected, {3, 0, 5});
  test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}

TYPED_TEST(NonMaxSuppressionV3OpTest,
           TestSelectFromThreeClustersWithScoreThreshold) {
  using InputType = typename TestFixture::InputType;
  using ThresholdType = typename TestFixture::ThresholdType;

  this->MakeOp();
  this->template AddInputFromList<InputType, float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  this->template AddInputFromList<InputType, float>(
      TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  this->template AddInputFromList<int>(TensorShape({}), {3});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.5f});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.4f});
  TF_ASSERT_OK(this->RunOpKernel());

  Tensor expected(this->allocator(), DT_INT32, TensorShape({2}));
  test::FillValues<int>(&expected, {3, 0});
  test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest,
           TestSelectFromThreeClustersWithScoreThresholdZeroScores) {
  using InputType = typename TestFixture::InputType;
  using ThresholdType = typename TestFixture::ThresholdType;

  this->MakeOp();
  this->template AddInputFromList<InputType, float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  this->template AddInputFromList<InputType, double>(
      TensorShape({6}), {.1, 0, 0, .3, .2, -5.0});
  this->template AddInputFromList<int>(TensorShape({}), {6});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {-3.0f});
  TF_ASSERT_OK(this->RunOpKernel());

  Tensor expected(this->allocator(), DT_INT32, TensorShape({2}));
  test::FillValues<int>(&expected, {3, 0});
  test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}

TYPED_TEST(NonMaxSuppressionV3OpTest,
           TestSelectFromThreeClustersFlippedCoordinates) {
  using InputType = typename TestFixture::InputType;
  using ThresholdType = typename TestFixture::ThresholdType;

  this->MakeOp();
  this->template AddInputFromList<InputType, float>(
      TensorShape({6, 4}), {1, 1, 0, 0, 0, 0.1f, 1, 1.1f, 0, .9f, 1, -0.1f,
                            0, 10, 1, 11, 1, 10.1f, 0, 11.1f, 1, 101, 0, 100});
  this->template AddInputFromList<InputType, float>(
      TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  this->template AddInputFromList<int>(TensorShape({}), {3});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(this->RunOpKernel());

  Tensor expected(this->allocator(), DT_INT32, TensorShape({3}));
  test::FillValues<int>(&expected, {3, 0, 5});
  test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}

TYPED_TEST(NonMaxSuppressionV3OpTest,
           TestSelectAtMostTwoBoxesFromThreeClusters) {
  using InputType = typename TestFixture::InputType;
  using ThresholdType = typename TestFixture::ThresholdType;

  this->MakeOp();
  this->template AddInputFromList<InputType, float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  this->template AddInputFromList<InputType, float>(
      TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  this->template AddInputFromList<int>(TensorShape({}), {2});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(this->RunOpKernel());

  Tensor expected(this->allocator(), DT_INT32, TensorShape({2}));
  test::FillValues<int>(&expected, {3, 0});
  test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}

TYPED_TEST(NonMaxSuppressionV3OpTest,
           TestSelectAtMostThirtyBoxesFromThreeClusters) {
  using InputType = typename TestFixture::InputType;
  using ThresholdType = typename TestFixture::ThresholdType;

  this->MakeOp();
  this->template AddInputFromList<InputType, float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  this->template AddInputFromList<InputType, float>(
      TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  this->template AddInputFromList<int>(TensorShape({}), {30});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(this->RunOpKernel());

  Tensor expected(this->allocator(), DT_INT32, TensorShape({3}));
  test::FillValues<int>(&expected, {3, 0, 5});
  test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest, TestSelectSingleBox) {
  using InputType = typename TestFixture::InputType;
  using ThresholdType = typename TestFixture::ThresholdType;

  this->MakeOp();
  this->template AddInputFromList<InputType, float>(TensorShape({1, 4}),
                                                    {0, 0, 1, 1});
  this->template AddInputFromList<InputType>(TensorShape({1}), {.9f});
  this->template AddInputFromList<int>(TensorShape({}), {3});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.5});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {0});
  TF_ASSERT_OK(this->RunOpKernel());

  Tensor expected(this->allocator(), DT_INT32, TensorShape({1}));
  test::FillValues<int>(&expected, {0});
  test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}

TYPED_TEST(NonMaxSuppressionV3OpTest, TestSelectFromTenIdenticalBoxes) {
  using InputType = typename TestFixture::InputType;
  using ThresholdType = typename TestFixture::ThresholdType;

  this->MakeOp();

  int num_boxes = 10;
  std::vector<InputType> corners(num_boxes * 4);
  std::vector<InputType> scores(num_boxes);
  for (int i = 0; i < num_boxes; ++i) {
    corners[i * 4 + 0] = static_cast<InputType>(0);
    corners[i * 4 + 1] = static_cast<InputType>(0);
    corners[i * 4 + 2] = static_cast<InputType>(1);
    corners[i * 4 + 3] = static_cast<InputType>(1);
    scores[i] = static_cast<InputType>(.9);
  }
  this->template AddInputFromArray<InputType>(TensorShape({num_boxes, 4}),
                                              corners);
  this->template AddInputFromArray<InputType>(TensorShape({num_boxes}), scores);
  this->template AddInputFromList<int>(TensorShape({}), {3});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(this->RunOpKernel());

  Tensor expected(this->allocator(), DT_INT32, TensorShape({1}));
  test::FillValues<int>(&expected, {0});
  test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}

TYPED_TEST(NonMaxSuppressionV3OpTest, TestInconsistentBoxAndScoreShapes) {
  using InputType = typename TestFixture::InputType;
  using ThresholdType = typename TestFixture::ThresholdType;

  this->MakeOp();
  this->template AddInputFromList<InputType, float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  this->template AddInputFromList<InputType, float>(
      TensorShape({5}), {.9f, .75f, .6f, .95f, .5f});
  this->template AddInputFromList<int>(TensorShape({}), {30});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.5});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {0});

  Status s = this->RunOpKernel();
  ASSERT_FALSE(s.ok());
  EXPECT_TRUE(absl::StrContains(s.ToString(), "scores has incompatible shape"))
      << s;
}

TYPED_TEST(NonMaxSuppressionV3OpTest, TestInvalidIOUThreshold) {
  using InputType = typename TestFixture::InputType;
  using ThresholdType = typename TestFixture::ThresholdType;

  this->MakeOp();
  this->template AddInputFromList<InputType, float>(TensorShape({1, 4}),
                                                    {0, 0, 1, 1});
  this->template AddInputFromList<InputType>(TensorShape({1}), {.9f});
  this->template AddInputFromList<int>(TensorShape({}), {3});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {1.2f});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {0});

  Status s = this->RunOpKernel();
  ASSERT_FALSE(s.ok());
  EXPECT_TRUE(
      absl::StrContains(s.ToString(), "iou_threshold must be in [0, 1]"))
      << s;
}

TYPED_TEST(NonMaxSuppressionV3OpTest, TestEmptyInput) {
  using InputType = typename TestFixture::InputType;
  using ThresholdType = typename TestFixture::ThresholdType;

  this->MakeOp();
  this->template AddInputFromArray<InputType>(TensorShape({0, 4}), {});
  this->template AddInputFromArray<InputType>(TensorShape({0}), {});
  this->template AddInputFromList<int>(TensorShape({}), {30});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(this->RunOpKernel());

  Tensor expected(this->allocator(), DT_INT32, TensorShape({0}));
  test::FillValues<int>(&expected, {});
  test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
template <typename InputAndThresholdTypes>
class NonMaxSuppressionV4OpTest : public OpsTestBase {
 protected:
  using InputType = typename InputAndThresholdTypes::first_type;
  using ThresholdType = typename InputAndThresholdTypes::second_type;

  void MakeOp() {
    constexpr DataType kInputDataType = DataTypeToEnum<InputType>::value;
    constexpr DataType kThresholdDataType =
        DataTypeToEnum<ThresholdType>::value;

    TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppressionV4")
                     .Input(FakeInput(kInputDataType))
                     .Input(FakeInput(kInputDataType))
                     .Input(FakeInput(DT_INT32))
                     .Input(FakeInput(kThresholdDataType))
                     .Input(FakeInput(kThresholdDataType))
                     .Attr("pad_to_max_output_size", true)
                     .Finalize(node_def()));
    TF_EXPECT_OK(InitOp());
  }
};

TYPED_TEST_SUITE(NonMaxSuppressionV4OpTest, NmsValidTypes);

TYPED_TEST(NonMaxSuppressionV4OpTest, TestSelectFromThreeClustersPadFive) {
  using InputType = typename TestFixture::InputType;
  using ThresholdType = typename TestFixture::ThresholdType;

  this->MakeOp();
  this->template AddInputFromList<InputType, float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  this->template AddInputFromList<InputType, float>(
      TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  this->template AddInputFromList<int>(TensorShape({}), {5});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(this->RunOpKernel());

  const auto expected_indices = test::AsTensor<int>({3, 0, 5, 0, 0});
  test::ExpectTensorEqual<int>(expected_indices, *(this->GetOutput(0)));
  Tensor expected_num_valid = test::AsScalar<int>(3);
  test::ExpectTensorEqual<int>(expected_num_valid, *(this->GetOutput(1)));
}

TYPED_TEST(NonMaxSuppressionV4OpTest,
           TestSelectFromThreeClustersPadFiveScoreThr) {
  using InputType = typename TestFixture::InputType;
  using ThresholdType = typename TestFixture::ThresholdType;

  this->MakeOp();
  this->template AddInputFromList<InputType, float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  this->template AddInputFromList<InputType, float>(
      TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  this->template AddInputFromList<int>(TensorShape({}), {6});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.4f});
  TF_ASSERT_OK(this->RunOpKernel());

  const auto expected_indices = test::AsTensor<int>({3, 0, 0, 0, 0, 0});
  test::ExpectTensorEqual<int>(expected_indices, *(this->GetOutput(0)));
  Tensor expected_num_valid = test::AsScalar<int>(2);
  test::ExpectTensorEqual<int>(expected_num_valid, *(this->GetOutput(1)));
}

class NonMaxSuppressionV5OpTest : public OpsTestBase {
 protected:
  void MakeOp() {
    TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppressionV5")
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_INT32))
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Attr("pad_to_max_output_size", true)
                     .Finalize(node_def()));
    TF_EXPECT_OK(InitOp());
  }
};
*GetOutput(1), 1e-2);\n Tensor expected_num_valid = test::AsScalar(3);\n test::ExpectTensorEqual(expected_num_valid, *GetOutput(2));\n}\nTEST_F(NonMaxSuppressionV5OpTest, TestSelectFromThreeClustersWithSoftNMS) {\n MakeOp();\n AddInputFromArray(\n TensorShape({6, 4}),\n {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,\n 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});\n AddInputFromArray(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});\n AddInputFromArray(TensorShape({}), {6});\n AddInputFromArray(TensorShape({}), {0.5f});\n AddInputFromArray(TensorShape({}), {0.0f});\n AddInputFromArray(TensorShape({}), {0.5f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected(allocator(), DT_INT32, TensorShape({6}));\n test::FillValues(&expected, {3, 0, 1, 5, 4, 2});\n test::ExpectTensorEqual(expected, *GetOutput(0));\n Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({6}));\n test::FillValues(&expected_scores,\n {0.95, 0.9, 0.384, 0.3, 0.256, 0.197});\n test::ExpectTensorNear(expected_scores, *GetOutput(1), 1e-2);\n Tensor expected_num_valid = test::AsScalar(6);\n test::ExpectTensorEqual(expected_num_valid, *GetOutput(2));\n}\nclass NonMaxSuppressionWithOverlapsOpTest : public OpsTestBase {\n protected:\n void MakeOp() {\n TF_EXPECT_OK(NodeDefBuilder(\"non_max_suppression_op\",\n \"NonMaxSuppressionWithOverlaps\")\n .Input(FakeInput(DT_FLOAT))\n .Input(FakeInput(DT_FLOAT))\n .Input(FakeInput(DT_INT32))\n .Input(FakeInput(DT_FLOAT))\n .Input(FakeInput(DT_FLOAT))\n .Finalize(node_def()));\n TF_EXPECT_OK(InitOp());\n }\n void AddIoUInput(const std::vector& boxes) {\n ASSERT_EQ((boxes.size() % 4), 0);\n size_t num_boxes = boxes.size() / 4;\n std::vector iou_overlaps(num_boxes * num_boxes);\n auto corner_access = [&boxes](size_t box_idx, size_t corner_idx) {\n return boxes[box_idx * 4 + corner_idx];\n };\n for (size_t i = 0; i < num_boxes; ++i) {\n for (size_t j = 0; j < num_boxes; ++j) {\n const float ymin_i =\n std::min(corner_access(i, 0), corner_access(i, 2));\n const float xmin_i =\n std::min(corner_access(i, 1), corner_access(i, 3));\n const float ymax_i =\n std::max(corner_access(i, 0), corner_access(i, 2));\n const float xmax_i =\n std::max(corner_access(i, 1), corner_access(i, 3));\n const float ymin_j =\n std::min(corner_access(j, 0), corner_access(j, 2));\n const float xmin_j =\n std::min(corner_access(j, 1), corner_access(j, 3));\n const float ymax_j =\n std::max(corner_access(j, 0), corner_access(j, 2));\n const float xmax_j =\n std::max(corner_access(j, 1), corner_access(j, 3));\n const float area_i = (ymax_i - ymin_i) * (xmax_i - xmin_i);\n const float area_j = (ymax_j - ymin_j) * (xmax_j - xmin_j);\n float iou;\n if (area_i <= 0 || area_j <= 0) {\n iou = 0.0;\n } else {\n const float intersection_ymin = std::max(ymin_i, ymin_j);\n const float intersection_xmin = std::max(xmin_i, xmin_j);\n const float intersection_ymax = std::min(ymax_i, ymax_j);\n const float intersection_xmax = std::min(xmax_i, xmax_j);\n const float intersection_area =\n std::max(intersection_ymax - intersection_ymin, 0.0) *\n std::max(intersection_xmax - intersection_xmin, 0.0);\n iou = intersection_area / (area_i + area_j - intersection_area);\n }\n iou_overlaps[i * num_boxes + j] = iou;\n }\n }\n AddInputFromArray(TensorShape({static_cast(num_boxes),\n static_cast(num_boxes)}),\n iou_overlaps);\n }\n};\nTEST_F(NonMaxSuppressionWithOverlapsOpTest, TestSelectFromThreeClusters) {\n MakeOp();\n AddIoUInput({0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,\n 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 
101});\n AddInputFromArray(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {0.0f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected(allocator(), DT_INT32, TensorShape({3}));\n test::FillValues(&expected, {3, 0, 5});\n test::ExpectTensorEqual(expected, *GetOutput(0));\n}\nTEST_F(NonMaxSuppressionWithOverlapsOpTest,\n TestSelectFromThreeClustersFlippedCoordinates) {\n MakeOp();\n AddIoUInput({1, 1, 0, 0, 0, 0.1f, 1, 1.1f, 0, .9f, 1, -0.1f,\n 0, 10, 1, 11, 1, 10.1f, 0, 11.1f, 1, 101, 0, 100});\n AddInputFromArray(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {0.0f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected(allocator(), DT_INT32, TensorShape({3}));\n test::FillValues(&expected, {3, 0, 5});\n test::ExpectTensorEqual(expected, *GetOutput(0));\n}\nTEST_F(NonMaxSuppressionWithOverlapsOpTest,\n TestSelectAtMostTwoBoxesFromThreeClusters) {\n MakeOp();\n AddIoUInput({0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,\n 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});\n AddInputFromArray(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});\n AddInputFromArray(TensorShape({}), {2});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {0.0f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected(allocator(), DT_INT32, TensorShape({2}));\n test::FillValues(&expected, {3, 0});\n test::ExpectTensorEqual(expected, *GetOutput(0));\n}\nTEST_F(NonMaxSuppressionWithOverlapsOpTest,\n TestSelectAtMostThirtyBoxesFromThreeClusters) {\n MakeOp();\n AddIoUInput({0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,\n 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});\n AddInputFromArray(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});\n AddInputFromArray(TensorShape({}), {30});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {0.0f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected(allocator(), DT_INT32, TensorShape({3}));\n test::FillValues(&expected, {3, 0, 5});\n test::ExpectTensorEqual(expected, *GetOutput(0));\n}\nTEST_F(NonMaxSuppressionWithOverlapsOpTest, TestSelectSingleBox) {\n MakeOp();\n AddIoUInput({0, 0, 1, 1});\n AddInputFromArray(TensorShape({1}), {.9f});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {0.0f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected(allocator(), DT_INT32, TensorShape({1}));\n test::FillValues(&expected, {0});\n test::ExpectTensorEqual(expected, *GetOutput(0));\n}\nTEST_F(NonMaxSuppressionWithOverlapsOpTest, TestSelectFromTenIdenticalBoxes) {\n MakeOp();\n int num_boxes = 10;\n std::vector corners(num_boxes * 4);\n std::vector scores(num_boxes);\n for (int i = 0; i < num_boxes; ++i) {\n corners[i * 4 + 0] = 0;\n corners[i * 4 + 1] = 0;\n corners[i * 4 + 2] = 1;\n corners[i * 4 + 3] = 1;\n scores[i] = .9;\n }\n AddIoUInput(corners);\n AddInputFromArray(TensorShape({num_boxes}), scores);\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {0.0f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected(allocator(), DT_INT32, TensorShape({1}));\n test::FillValues(&expected, {0});\n test::ExpectTensorEqual(expected, *GetOutput(0));\n}\nTEST_F(NonMaxSuppressionWithOverlapsOpTest, TestInconsistentBoxAndScoreShapes) 
{\n MakeOp();\n AddIoUInput({0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,\n 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});\n AddInputFromArray(TensorShape({5}), {.9f, .75f, .6f, .95f, .5f});\n AddInputFromArray(TensorShape({}), {30});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {0.0f});\n Status s = RunOpKernel();\n ASSERT_FALSE(s.ok());\n EXPECT_TRUE(absl::StrContains(s.ToString(), \"scores has incompatible shape\"))\n << s;\n}\nTEST_F(NonMaxSuppressionWithOverlapsOpTest, TestInvalidOverlapsShape) {\n MakeOp();\n AddInputFromArray(TensorShape({2, 3}), {0, 0, 0, 0, 0, 0});\n AddInputFromArray(TensorShape({2}), {0.5f, 0.5f});\n AddInputFromArray(TensorShape({}), {30});\n AddInputFromArray(TensorShape({}), {0.f});\n AddInputFromArray(TensorShape({}), {0.0f});\n Status s = RunOpKernel();\n ASSERT_FALSE(s.ok());\n EXPECT_TRUE(absl::StrContains(s.ToString(), \"overlaps must be square\")) << s;\n}\nTEST_F(NonMaxSuppressionWithOverlapsOpTest, TestThresholdGreaterOne) {\n MakeOp();\n AddIoUInput({0, 0, 1, 1});\n AddInputFromArray(TensorShape({1}), {.9f});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {1.2f});\n AddInputFromArray(TensorShape({}), {0.0f});\n TF_ASSERT_OK(RunOpKernel());\n}\nTEST_F(NonMaxSuppressionWithOverlapsOpTest, TestThresholdSmallerZero) {\n MakeOp();\n AddIoUInput({0, 0, 1, 1});\n AddInputFromArray(TensorShape({1}), {.9f});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {-0.2f});\n AddInputFromArray(TensorShape({}), {0.0f});\n TF_ASSERT_OK(RunOpKernel());\n}\nTEST_F(NonMaxSuppressionWithOverlapsOpTest, TestEmptyInput) {\n MakeOp();\n AddIoUInput({});\n AddInputFromArray(TensorShape({0}), {});\n AddInputFromArray(TensorShape({}), {30});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {0.0f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected(allocator(), DT_INT32, TensorShape({0}));\n test::FillValues(&expected, {});\n test::ExpectTensorEqual(expected, *GetOutput(0));\n}\nclass CombinedNonMaxSuppressionOpTest : public OpsTestBase {\n protected:\n void MakeOp(bool pad_per_class = false, bool clip_boxes = true) {\n TF_EXPECT_OK(NodeDefBuilder(\"combined_non_max_suppression_op\",\n \"CombinedNonMaxSuppression\")\n .Input(FakeInput(DT_FLOAT))\n .Input(FakeInput(DT_FLOAT))\n .Input(FakeInput(DT_INT32))\n .Input(FakeInput(DT_INT32))\n .Input(FakeInput(DT_FLOAT))\n .Input(FakeInput(DT_FLOAT))\n .Attr(\"pad_per_class\", pad_per_class)\n .Attr(\"clip_boxes\", clip_boxes)\n .Finalize(node_def()));\n TF_EXPECT_OK(InitOp());\n }\n};\nTEST_F(CombinedNonMaxSuppressionOpTest, TestEmptyInput) {\n MakeOp();\n AddInputFromArray(TensorShape({0, 0, 0, 4}), {});\n AddInputFromArray(TensorShape({0, 0, 0}), {});\n AddInputFromArray(TensorShape({}), {30});\n AddInputFromArray(TensorShape({}), {10});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {0.0f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({0, 10, 4}));\n test::FillValues(&expected_boxes, {});\n test::ExpectTensorEqual(expected_boxes, *GetOutput(0));\n Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({0, 10}));\n test::FillValues(&expected_scores, {});\n test::ExpectTensorEqual(expected_scores, *GetOutput(1));\n Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({0, 10}));\n test::FillValues(&expected_classes, {});\n test::ExpectTensorEqual(expected_classes, *GetOutput(2));\n Tensor 
expected_valid_d(allocator(), DT_INT32, TensorShape({0}));\n test::FillValues(&expected_valid_d, {});\n test::ExpectTensorEqual(expected_valid_d, *GetOutput(3));\n}\nTEST_F(CombinedNonMaxSuppressionOpTest, TestSelectFromThreeClusters) {\n MakeOp();\n AddInputFromArray(\n TensorShape({1, 6, 1, 4}),\n {0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,\n 0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4});\n AddInputFromArray(TensorShape({1, 6, 1}),\n {.9f, .75f, .6f, .95f, .5f, .3f});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {0.0f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({1, 3, 4}));\n test::FillValues(&expected_boxes,\n {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0.3, 1, 0.4});\n test::ExpectTensorEqual(expected_boxes, *GetOutput(0));\n Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({1, 3}));\n test::FillValues(&expected_scores, {0.95, 0.9, 0.3});\n test::ExpectTensorEqual(expected_scores, *GetOutput(1));\n Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({1, 3}));\n test::FillValues(&expected_classes, {0, 0, 0});\n test::ExpectTensorEqual(expected_classes, *GetOutput(2));\n Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({1}));\n test::FillValues(&expected_valid_d, {3});\n test::ExpectTensorEqual(expected_valid_d, *GetOutput(3));\n}\nTEST_F(CombinedNonMaxSuppressionOpTest,\n TestSelectFromThreeClustersNoBoxClipping) {\n MakeOp(false, false);\n AddInputFromArray(TensorShape({1, 6, 1, 4}),\n {0, 0, 10, 10, 0, 1, 10, 11, 0, 1, 10, 9,\n 0, 11, 10, 20, 0, 12, 10, 21, 0, 30, 100, 40});\n AddInputFromArray(TensorShape({1, 6, 1}),\n {.9f, .75f, .6f, .95f, .5f, .3f});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {0.0f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({1, 3, 4}));\n test::FillValues(&expected_boxes,\n {0, 11, 10, 20, 0, 0, 10, 10, 0, 30, 100, 40});\n test::ExpectTensorEqual(expected_boxes, *GetOutput(0));\n Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({1, 3}));\n test::FillValues(&expected_scores, {0.95, 0.9, 0.3});\n test::ExpectTensorEqual(expected_scores, *GetOutput(1));\n Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({1, 3}));\n test::FillValues(&expected_classes, {0, 0, 0});\n test::ExpectTensorEqual(expected_classes, *GetOutput(2));\n Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({1}));\n test::FillValues(&expected_valid_d, {3});\n test::ExpectTensorEqual(expected_valid_d, *GetOutput(3));\n}\nTEST_F(CombinedNonMaxSuppressionOpTest,\n TestSelectFromThreeClustersWithScoreThreshold) {\n MakeOp();\n AddInputFromArray(\n TensorShape({1, 6, 1, 4}),\n {0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,\n 0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4});\n AddInputFromArray(TensorShape({1, 6, 1}),\n {.9f, .75f, .6f, .95f, .5f, .3f});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {0.4f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({1, 3, 4}));\n test::FillValues(&expected_boxes,\n {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0});\n test::ExpectTensorEqual(expected_boxes, 
*GetOutput(0));\n Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({1, 3}));\n test::FillValues(&expected_scores, {0.95, 0.9, 0});\n test::ExpectTensorEqual(expected_scores, *GetOutput(1));\n Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({1, 3}));\n test::FillValues(&expected_classes, {0, 0, 0});\n test::ExpectTensorEqual(expected_classes, *GetOutput(2));\n Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({1}));\n test::FillValues(&expected_valid_d, {2});\n test::ExpectTensorEqual(expected_valid_d, *GetOutput(3));\n}\nTEST_F(CombinedNonMaxSuppressionOpTest,\n TestSelectFromThreeClustersWithScoreThresholdZeroScores) {\n MakeOp();\n AddInputFromArray(\n TensorShape({1, 6, 1, 4}),\n {0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,\n 0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4});\n AddInputFromArray(TensorShape({1, 6, 1}),\n {.1f, 0, 0, .3f, .2f, -5.0f});\n AddInputFromArray(TensorShape({}), {4});\n AddInputFromArray(TensorShape({}), {5});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {-3.0f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({1, 5, 4}));\n test::FillValues(\n &expected_boxes,\n {\n 0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n });\n test::ExpectTensorEqual(expected_boxes, *GetOutput(0));\n Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({1, 5}));\n test::FillValues(&expected_scores, {0.3, 0.1, 0, 0, 0});\n test::ExpectTensorEqual(expected_scores, *GetOutput(1));\n Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({1, 5}));\n test::FillValues(&expected_classes, {0, 0, 0, 0, 0});\n test::ExpectTensorEqual(expected_classes, *GetOutput(2));\n Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({1}));\n test::FillValues(&expected_valid_d, {2});\n test::ExpectTensorEqual(expected_valid_d, *GetOutput(3));\n}\nTEST_F(CombinedNonMaxSuppressionOpTest, TestSelectSingleBox) {\n MakeOp();\n AddInputFromArray(TensorShape({1, 1, 1, 4}), {0, 0, 1, 1});\n AddInputFromArray(TensorShape({1, 1, 1}), {.9f});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {1});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {0.0f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({1, 1, 4}));\n test::FillValues(&expected_boxes, {0, 0, 1, 1});\n test::ExpectTensorEqual(expected_boxes, *GetOutput(0));\n Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({1, 1}));\n test::FillValues(&expected_scores, {0.9});\n test::ExpectTensorEqual(expected_scores, *GetOutput(1));\n Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({1, 1}));\n test::FillValues(&expected_classes, {0});\n test::ExpectTensorEqual(expected_classes, *GetOutput(2));\n Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({1}));\n test::FillValues(&expected_valid_d, {1});\n test::ExpectTensorEqual(expected_valid_d, *GetOutput(3));\n}\nTEST_F(CombinedNonMaxSuppressionOpTest,\n TestSelectFromTwoBatchesWithScoreThreshold) {\n MakeOp();\n AddInputFromArray(\n TensorShape({2, 6, 1, 4}),\n {0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,\n 0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,\n 0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,\n 0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});\n AddInputFromArray(\n TensorShape({2, 6, 1}),\n {.9f, .75f, .6f, .95f, .5f, .3f, .9f, .75f, .6f, .95f, .5f, .3f});\n 
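// The remaining scalar inputs follow the CombinedNonMaxSuppression input\n // order: max_output_size_per_class, max_total_size, iou_threshold,\n // score_threshold.\n 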
AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {0.4f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 3, 4}));\n test::FillValues(&expected_boxes,\n {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0,\n 0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0, 0, 0});\n test::ExpectTensorEqual(expected_boxes, *GetOutput(0));\n Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 3}));\n test::FillValues(&expected_scores, {0.95, 0.9, 0, 0.95, 0.9, 0});\n test::ExpectTensorEqual(expected_scores, *GetOutput(1));\n Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 3}));\n test::FillValues(&expected_classes, {0, 0, 0, 0, 0, 0});\n test::ExpectTensorEqual(expected_classes, *GetOutput(2));\n Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));\n test::FillValues(&expected_valid_d, {2, 2});\n test::ExpectTensorEqual(expected_valid_d, *GetOutput(3));\n}\nTEST_F(CombinedNonMaxSuppressionOpTest, TestSelectFromTwoBatchesTwoClasses) {\n MakeOp();\n AddInputFromArray(\n TensorShape({2, 6, 1, 4}),\n {0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,\n 0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,\n 0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,\n 0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});\n AddInputFromArray(TensorShape({2, 6, 2}),\n {0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f,\n 0.5f, 0.5f, 0.3f, 0.1f, 0.1f, 0.9f, 0.75f, 0.8f,\n 0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {0.0f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 3, 4}));\n test::FillValues(\n &expected_boxes,\n {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f,\n 0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f});\n test::ExpectTensorEqual(expected_boxes, *GetOutput(0));\n Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 3}));\n test::FillValues(&expected_scores, {0.95, 0.9, 0.75, 0.95, 0.9, 0.75});\n test::ExpectTensorEqual(expected_scores, *GetOutput(1));\n Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 3}));\n test::FillValues(&expected_classes, {0, 1, 0, 0, 1, 0});\n test::ExpectTensorEqual(expected_classes, *GetOutput(2));\n Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));\n test::FillValues(&expected_valid_d, {3, 3});\n test::ExpectTensorEqual(expected_valid_d, *GetOutput(3));\n}\nTEST_F(CombinedNonMaxSuppressionOpTest,\n TestSelectFromTwoBatchesTwoClassesWithScoreThreshold) {\n MakeOp();\n AddInputFromArray(\n TensorShape({2, 6, 1, 4}),\n {0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,\n 0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,\n 0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,\n 0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});\n AddInputFromArray(TensorShape({2, 6, 2}),\n {0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f,\n 0.5f, 0.5f, 0.3f, 0.1f, 0.1f, 0.9f, 0.75f, 0.8f,\n 0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {0.8f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected_boxes(allocator(), 
DT_FLOAT, TensorShape({2, 3, 4}));\n test::FillValues(&expected_boxes,\n {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0,\n 0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0, 0, 0});\n test::ExpectTensorEqual(expected_boxes, *GetOutput(0));\n Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 3}));\n test::FillValues(&expected_scores, {0.95, 0.9, 0, 0.95, 0.9, 0});\n test::ExpectTensorEqual(expected_scores, *GetOutput(1));\n Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 3}));\n test::FillValues(&expected_classes, {0, 1, 0, 0, 1, 0});\n test::ExpectTensorEqual(expected_classes, *GetOutput(2));\n Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));\n test::FillValues(&expected_valid_d, {2, 2});\n test::ExpectTensorEqual(expected_valid_d, *GetOutput(3));\n}\nTEST_F(CombinedNonMaxSuppressionOpTest,\n TestSelectFromTwoBatchesTwoClassesWithScoreThresholdPaddedTotalSize) {\n MakeOp(true);\n AddInputFromArray(\n TensorShape({2, 6, 1, 4}),\n {0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,\n 0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,\n 0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,\n 0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});\n AddInputFromArray(TensorShape({2, 6, 2}),\n {0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f,\n 0.5f, 0.5f, 0.3f, 0.1f, 0.1f, 0.9f, 0.75f, 0.8f,\n 0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});\n AddInputFromArray(TensorShape({}), {10});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {0.8f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 3, 4}));\n test::FillValues(&expected_boxes,\n {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0,\n 0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0, 0, 0});\n test::ExpectTensorEqual(expected_boxes, *GetOutput(0));\n Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 3}));\n test::FillValues(&expected_scores, {0.95, 0.9, 0, 0.95, 0.9, 0});\n test::ExpectTensorEqual(expected_scores, *GetOutput(1));\n Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 3}));\n test::FillValues(&expected_classes, {0, 1, 0, 0, 1, 0});\n test::ExpectTensorEqual(expected_classes, *GetOutput(2));\n Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));\n test::FillValues(&expected_valid_d, {2, 2});\n test::ExpectTensorEqual(expected_valid_d, *GetOutput(3));\n}\nTEST_F(CombinedNonMaxSuppressionOpTest,\n TestSelectFromTwoBatchesTwoClassesWithScoreThresholdPaddedPerClass) {\n MakeOp(true);\n AddInputFromArray(\n TensorShape({2, 6, 1, 4}),\n {0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,\n 0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,\n 0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,\n 0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});\n AddInputFromArray(TensorShape({2, 6, 2}),\n {0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f,\n 0.5f, 0.5f, 0.3f, 0.1f, 0.1f, 0.9f, 0.75f, 0.8f,\n 0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});\n AddInputFromArray(TensorShape({}), {2});\n AddInputFromArray(TensorShape({}), {50});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {0.8f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 4, 4}));\n test::FillValues(\n &expected_boxes,\n {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0, 0, 0, 0, 0, 0, 0});\n 
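// With pad_per_class=true the padded output length is\n // min(max_total_size, max_output_size_per_class * num_classes)\n // = min(50, 2 * 2) = 4, which explains the {2, 4, 4} box shape above.\n 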
test::ExpectTensorEqual(expected_boxes, *GetOutput(0));\n Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 4}));\n test::FillValues(&expected_scores, {0.95, 0.9, 0, 0, 0.95, 0.9, 0, 0});\n test::ExpectTensorEqual(expected_scores, *GetOutput(1));\n Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 4}));\n test::FillValues(&expected_classes, {0, 1, 0, 0, 0, 1, 0, 0});\n test::ExpectTensorEqual(expected_classes, *GetOutput(2));\n Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));\n test::FillValues(&expected_valid_d, {2, 2});\n test::ExpectTensorEqual(expected_valid_d, *GetOutput(3));\n}\nTEST_F(CombinedNonMaxSuppressionOpTest,\n TestSelectFromTwoBatchesTwoClassesTotalSize) {\n MakeOp();\n AddInputFromArray(\n TensorShape({2, 6, 1, 4}),\n {0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,\n 0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,\n 0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,\n 0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});\n AddInputFromArray(TensorShape({2, 6, 2}),\n {0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f,\n 0.5f, 0.5f, 0.3f, 0.1f, 0.1f, 0.9f, 0.75f, 0.8f,\n 0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {5});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {0.1f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 5, 4}));\n test::FillValues(\n &expected_boxes, {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0.01f,\n 0.1, 0.11f, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,\n 0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0.02f,\n 0.2, 0.22f, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});\n test::ExpectTensorEqual(expected_boxes, *GetOutput(0));\n Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 5}));\n test::FillValues(\n &expected_scores, {0.95, 0.9, 0.75, 0.5, 0.3, 0.95, 0.9, 0.75, 0.5, 0.3});\n test::ExpectTensorEqual(expected_scores, *GetOutput(1));\n Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 5}));\n test::FillValues(&expected_classes, {0, 1, 0, 1, 0, 0, 1, 0, 1, 0});\n test::ExpectTensorEqual(expected_classes, *GetOutput(2));\n Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));\n test::FillValues(&expected_valid_d, {5, 5});\n test::ExpectTensorEqual(expected_valid_d, *GetOutput(3));\n}\nTEST_F(CombinedNonMaxSuppressionOpTest,\n TestSelectFromTwoBatchesTwoClassesForBoxesAndScores) {\n MakeOp();\n AddInputFromArray(\n TensorShape({2, 6, 2, 4}),\n {0, 0, 0.1, 0.1, 0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, 0.6f, 0.1, 0.7f,\n 0, -0.01, 0.1, 0.09f, 0, -0.01, 0.1, 0.09f, 0, 0.11, 0.1, 0.2, 0, 0.11,\n 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4, 0,\n 0.3, 1, 0.4,\n 0, 0, 0.2, 0.2, 0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, 0.02f, 0.2,\n 0.22f, 0, -0.02, 0.2, 0.19f, 0, -0.02, 0.2, 0.19f, 0, 0.21, 0.2, 0.3, 0,\n 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1,\n 0.5, 0, 0.4, 1, 0.5});\n AddInputFromArray(TensorShape({2, 6, 2}),\n {0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f,\n 0.5f, 0.5f, 0.3f, 0.1f, 0.1f, 0.9f, 0.75f, 0.8f,\n 0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {3});\n AddInputFromArray(TensorShape({}), {.5f});\n AddInputFromArray(TensorShape({}), {0.0f});\n TF_ASSERT_OK(RunOpKernel());\n Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 3, 4}));\n 
test::FillValues(\n &expected_boxes,\n {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0.6f, 0.1, 0.7f,\n 0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f});\n test::ExpectTensorEqual(expected_boxes, *GetOutput(0));\n Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 3}));\n test::FillValues(&expected_scores, {0.95, 0.9, 0.8, 0.95, 0.9, 0.75});\n test::ExpectTensorEqual(expected_scores, *GetOutput(1));\n Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 3}));\n test::FillValues(&expected_classes, {0, 1, 1, 0, 1, 0});\n test::ExpectTensorEqual(expected_classes, *GetOutput(2));\n Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));\n test::FillValues(&expected_valid_d, {3, 3});\n test::ExpectTensorEqual(expected_valid_d, *GetOutput(3));\n}\n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/non_max_suppression_op.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/non_max_suppression_op_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":227,"cells":{"ID":{"kind":"string","value":"c3e707eb-aa99-42f0-8beb-c5644b98862f"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"device_propagation"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/common_runtime/device_propagation.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/common_runtime/device_propagation_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/common_runtime/device_propagation.h\"\n#include \n#include \n#include \"absl/container/flat_hash_set.h\"\n#include \"tensorflow/core/framework/types.h\"\n#include \"tensorflow/core/graph/algorithm.h\"\n#include \"tensorflow/core/graph/graph.h\"\nnamespace tensorflow {\nnamespace {\nconst std::string& AssignedOrRequestedDevice(const Node& node) {\n if (!node.assigned_device_name().empty()) {\n return node.assigned_device_name();\n }\n return node.requested_device();\n}\nbool UpdateDeviceFromInputs(\n const device_propagation::NodeFilter& node_filter,\n const device_propagation::DeviceFilter& device_filter, Node* node) {\n if (!AssignedOrRequestedDevice(*node).empty() || !node_filter(*node)) {\n return false;\n }\n string proposed_device = \"\";\n Node* proposed_src = nullptr;\n for (const Edge* e : node->in_edges()) {\n if (e->IsControlEdge()) {\n continue;\n }\n Node* src = e->src();\n const string& src_device = AssignedOrRequestedDevice(*src);\n if ((node->IsSwitch() && src->IsLoopCond()) ||\n (node->IsMerge() && src->IsEnter())) {\n continue;\n }\n if (!device_filter(src_device)) return false;\n if (proposed_src == nullptr) {\n proposed_device = src_device;\n proposed_src = src;\n } else if (proposed_device != src_device) {\n return false;\n }\n }\n if (proposed_src) {\n node->set_assigned_device_name(proposed_src->assigned_device_name());\n node->set_requested_device(proposed_src->requested_device());\n return true;\n } else {\n return false;\n }\n}\n} \nvoid PropagateDevices(const device_propagation::NodeFilter& node_filter,\n const device_propagation::DeviceFilter& device_filter,\n Graph* graph) {\n bool nodes_changed = true;\n while (nodes_changed) {\n nodes_changed = false;\n BreadthFirstTraversal(\n *graph, {}, 
[&nodes_changed, &node_filter, &device_filter](Node* node) {\n nodes_changed |=\n UpdateDeviceFromInputs(node_filter, device_filter, node);\n });\n }\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/common_runtime/device_propagation.h\"\n#include <string>\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/strings/match.h\"\n#include \"tensorflow/cc/framework/scope.h\"\n#include \"tensorflow/cc/ops/array_ops.h\"\n#include \"tensorflow/cc/ops/control_flow_ops.h\"\n#include \"tensorflow/cc/ops/control_flow_ops_internal.h\"\n#include \"tensorflow/core/framework/types.pb.h\"\n#include \"tensorflow/core/graph/graph.h\"\n#include \"tensorflow/core/lib/core/status_test_util.h\"\nusing ::testing::UnorderedElementsAreArray;\nnamespace tensorflow {\nnamespace {\nconst char kTpu0[] = \"/job:localhost/replica:0/task:0/device:TPU:0\";\nconst char kTpu1[] = \"/job:localhost/replica:0/task:0/device:TPU:1\";\nconst char kTpu2[] = \"/job:localhost/replica:0/task:0/device:TPU:2\";\nconst char kGpu0[] = \"/job:localhost/replica:0/task:0/device:GPU:0\";\nbool IsTPUDevice(StringPiece device_name) {\n return absl::StrContains(device_name, \"device:TPU:\");\n}\ndevice_propagation::NodeFilter TargetOps(\n const absl::flat_hash_set<std::string>& ops) {\n return [&ops](const Node& n) { return ops.contains(n.type_string()); };\n}\nabsl::flat_hash_map<std::string, std::string> GetNodeNameDevices(\n const Graph& graph) {\n absl::flat_hash_map<std::string, std::string> node_name_devices;\n for (const Node* node : graph.nodes()) {\n if (node->IsSource() || node->IsSink()) {\n continue;\n }\n const string& device = node->assigned_device_name().empty()\n ? node->requested_device()\n : node->assigned_device_name();\n node_name_devices[node->name()] = device;\n }\n return node_name_devices;\n}\nTEST(DevicePropagationTest, PropagateTPUDevices) {\n Scope scope = Scope::NewRootScope().ExitOnError();\n auto a = ops::Placeholder(scope.WithOpName(\"A\"), DT_FLOAT);\n a.node()->set_assigned_device_name(kTpu0);\n auto b = ops::Placeholder(scope.WithOpName(\"B\"), DT_FLOAT);\n b.node()->set_assigned_device_name(kTpu1);\n auto c = ops::Identity(scope.WithOpName(\"C\"), a);\n auto d =\n ops::Merge(scope.WithOpName(\"D\"), std::initializer_list<Input>{a, c});\n auto e =\n ops::Merge(scope.WithOpName(\"E\"), std::initializer_list<Input>{b, c});\n auto f = ops::Identity(scope.WithOpName(\"F\"), a);\n f.node()->set_assigned_device_name(kTpu2);\n Graph graph(OpRegistry::Global());\n TF_ASSERT_OK(scope.ToGraph(&graph));\n PropagateDevices(TargetOps({\"Identity\", \"Merge\"}), IsTPUDevice, &graph);\n EXPECT_THAT(\n GetNodeNameDevices(graph),\n UnorderedElementsAreArray(\n std::vector<std::pair<std::string, std::string>>{\n {\"A\", kTpu0}, \n {\"B\", kTpu1}, \n {\"C\", kTpu0},\n {\"D\", kTpu0},\n {\"E\", \"\"},\n {\"F\", kTpu2},\n }));\n}\nTEST(DevicePropagationTest, DoNotPropagateToUnsupportedOps) {\n Scope scope = Scope::NewRootScope().ExitOnError();\n auto a = ops::Placeholder(scope.WithOpName(\"A\"), DT_FLOAT);\n a.node()->set_assigned_device_name(kTpu0);\n auto b = ops::Identity(scope.WithOpName(\"B\"), a);\n Graph graph(OpRegistry::Global());\n TF_ASSERT_OK(scope.ToGraph(&graph));\n PropagateDevices(TargetOps({\"Merge\"}), IsTPUDevice, &graph);\n EXPECT_THAT(GetNodeNameDevices(graph),\n UnorderedElementsAreArray(\n std::vector<std::pair<std::string, std::string>>{\n {\"A\", kTpu0}, \n {\"B\", \"\"}, \n }));\n}\nTEST(DevicePropagationTest, DoNotPropagateUnmatchedDevices) {\n Scope scope = Scope::NewRootScope().ExitOnError();\n auto a = ops::Placeholder(scope.WithOpName(\"A\"), DT_FLOAT);\n 
a.node()->set_assigned_device_name(kGpu0);\n auto b = ops::Identity(scope.WithOpName(\"B\"), a);\n Graph graph(OpRegistry::Global());\n TF_ASSERT_OK(scope.ToGraph(&graph));\n PropagateDevices(TargetOps({\"Identity\"}), IsTPUDevice, &graph);\n EXPECT_THAT(GetNodeNameDevices(graph),\n UnorderedElementsAreArray(\n std::vector>{\n {\"A\", kGpu0}, \n {\"B\", \"\"}, \n }));\n}\nTEST(DevicePropagationTest, SwitchOpShouldIgnoreLoopCondOp) {\n Scope scope = Scope::NewRootScope().ExitOnError();\n auto a = ops::Placeholder(scope.WithOpName(\"A\"), DT_BOOL);\n auto b = ops::LoopCond(scope.WithOpName(\"B\"), a);\n auto c = ops::Placeholder(scope.WithOpName(\"C\"), DT_FLOAT);\n c.node()->set_assigned_device_name(kTpu2);\n auto d = ops::Switch(scope.WithOpName(\"D\"), c, b);\n Graph graph(OpRegistry::Global());\n TF_ASSERT_OK(scope.ToGraph(&graph));\n PropagateDevices(TargetOps({\"Switch\", \"LoopCond\"}), IsTPUDevice, &graph);\n EXPECT_THAT(\n GetNodeNameDevices(graph),\n UnorderedElementsAreArray(std::vector<\n std::pair>{\n {\"A\", \"\"},\n {\"B\", \"\"},\n {\"C\", kTpu2}, \n {\"D\", kTpu2},\n }));\n}\nTEST(DevicePropagationTest, MergeOpShouldIgnoreEnterOp) {\n Scope scope = Scope::NewRootScope().ExitOnError();\n auto a = ops::Placeholder(scope.WithOpName(\"A\"), DT_FLOAT);\n auto b = ops::Placeholder(scope.WithOpName(\"B\"), DT_FLOAT);\n b.node()->set_assigned_device_name(kTpu2);\n auto c = ops::internal::Enter(scope.WithOpName(\"C\"), a, \"Enter\");\n auto d = ops::NextIteration(scope.WithOpName(\"D\"), b);\n auto e =\n ops::Merge(scope.WithOpName(\"E\"), std::initializer_list{c, d});\n Graph graph(OpRegistry::Global());\n TF_ASSERT_OK(scope.ToGraph(&graph));\n PropagateDevices(TargetOps({\"Enter\", \"Merge\", \"NextIteration\"}), IsTPUDevice,\n &graph);\n EXPECT_THAT(\n GetNodeNameDevices(graph),\n UnorderedElementsAreArray(std::vector<\n std::pair>{\n {\"A\", \"\"},\n {\"B\", kTpu2}, \n {\"C\", \"\"},\n {\"D\", kTpu2},\n {\"E\", kTpu2},\n }));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device_propagation.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device_propagation_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":228,"cells":{"ID":{"kind":"string","value":"7a493795-3ee7-4cb2-a7b7-639d43aba311"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"softmax"},"File Path in Repository":{"kind":"string","value":"tensorflow/compiler/tf2tensorrt/convert/ops/softmax.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/lite/delegates/xnnpack/softmax_test.cc"},"Code":{"kind":"string","value":"#if GOOGLE_CUDA && GOOGLE_TENSORRT\n#include \"tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h\"\n#include \"tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h\"\n#include \"tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h\"\nnamespace tensorflow {\nnamespace tensorrt {\nnamespace convert {\nclass ConvertSoftmax : public OpConverterBase {\n public:\n explicit ConvertSoftmax(const OpConverterParams *params)\n : OpConverterBase(params) {}\n static constexpr std::array AllowedDataTypes() {\n return {DataType::DT_FLOAT, DataType::DT_HALF};\n }\n static constexpr std::array InputSpec() 
{\n return std::array{\n InputArgSpec::Create(\"logits\", TrtInputArg::kTensor)};\n }\n Status Validate() {\n const auto &params = *this->params_;\n const auto &inputs = params.inputs;\n ITensorProxyPtr logits_tensor = inputs.at(0).tensor();\n const int num_trt_dims = logits_tensor->getDimensions().nbDims;\n if (!num_trt_dims && params.use_implicit_batch) {\n return errors::InvalidArgument(\n \"TensorRT Softmax cannot apply on the batch dimension\");\n }\n return OkStatus();\n }\n Status Convert() {\n const auto &params = *this->params_;\n const auto &inputs = params.inputs;\n const auto &node_def = params.node_def;\n ITensorProxyPtr logits_tensor = inputs.at(0).tensor();\n const int num_trt_dims = logits_tensor->getDimensions().nbDims;\n nvinfer1::ISoftMaxLayer *layer =\n params.converter->network()->addSoftMax(*logits_tensor->trt_tensor());\n TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());\n params.converter->SetLayerName(layer, node_def);\n layer->setAxes(1 << (num_trt_dims - 1));\n ITensorProxyPtr output_tensor = layer->getOutput(0);\n params.outputs->push_back(TRT_TensorOrWeights(output_tensor));\n return OkStatus();\n }\n};\nREGISTER_DEFAULT_TRT_OP_CONVERTER(MakeConverterFunction(),\n \"Softmax\");\n} \n} \n} \n#endif "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \n#include \n#include \n#include \n#include \n#include \"tensorflow/lite/c/c_api_types.h\"\n#include \"tensorflow/lite/delegates/xnnpack/softmax_tester.h\"\n#include \"tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h\"\nnamespace tflite {\nnamespace xnnpack {\nTEST(Softmax, 4D) {\n std::unique_ptr\n xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),\n TfLiteXNNPackDelegateDelete);\n std::random_device random_device;\n auto rng = std::mt19937(random_device());\n auto shape_rng =\n std::bind(std::uniform_int_distribution(2, 5), std::ref(rng));\n const auto batch = shape_rng();\n const auto height = shape_rng();\n const auto width = shape_rng();\n const auto channels = shape_rng();\n SoftmaxTester()\n .Shape({batch, height, width, channels})\n .Test(xnnpack_delegate.get());\n}\nTEST(Softmax, 3D) {\n std::unique_ptr\n xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),\n TfLiteXNNPackDelegateDelete);\n std::random_device random_device;\n auto rng = std::mt19937(random_device());\n auto shape_rng =\n std::bind(std::uniform_int_distribution(2, 5), std::ref(rng));\n const auto batch = shape_rng();\n const auto width = shape_rng();\n const auto channels = shape_rng();\n SoftmaxTester().Shape({batch, width, channels}).Test(xnnpack_delegate.get());\n}\nTEST(Softmax, 2D) {\n std::unique_ptr\n xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),\n TfLiteXNNPackDelegateDelete);\n std::random_device random_device;\n auto rng = std::mt19937(random_device());\n auto shape_rng =\n std::bind(std::uniform_int_distribution(2, 5), std::ref(rng));\n const auto batch = shape_rng();\n const auto channels = shape_rng();\n SoftmaxTester().Shape({batch, channels}).Test(xnnpack_delegate.get());\n}\nTEST(Softmax, 1D) {\n std::unique_ptr\n xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),\n TfLiteXNNPackDelegateDelete);\n std::random_device random_device;\n auto rng = std::mt19937(random_device());\n auto shape_rng =\n std::bind(std::uniform_int_distribution(2, 5), std::ref(rng));\n const auto batch = shape_rng();\n SoftmaxTester().Shape({batch}).Test(xnnpack_delegate.get());\n}\nTEST(Softmax, DISABLED_Beta) {\n std::unique_ptr\n xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),\n 
TfLiteXNNPackDelegateDelete);\n std::random_device random_device;\n auto rng = std::mt19937(random_device());\n auto shape_rng =\n std::bind(std::uniform_int_distribution(2, 5), std::ref(rng));\n const auto batch = shape_rng();\n const auto height = shape_rng();\n const auto width = shape_rng();\n const auto channels = shape_rng();\n SoftmaxTester()\n .Shape({batch, height, width, channels})\n .Beta(0.1f)\n .Test(xnnpack_delegate.get());\n SoftmaxTester()\n .Shape({batch, height, width, channels})\n .Beta(10.0f)\n .Test(xnnpack_delegate.get());\n}\nTEST(Softmax, MultiThreading) {\n TfLiteXNNPackDelegateOptions delegate_options =\n TfLiteXNNPackDelegateOptionsDefault();\n delegate_options.num_threads = 2;\n std::unique_ptr\n xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),\n TfLiteXNNPackDelegateDelete);\n std::random_device random_device;\n auto rng = std::mt19937(random_device());\n auto shape_rng =\n std::bind(std::uniform_int_distribution(2, 5), std::ref(rng));\n const auto batch = shape_rng();\n const auto height = shape_rng();\n const auto width = shape_rng();\n const auto channels = shape_rng();\n SoftmaxTester()\n .Shape({batch, height, width, channels})\n .Test(xnnpack_delegate.get());\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/ops/softmax.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/softmax_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":229,"cells":{"ID":{"kind":"string","value":"f03a3830-3544-489a-837c-11b8e6cc9e8f"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/quiche"},"File Name":{"kind":"string","value":"load_balancer_server_id"},"File Path in Repository":{"kind":"string","value":"quiche/quic/load_balancer/load_balancer_server_id.cc"},"File Path for Unit Test":{"kind":"string","value":"quiche/quic/load_balancer/load_balancer_server_id_test.cc"},"Code":{"kind":"string","value":"#include \"quiche/quic/load_balancer/load_balancer_server_id.h\"\n#include \n#include \n#include \n#include \n#include \"absl/strings/escaping.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"quiche/quic/platform/api/quic_bug_tracker.h\"\nnamespace quic {\nLoadBalancerServerId::LoadBalancerServerId(absl::string_view data)\n : LoadBalancerServerId(absl::MakeSpan(\n reinterpret_cast(data.data()), data.length())) {}\nLoadBalancerServerId::LoadBalancerServerId(absl::Span data)\n : length_(data.length()) {\n if (length_ == 0 || length_ > kLoadBalancerMaxServerIdLen) {\n QUIC_BUG(quic_bug_433312504_02)\n << \"Attempted to create LoadBalancerServerId with length \"\n << static_cast(length_);\n length_ = 0;\n return;\n }\n memcpy(data_.data(), data.data(), data.length());\n}\nvoid LoadBalancerServerId::set_length(uint8_t length) {\n QUIC_BUG_IF(quic_bug_599862571_01,\n length == 0 || length > kLoadBalancerMaxServerIdLen)\n << \"Attempted to set LoadBalancerServerId length to \"\n << static_cast(length);\n length_ = length;\n}\nstd::string LoadBalancerServerId::ToString() const {\n return absl::BytesToHexString(\n absl::string_view(reinterpret_cast(data_.data()), length_));\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include 
\"quiche/quic/load_balancer/load_balancer_server_id.h\"\n#include \n#include \n#include \"absl/hash/hash_testing.h\"\n#include \"absl/types/span.h\"\n#include \"quiche/quic/platform/api/quic_expect_bug.h\"\n#include \"quiche/quic/platform/api/quic_test.h\"\nnamespace quic {\nnamespace test {\nnamespace {\nclass LoadBalancerServerIdTest : public QuicTest {};\nconstexpr uint8_t kRawServerId[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05,\n 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,\n 0x0c, 0x0d, 0x0e, 0x0f};\nTEST_F(LoadBalancerServerIdTest, CreateReturnsNullIfTooLong) {\n EXPECT_QUIC_BUG(EXPECT_FALSE(LoadBalancerServerId(\n absl::Span(kRawServerId, 16))\n .IsValid()),\n \"Attempted to create LoadBalancerServerId with length 16\");\n EXPECT_QUIC_BUG(\n EXPECT_FALSE(LoadBalancerServerId(absl::Span()).IsValid()),\n \"Attempted to create LoadBalancerServerId with length 0\");\n}\nTEST_F(LoadBalancerServerIdTest, CompareIdenticalExceptLength) {\n LoadBalancerServerId server_id(absl::Span(kRawServerId, 15));\n ASSERT_TRUE(server_id.IsValid());\n EXPECT_EQ(server_id.length(), 15);\n LoadBalancerServerId shorter_server_id(\n absl::Span(kRawServerId, 5));\n ASSERT_TRUE(shorter_server_id.IsValid());\n EXPECT_EQ(shorter_server_id.length(), 5);\n EXPECT_TRUE(shorter_server_id < server_id);\n EXPECT_FALSE(server_id < shorter_server_id);\n EXPECT_FALSE(shorter_server_id == server_id);\n}\nTEST_F(LoadBalancerServerIdTest, AccessorFunctions) {\n LoadBalancerServerId server_id(absl::Span(kRawServerId, 5));\n EXPECT_TRUE(server_id.IsValid());\n EXPECT_EQ(server_id.length(), 5);\n EXPECT_EQ(memcmp(server_id.data().data(), kRawServerId, 5), 0);\n EXPECT_EQ(server_id.ToString(), \"0001020304\");\n}\nTEST_F(LoadBalancerServerIdTest, CompareDifferentServerIds) {\n LoadBalancerServerId server_id(absl::Span(kRawServerId, 5));\n ASSERT_TRUE(server_id.IsValid());\n LoadBalancerServerId reverse({0x0f, 0x0e, 0x0d, 0x0c, 0x0b});\n ASSERT_TRUE(reverse.IsValid());\n EXPECT_TRUE(server_id < reverse);\n LoadBalancerServerId long_server_id(\n absl::Span(kRawServerId, 15));\n EXPECT_TRUE(long_server_id < reverse);\n}\nTEST_F(LoadBalancerServerIdTest, EqualityOperators) {\n LoadBalancerServerId server_id(absl::Span(kRawServerId, 15));\n ASSERT_TRUE(server_id.IsValid());\n LoadBalancerServerId shorter_server_id(\n absl::Span(kRawServerId, 5));\n ASSERT_TRUE(shorter_server_id.IsValid());\n EXPECT_FALSE(server_id == shorter_server_id);\n LoadBalancerServerId server_id2 = server_id;\n EXPECT_TRUE(server_id == server_id2);\n}\nTEST_F(LoadBalancerServerIdTest, SupportsHash) {\n LoadBalancerServerId server_id(absl::Span(kRawServerId, 15));\n ASSERT_TRUE(server_id.IsValid());\n LoadBalancerServerId shorter_server_id(\n absl::Span(kRawServerId, 5));\n ASSERT_TRUE(shorter_server_id.IsValid());\n LoadBalancerServerId different_server_id({0x0f, 0x0e, 0x0d, 0x0c, 0x0b});\n ASSERT_TRUE(different_server_id.IsValid());\n EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({\n server_id,\n shorter_server_id,\n different_server_id,\n }));\n}\nTEST_F(LoadBalancerServerIdTest, SetLengthInvalid) {\n LoadBalancerServerId server_id;\n EXPECT_QUIC_BUG(server_id.set_length(16),\n \"Attempted to set LoadBalancerServerId length to 16\");\n EXPECT_QUIC_BUG(server_id.set_length(0),\n \"Attempted to set LoadBalancerServerId length to 0\");\n server_id.set_length(1);\n EXPECT_EQ(server_id.length(), 1);\n server_id.set_length(15);\n EXPECT_EQ(server_id.length(), 15);\n}\n} \n} \n} "},"Code 
Url":{"kind":"string","value":"https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/load_balancer/load_balancer_server_id.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/load_balancer/load_balancer_server_id_test.cc"},"Commit Hash":{"kind":"string","value":"6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6"}}},{"rowIdx":230,"cells":{"ID":{"kind":"string","value":"642de5a1-8571-42ec-86b3-0c039e9b3c62"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"device_compilation_profiler"},"File Path in Repository":{"kind":"string","value":"tensorflow/compiler/jit/device_compilation_profiler.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/compiler/jit/device_compilation_profiler_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/compiler/jit/device_compilation_profiler.h\"\n#include \n#include \n#include \n#include \n#include \"absl/strings/str_cat.h\"\n#include \"tensorflow/compiler/jit/xla_activity.pb.h\"\n#include \"tensorflow/compiler/jit/xla_activity_listener.h\"\n#include \"tensorflow/core/framework/attr_value.pb.h\"\n#include \"tensorflow/core/framework/metrics.h\"\n#include \"tensorflow/core/platform/errors.h\"\n#include \"tensorflow/core/platform/status.h\"\n#include \"tsl/platform/mutex.h\"\nnamespace tensorflow {\nnamespace {\nbool ShouldBeMegamorphic(int64_t compile_count, int64_t execution_count) {\n const int64_t kCompileThreshold = 10;\n const int64_t kMinExecutionsPerCompile = 50;\n return compile_count > kCompileThreshold &&\n execution_count < kMinExecutionsPerCompile * compile_count;\n}\nvoid RegisterExecutionForCluster(\n const NameAttrList& function,\n DeviceCompilationProfiler::ClusterCompileStats* stats) {\n ++stats->execution_count;\n if (!stats->is_megamorphic &&\n ShouldBeMegamorphic(stats->compile_count, stats->execution_count)) {\n VLOG(1) << \"Marking \" << function.name()\n << \" as megamorphic, compile_count=\" << stats->compile_count\n << \" execution_count=\" << stats->execution_count;\n stats->is_megamorphic = true;\n }\n}\nconstexpr int64_t kDefaultCompilationThreshold = 2;\nconstexpr int64_t kMaxNumOngoingCompilations = kNumAsyncDeviceCompilerThreads;\n} \nDeviceCompilationProfiler::~DeviceCompilationProfiler() {\n mutex_lock lock(mu_);\n cluster_compile_stats_.clear();\n}\nabsl::StatusOr\nDeviceCompilationProfiler::GetCompileStats(const NameAttrList& function) const {\n mutex_lock lock(mu_);\n if (auto it = cluster_compile_stats_.find(function.name());\n it != cluster_compile_stats_.end()) {\n return it->second;\n }\n return errors::NotFound(\"Couldn't find compilation stats for cluster: \",\n function.name());\n}\nvoid DeviceCompilationProfiler::RegisterExecution(\n const NameAttrList& function) {\n mutex_lock lock(mu_);\n auto it =\n cluster_compile_stats_.emplace(function.name(), ClusterCompileStats{})\n .first;\n RegisterExecutionForCluster(function, &it->second);\n}\nStatus DeviceCompilationProfiler::RegisterCompilation(\n const NameAttrList& function, int64_t compile_time_us,\n bool used_persistent_cache) {\n metrics::UpdateXlaCompilationTime(compile_time_us);\n const std::string& function_name = function.name();\n mutex_lock lock(mu_);\n auto it =\n cluster_compile_stats_.emplace(function.name(), ClusterCompileStats{})\n .first;\n const uint64 compile_time_s = compile_time_us / 1.0e6;\n 
it->second.compile_count++;\n it->second.cumulative_compile_time_us += compile_time_us;\n VLOG(1) << \"Compiled \" << function_name << \" \" << it->second.compile_count\n << \" times, compile time: \" << compile_time_us\n << \" us, cumulative: \" << it->second.cumulative_compile_time_us\n << \" us (\"\n << tensorflow::strings::HumanReadableElapsedTime(compile_time_s)\n << \" / \"\n << tensorflow::strings::HumanReadableElapsedTime(\n it->second.cumulative_compile_time_us / 1.0e6)\n << \")\";\n XlaJitCompilationActivity jit_compilation_activity;\n jit_compilation_activity.set_cluster_name(function_name);\n jit_compilation_activity.set_compile_count(it->second.compile_count);\n jit_compilation_activity.set_compile_time_us(compile_time_us);\n jit_compilation_activity.set_cumulative_compile_time_us(\n it->second.cumulative_compile_time_us);\n jit_compilation_activity.set_used_persistent_cache(used_persistent_cache);\n return BroadcastXlaActivity(std::move(jit_compilation_activity));\n}\nbool DeviceCompilationProfiler::ShouldCompileCluster(\n const NameAttrList& function, DeviceCompileMode compile_mode,\n int64_t current_request_count) {\n std::optional compile_threshold;\n if (compile_mode == DeviceCompileMode::kLazy) {\n compile_threshold = kDefaultCompilationThreshold;\n } else if (compile_mode == DeviceCompileMode::kAsync) {\n compile_threshold = 0; \n }\n if (compile_mode == DeviceCompileMode::kStrict) {\n return true;\n }\n mutex_lock lock(mu_);\n auto [it, cluster_not_found] =\n cluster_compile_stats_.emplace(function.name(), ClusterCompileStats{});\n if (cluster_not_found) {\n RegisterExecutionForCluster(function, &it->second);\n }\n if (it->second.is_megamorphic) {\n BroadcastOptimizationRemark(XlaOptimizationRemark::MEGAMORPHIC_FUNCTION,\n function.name())\n .IgnoreError();\n VLOG(2) << \"Not compiling cluster \" << function.name()\n << \" because it is megamorphic.\";\n return false;\n }\n if (it->second.execution_count == 1) {\n return true;\n }\n if (compile_mode == DeviceCompileMode::kAsync) {\n if (num_ongoing_compilations_ >= kMaxNumOngoingCompilations) {\n VLOG(2) << \"Not asynchronously compiling cluster \" << function.name()\n << \" because of too many ongoing compilations.\";\n return false;\n }\n }\n bool reached_compile_threshold = current_request_count >= *compile_threshold;\n if (!reached_compile_threshold) {\n VLOG(2) << \"Not compiling cluster \" << function.name()\n << \" because it has not reached compile threshold; threshold is \"\n << *compile_threshold << \" execution count \"\n << current_request_count << \".\";\n }\n return reached_compile_threshold;\n}\nvoid DeviceCompilationProfiler::IncrementOngoingAsyncCompilations() {\n mutex_lock lock(mu_);\n num_ongoing_compilations_++;\n}\nvoid DeviceCompilationProfiler::DecrementOngoingAsyncCompilations() {\n mutex_lock lock(mu_);\n num_ongoing_compilations_--;\n}\nint64_t DeviceCompilationProfiler::GetNumOngoingAsyncCompilations() const {\n mutex_lock lock(mu_);\n return num_ongoing_compilations_;\n}\nstd::string DeviceCompilationProfiler::DebugString() const {\n std::string debug_string =\n \"DeviceCompilationProfiler {\\ncluster_compile_stats: {\\n\";\n {\n mutex_lock lock(mu_);\n for (const auto& [key, stats] : cluster_compile_stats_) {\n absl::StrAppend(&debug_string, key, \": \", stats.DebugString(), \"\\n\");\n }\n }\n absl::StrAppend(&debug_string, \"}\\nnum_ongoing_compilations=\",\n GetNumOngoingAsyncCompilations(), \"\\n}\\n\");\n return debug_string;\n}\n} "},"Unit Test - (Ground 
Truth)":{"kind":"string","value":"#include \"tensorflow/compiler/jit/device_compilation_profiler.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"tensorflow/compiler/jit/tests/device_compiler_test_helper.h\"\n#include \"tensorflow/compiler/jit/xla_activity.pb.h\"\n#include \"tensorflow/core/framework/attr_value.pb.h\"\nnamespace tensorflow {\nnamespace {\nTEST(DeviceCompilationProfilerTest, RegisterExecution) {\n DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();\n core::ScopedUnref profiler_ref(profiler);\n NameAttrList function;\n function.set_name(\"TestFunc\");\n for (int i = 0; i < 5; ++i) {\n profiler->RegisterExecution(function);\n }\n TF_ASSERT_OK_AND_ASSIGN(auto stats, profiler->GetCompileStats(function));\n EXPECT_EQ(stats.execution_count, 5);\n}\nTEST(DeviceCompilationProfilerTest, RegisterCompilation) {\n DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();\n core::ScopedUnref profiler_ref(profiler);\n auto listener = std::make_unique();\n auto listener_ptr = listener.get();\n RegisterXlaActivityListener(std::move(listener));\n NameAttrList function;\n function.set_name(\"TestFunc\");\n std::vector expected_activities;\n for (int i = 0; i < 5; ++i) {\n EXPECT_TRUE(profiler->RegisterCompilation(function, 4, false).ok());\n TF_ASSERT_OK_AND_ASSIGN(auto stats, profiler->GetCompileStats(function));\n XlaJitCompilationActivity expected_activity;\n expected_activity.set_cluster_name(function.name());\n expected_activity.set_compile_count(stats.compile_count);\n expected_activity.set_compile_time_us(4);\n expected_activity.set_cumulative_compile_time_us(\n stats.cumulative_compile_time_us);\n expected_activity.set_used_persistent_cache(false);\n expected_activities.push_back(expected_activity);\n }\n TF_ASSERT_OK_AND_ASSIGN(auto stats, profiler->GetCompileStats(function));\n EXPECT_EQ(stats.compile_count, 5);\n EXPECT_EQ(stats.cumulative_compile_time_us, 5 * 4);\n const auto& actual_activities = listener_ptr->GetListenerHistory();\n EXPECT_EQ(actual_activities.size(), expected_activities.size());\n for (size_t i = 0; i < actual_activities.size(); ++i) {\n EXPECT_EQ(actual_activities[i].SerializeAsString(),\n expected_activities[i].SerializeAsString());\n }\n}\nTEST(DeviceCompilationProfilerTest, OngoingAsyncCompilations) {\n DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();\n core::ScopedUnref profiler_ref(profiler);\n for (int i = 0; i < 5; ++i) {\n profiler->IncrementOngoingAsyncCompilations();\n }\n EXPECT_EQ(profiler->GetNumOngoingAsyncCompilations(), 5);\n for (int i = 0; i < 5; ++i) {\n profiler->DecrementOngoingAsyncCompilations();\n }\n EXPECT_EQ(profiler->GetNumOngoingAsyncCompilations(), 0);\n for (int i = 0; i < 5; ++i) {\n profiler->IncrementOngoingAsyncCompilations();\n profiler->DecrementOngoingAsyncCompilations();\n }\n EXPECT_EQ(profiler->GetNumOngoingAsyncCompilations(), 0);\n}\nTEST(DeviceCompilationProfilerTest, ShouldCompileClusterNotFound) {\n DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();\n core::ScopedUnref profiler_ref(profiler);\n NameAttrList function;\n function.set_name(\"TestFunc\");\n EXPECT_TRUE(\n profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));\n EXPECT_TRUE(\n profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0));\n EXPECT_TRUE(\n profiler->ShouldCompileCluster(function, DeviceCompileMode::kStrict, 0));\n}\nTEST(DeviceCompilationProfilerTest, ShouldCompileClusterFirstExecution) {\n DeviceCompilationProfiler* 
profiler = new DeviceCompilationProfiler();\n core::ScopedUnref profiler_ref(profiler);\n NameAttrList function;\n function.set_name(\"TestFunc\");\n profiler->RegisterExecution(function);\n EXPECT_TRUE(\n profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));\n EXPECT_TRUE(\n profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0));\n}\nTEST(DeviceCompilationProfilerTest, ShouldCompileClusterMegamorphic) {\n DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();\n core::ScopedUnref profiler_ref(profiler);\n NameAttrList function;\n function.set_name(\"TestFunc\");\n const int64_t kCompileThreshold = 10;\n const int64_t kMinExecutionsPerCompile = 50;\n for (int i = 0; i < kCompileThreshold + 1; ++i) {\n EXPECT_TRUE(profiler->RegisterCompilation(function, 1, false).ok());\n }\n profiler->RegisterExecution(function);\n EXPECT_FALSE(\n profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));\n EXPECT_FALSE(\n profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0));\n TF_ASSERT_OK_AND_ASSIGN(auto stats, profiler->GetCompileStats(function));\n EXPECT_TRUE(stats.is_megamorphic);\n EXPECT_TRUE(\n profiler->ShouldCompileCluster(function, DeviceCompileMode::kStrict, 0));\n for (int i = 0; i < kCompileThreshold * kMinExecutionsPerCompile + 1; ++i) {\n profiler->RegisterExecution(function);\n }\n EXPECT_FALSE(\n profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));\n EXPECT_FALSE(\n profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0));\n TF_ASSERT_OK_AND_ASSIGN(stats, profiler->GetCompileStats(function));\n EXPECT_TRUE(stats.is_megamorphic);\n EXPECT_TRUE(\n profiler->ShouldCompileCluster(function, DeviceCompileMode::kStrict, 0));\n}\nTEST(DeviceCompilationProfilerTest, ShouldCompileClusterAsync) {\n DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();\n core::ScopedUnref profiler_ref(profiler);\n NameAttrList function;\n function.set_name(\"TestFunc\");\n const int64_t kMaxNumOngoingCompilations = 10;\n for (int i = 0; i < kMaxNumOngoingCompilations; ++i) {\n profiler->IncrementOngoingAsyncCompilations();\n }\n profiler->RegisterExecution(function);\n EXPECT_TRUE(\n profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));\n profiler->RegisterExecution(function);\n EXPECT_FALSE(\n profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));\n profiler->DecrementOngoingAsyncCompilations();\n EXPECT_TRUE(\n profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));\n}\nTEST(DeviceCompilationProfilerTest, ShouldCompileClusterLazy) {\n DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();\n core::ScopedUnref profiler_ref(profiler);\n NameAttrList function;\n function.set_name(\"TestFunc\");\n constexpr int64_t kDefaultCompilationThreshold = 2;\n profiler->RegisterExecution(function);\n EXPECT_TRUE(\n profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0));\n profiler->RegisterExecution(function);\n for (int current_request_count = 0;\n current_request_count < kDefaultCompilationThreshold;\n ++current_request_count) {\n EXPECT_FALSE(profiler->ShouldCompileCluster(\n function, DeviceCompileMode::kLazy, current_request_count));\n }\n EXPECT_TRUE(profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy,\n kDefaultCompilationThreshold));\n}\n} \n} "},"Code 
Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compilation_profiler.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compilation_profiler_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":231,"cells":{"ID":{"kind":"string","value":"24bb0006-b05f-484b-99d7-4f82b5dc15ee"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/tensorstore"},"File Name":{"kind":"string","value":"executor"},"File Path in Repository":{"kind":"string","value":"tensorstore/util/executor.h"},"File Path for Unit Test":{"kind":"string","value":"tensorstore/util/executor_test.cc"},"Code":{"kind":"string","value":"#ifndef TENSORSTORE_UTIL_EXECUTOR_H_\n#define TENSORSTORE_UTIL_EXECUTOR_H_\n#include \n#include \n#include \n#include \"absl/base/attributes.h\"\n#include \"absl/functional/any_invocable.h\"\n#include \"absl/meta/type_traits.h\"\n#include \"tensorstore/internal/poly/poly.h\"\n#include \"tensorstore/internal/type_traits.h\"\nnamespace tensorstore {\nusing ExecutorTask = absl::AnyInvocable;\nusing Executor = poly::Poly<0, true, void(ExecutorTask) const>;\nclass InlineExecutor {\n public:\n template \n void operator()(Func&& func) const {\n std::forward(func)();\n }\n};\ntemplate \nclass ExecutorBoundFunction {\n public:\n using Executor = ExecutorType;\n using Function = FunctionType;\n template \n std::enable_if_t> \n operator()(T&&... arg) {\n executor(std::bind(std::move(function), std::forward(arg)...));\n }\n template \n std::enable_if_t> operator()(\n T&&... 
arg) const {\n executor(std::bind(function, std::forward(arg)...));\n }\n ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Executor executor;\n ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Function function;\n};\ntemplate \nstd::enable_if_t<\n !std::is_same_v, InlineExecutor>,\n ExecutorBoundFunction,\n absl::remove_cvref_t>>\nWithExecutor(Executor&& executor, Function&& function) {\n return {std::forward(executor), std::forward(function)};\n}\ntemplate \nstd::enable_if_t, InlineExecutor>,\n Function&&>\nWithExecutor(Executor&& executor, Function&& function) {\n return std::forward(function);\n}\n} \n#endif "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorstore/util/executor.h\"\n#include \n#include \n#include \nnamespace {\nusing ::tensorstore::Executor;\nusing ::tensorstore::InlineExecutor;\nusing ::tensorstore::WithExecutor;\nTEST(InlineExecutorTest, Basic) {\n Executor executor = InlineExecutor{};\n bool invoked = false;\n executor([&] { invoked = true; });\n EXPECT_TRUE(invoked);\n}\nTEST(WithExecutorTest, NonConst) {\n InlineExecutor executor;\n bool invoked = false;\n struct Func {\n void operator()(bool* x) const = delete;\n void operator()(bool* x) { *x = true; }\n };\n auto with_executor = WithExecutor(executor, Func{});\n with_executor(&invoked);\n EXPECT_TRUE(invoked);\n}\nTEST(WithExecutorTest, Const) {\n InlineExecutor executor;\n bool invoked = false;\n struct Func {\n void operator()(bool* x) const { *x = true; }\n void operator()(bool*) = delete;\n };\n const auto with_executor = WithExecutor(executor, Func{});\n with_executor(&invoked);\n EXPECT_TRUE(invoked);\n}\nTEST(ExecutorTest, MoveOnly) {\n Executor executor = InlineExecutor{};\n int value = 0;\n executor(std::bind([&](const std::unique_ptr& ptr) { value = *ptr; },\n std::make_unique(3)));\n EXPECT_EQ(3, value);\n}\nTEST(WithExecutorTest, MoveOnly) {\n Executor executor = InlineExecutor{};\n int value = 0;\n auto with_executor = WithExecutor(\n executor,\n std::bind([&](const std::unique_ptr& ptr) { value = *ptr; },\n std::make_unique(3)));\n with_executor();\n EXPECT_EQ(3, value);\n}\n} "},"Code Url":{"kind":"string","value":"https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/executor.h"},"Test Code Url":{"kind":"string","value":"https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/executor_test.cc"},"Commit Hash":{"kind":"string","value":"4f887a6430414cd6088e1743555015b10f116d50"}}},{"rowIdx":232,"cells":{"ID":{"kind":"string","value":"e5eb671c-fe51-4224-9402-beab750cbaee"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"float8"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/platform/float8.h"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/tests/float8_test.cc"},"Code":{"kind":"string","value":"#ifndef TENSORFLOW_CORE_PLATFORM_FLOAT8_H_\n#define TENSORFLOW_CORE_PLATFORM_FLOAT8_H_\n#include \"tsl/platform/ml_dtypes.h\"\nnamespace tensorflow {\ntypedef tsl::float8_e4m3fn float8_e4m3fn;\ntypedef tsl::float8_e5m2 float8_e5m2;\n} \n#endif "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \n#include \n#include \n#include \n#include \"xla/hlo/builder/xla_builder.h\"\n#include \"xla/test.h\"\n#include \"xla/tests/client_library_test_base.h\"\n#include \"xla/tests/test_macros.h\"\n#include \"tsl/platform/ml_dtypes.h\"\nnamespace xla {\nnamespace {\ntemplate \nclass 
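// A hedged reconstruction of the typed-test plumbing this file relies on;
// the DataTypes alias below is missing its template arguments, and given
// the typedefs in this row's header they are presumably the two float8
// types:
//
//   template <typename T>
//   class Float8Test : public ClientLibraryTestBase {};
//   using DataTypes = ::testing::Types<tsl::float8_e4m3fn, tsl::float8_e5m2>;
//   TYPED_TEST_SUITE(Float8Test, DataTypes);
//
// Each XLA_TYPED_TEST below then runs once per listed type, with TypeParam
// bound to the float8 type under test.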
Float8Test : public ClientLibraryTestBase {};\nusing DataTypes = ::testing::Types;\nTYPED_TEST_SUITE(Float8Test, DataTypes);\nXLA_TYPED_TEST(Float8Test, ScalarOperation) {\n XlaBuilder builder(this->TestName());\n auto x = ConstantR0(&builder, static_cast(2.0f));\n auto y = ConstantR0(&builder, static_cast(1.0f));\n Add(x, y);\n this->template ComputeAndCompareR0(\n &builder, static_cast(3.0f), {});\n}\nXLA_TYPED_TEST(Float8Test, LogOperation) {\n XlaBuilder builder(this->TestName());\n auto x = ConstantR0(&builder, static_cast(4.0f));\n Log(x);\n this->template ComputeAndCompareR0(\n &builder, static_cast(1.387f), {});\n}\nXLA_TYPED_TEST(Float8Test, CompareOperation) {\n XlaBuilder builder(this->TestName());\n auto x = ConstantR1(&builder, {TypeParam{1.0}, TypeParam{2.0}});\n auto y = ConstantR1(&builder, {TypeParam{1.0}, TypeParam{3.0}});\n Eq(x, y);\n this->template ComputeAndCompareR1(&builder, {true, false}, {});\n}\nXLA_TYPED_TEST(Float8Test, DotOperation) {\n XlaBuilder builder(this->TestName());\n auto x = ConstantR2(&builder, {{TypeParam{0.0}, TypeParam{1.0}},\n {TypeParam{2.0}, TypeParam{3.0}}});\n auto y = ConstantR2(&builder, {{TypeParam{3.0}, TypeParam{2.0}},\n {TypeParam{1.0}, TypeParam{0.0}}});\n Dot(x, y);\n this->template ComputeAndCompareR2(\n &builder,\n {{TypeParam{1.0}, TypeParam{0.0}}, {TypeParam{9.0}, TypeParam{4.0}}}, {});\n}\nXLA_TYPED_TEST(Float8Test, NegateScalar) {\n XlaBuilder builder(this->TestName());\n Neg(ConstantR0(&builder, static_cast(2.0f)));\n this->template ComputeAndCompareR0(\n &builder, static_cast(-2.0f), {});\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/float8.h"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/float8_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":233,"cells":{"ID":{"kind":"string","value":"2c0d47c7-2213-4e44-90ad-237902404512"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/cel-cpp"},"File Name":{"kind":"string","value":"timestamp_type"},"File Path in Repository":{"kind":"string","value":"common/types/timestamp_type.h"},"File Path for Unit Test":{"kind":"string","value":"common/types/timestamp_type_test.cc"},"Code":{"kind":"string","value":"#ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_TIMESTAMP_TYPE_H_\n#define THIRD_PARTY_CEL_CPP_COMMON_TYPES_TIMESTAMP_TYPE_H_\n#include \n#include \n#include \n#include \"absl/strings/string_view.h\"\n#include \"common/type_kind.h\"\nnamespace cel {\nclass Type;\nclass TypeParameters;\nclass TimestampType final {\n public:\n static constexpr TypeKind kKind = TypeKind::kTimestamp;\n static constexpr absl::string_view kName = \"google.protobuf.Timestamp\";\n TimestampType() = default;\n TimestampType(const TimestampType&) = default;\n TimestampType(TimestampType&&) = default;\n TimestampType& operator=(const TimestampType&) = default;\n TimestampType& operator=(TimestampType&&) = default;\n static TypeKind kind() { return kKind; }\n static absl::string_view name() { return kName; }\n static TypeParameters GetParameters();\n static std::string DebugString() { return std::string(name()); }\n constexpr void swap(TimestampType&) noexcept {}\n};\ninline constexpr void swap(TimestampType& lhs, TimestampType& rhs) noexcept {\n lhs.swap(rhs);\n}\ninline constexpr bool 
operator==(TimestampType, TimestampType) { return true; }\ninline constexpr bool operator!=(TimestampType lhs, TimestampType rhs) {\n return !operator==(lhs, rhs);\n}\ntemplate <typename H>\nH AbslHashValue(H state, TimestampType) {\n return std::move(state);\n}\ninline std::ostream& operator<<(std::ostream& out, const TimestampType& type) {\n return out << type.DebugString();\n}\n} \n#endif "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include <sstream>\n#include \"absl/hash/hash.h\"\n#include \"common/type.h\"\n#include \"internal/testing.h\"\nnamespace cel {\nnamespace {\nTEST(TimestampType, Kind) {\n EXPECT_EQ(TimestampType().kind(), TimestampType::kKind);\n EXPECT_EQ(Type(TimestampType()).kind(), TimestampType::kKind);\n}\nTEST(TimestampType, Name) {\n EXPECT_EQ(TimestampType().name(), TimestampType::kName);\n EXPECT_EQ(Type(TimestampType()).name(), TimestampType::kName);\n}\nTEST(TimestampType, DebugString) {\n {\n std::ostringstream out;\n out << TimestampType();\n EXPECT_EQ(out.str(), TimestampType::kName);\n }\n {\n std::ostringstream out;\n out << Type(TimestampType());\n EXPECT_EQ(out.str(), TimestampType::kName);\n }\n}\nTEST(TimestampType, Hash) {\n EXPECT_EQ(absl::HashOf(TimestampType()), absl::HashOf(TimestampType()));\n}\nTEST(TimestampType, Equal) {\n EXPECT_EQ(TimestampType(), TimestampType());\n EXPECT_EQ(Type(TimestampType()), TimestampType());\n EXPECT_EQ(TimestampType(), Type(TimestampType()));\n EXPECT_EQ(Type(TimestampType()), Type(TimestampType()));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/timestamp_type.h"},"Test Code Url":{"kind":"string","value":"https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/timestamp_type_test.cc"},"Commit Hash":{"kind":"string","value":"4552db5798fb0853b131b783d8875794334fae7f"}}},{"rowIdx":234,"cells":{"ID":{"kind":"string","value":"34e66ece-9ff8-4b9c-9545-dcb0094f4b53"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"partitioning_utils"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/common_runtime/partitioning_utils.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/common_runtime/partitioning_utils_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/common_runtime/partitioning_utils.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"tensorflow/core/common_runtime/arg_ret_placement.h\"\n#include \"tensorflow/core/common_runtime/graph_constructor.h\"\n#include \"tensorflow/core/framework/function.h\"\n#include \"tensorflow/core/framework/op.h\"\n#include \"tensorflow/core/framework/types.h\"\n#include \"tensorflow/core/graph/graph.h\"\n#include \"tensorflow/core/graph/graph_partition.h\"\nnamespace tensorflow {\nnamespace {\nStatus PartitionFunctionGraph(\n const DeviceSet& device_set, Graph* graph,\n std::unordered_map<string, GraphDef>* partitions,\n std::function<string(const Node*)> node_to_loc,\n std::function<string(const Edge*)> get_tensor_name_attr) {\n PartitionOptions partition_options;\n if (node_to_loc != nullptr) {\n partition_options.node_to_loc = node_to_loc;\n } else {\n partition_options.node_to_loc = [](const Node* node) {\n return node->assigned_device_name();\n };\n }\n int64_t edge_name_counter = 0;\n partition_options.new_name = [&edge_name_counter](const string& prefix) {\n return strings::StrCat(prefix, \"/_\", ++edge_name_counter);\n 
};\n partition_options.get_incarnation =\n [&device_set](const string& name) -> int64 {\n const Device* d = device_set.FindDeviceByName(name);\n if (d == nullptr) {\n return PartitionOptions::kIllegalIncarnation;\n } else {\n return d->attributes().incarnation();\n }\n };\n partition_options.control_flow_added = false;\n partition_options.get_tensor_name_attr = get_tensor_name_attr;\n partition_options.can_make_destructive_changes = true;\n return Partition(partition_options, graph, partitions);\n}\nstruct SendRecvPair {\n Node* send_node = nullptr;\n Node* recv_node = nullptr;\n};\nconstexpr char kTensorNameAttr[] = \"tensor_name\";\nStatus MakeSendRecvDependencyExplicit(Graph* graph) {\n absl::flat_hash_map send_recv_pairs;\n for (Node* node : graph->op_nodes()) {\n if (node->IsSend() || node->IsRecv()) {\n auto tensor_name_it = node->def().attr().find(kTensorNameAttr);\n if (tensor_name_it == node->def().attr().end()) {\n return errors::Internal(\n \"'\", kTensorNameAttr,\n \"' attribute is not found from node: \", node->DebugString());\n }\n if (node->IsSend()) {\n send_recv_pairs[tensor_name_it->second.s()].send_node = node;\n } else {\n send_recv_pairs[tensor_name_it->second.s()].recv_node = node;\n }\n }\n }\n for (const auto& [tensor_name, send_recv_pair] : send_recv_pairs) {\n if (send_recv_pair.send_node == nullptr ||\n send_recv_pair.recv_node == nullptr) {\n return errors::Internal(\n \"No matching Send/Recv nodes found for tensor_name = \", tensor_name);\n }\n graph->AddControlEdge(send_recv_pair.send_node, send_recv_pair.recv_node);\n }\n return absl::OkStatus();\n}\n} \nStatus PartitionFunctionGraph(\n const DeviceSet& device_set, std::unique_ptr graph,\n std::unordered_map>* subgraphs,\n std::function get_tensor_name_attr) {\n std::unordered_map partitions;\n TF_RETURN_IF_ERROR(\n PartitionFunctionGraph(device_set, graph.get(), &partitions,\n nullptr, get_tensor_name_attr));\n const OpRegistryInterface* default_registry =\n graph->flib_def().default_registry();\n graph.reset();\n for (auto& partition : partitions) {\n const string& device = partition.first;\n GraphDef& graph_def = partition.second;\n auto subgraph = std::make_unique(default_registry);\n GraphConstructorOptions opts;\n opts.allow_internal_ops = true;\n opts.expect_device_spec = true;\n TF_RETURN_IF_ERROR(\n ConvertGraphDefToGraph(opts, std::move(graph_def), subgraph.get()));\n subgraphs->emplace(device, std::move(subgraph));\n }\n return absl::OkStatus();\n}\nabsl::StatusOr> InsertTransferOps(\n const DeviceSet& device_set, std::unique_ptr graph) {\n auto node_to_loc = [](const Node* node) {\n return node->assigned_device_name();\n };\n bool has_multiple_devices = false;\n absl::optional location;\n for (const Node* node : graph->op_nodes()) {\n if (location) {\n if (*location != node_to_loc(node)) {\n has_multiple_devices = true;\n break;\n }\n } else {\n location = node_to_loc(node);\n }\n }\n if (!has_multiple_devices) {\n return graph;\n }\n auto new_graph = std::make_unique(graph->flib_def());\n std::unordered_map partitions;\n TF_RETURN_IF_ERROR(PartitionFunctionGraph(device_set, graph.get(),\n &partitions, node_to_loc,\n nullptr));\n GraphDef merged_graph_def;\n if (!partitions.empty()) {\n auto iter = partitions.begin();\n merged_graph_def = std::move(iter->second);\n while (++iter != partitions.end()) {\n merged_graph_def.MergeFrom(iter->second);\n }\n }\n GraphConstructorOptions opts;\n opts.allow_internal_ops = true;\n opts.expect_device_spec = true;\n 
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(opts, std::move(merged_graph_def),\n new_graph.get()));\n TF_RETURN_IF_ERROR(MakeSendRecvDependencyExplicit(new_graph.get()));\n return std::move(new_graph);\n}\nStatus UpdateArgAndRetvalMetadata(\n Graph* graph, std::vector<FunctionArgIndex>* arg_indices,\n std::vector<int>* ret_indices,\n std::vector<AllocatorAttributes>* arg_alloc_attrs,\n std::vector<AllocatorAttributes>* ret_alloc_attrs, bool ints_on_device) {\n std::vector<std::pair<Node*, FunctionArgIndex>> arg_nodes;\n std::vector<std::pair<Node*, int>> ret_nodes;\n const AttrValue* attr_value;\n for (Node* node : graph->op_nodes()) {\n if (node->IsArg()) {\n TF_RETURN_IF_ERROR(node->attrs().Find(\"index\", &attr_value));\n int index = static_cast<int>(attr_value->i());\n int sub_index = -1;\n if (node->attrs().Find(\"sub_index\", &attr_value).ok()) {\n sub_index = static_cast<int>(attr_value->i());\n }\n arg_nodes.emplace_back(node, FunctionArgIndex(index, sub_index));\n } else if (node->IsRetval()) {\n TF_RETURN_IF_ERROR(node->attrs().Find(\"index\", &attr_value));\n int index = static_cast<int>(attr_value->i());\n ret_nodes.emplace_back(node, index);\n }\n }\n auto arg_comparator = [](std::pair<Node*, FunctionArgIndex> a,\n std::pair<Node*, FunctionArgIndex> b) {\n return std::tie(a.second.index, a.second.sub_index) <\n std::tie(b.second.index, b.second.sub_index);\n };\n std::sort(arg_nodes.begin(), arg_nodes.end(), arg_comparator);\n auto ret_comparator = [](std::pair<Node*, int> a, std::pair<Node*, int> b) {\n return a.second < b.second;\n };\n std::sort(ret_nodes.begin(), ret_nodes.end(), ret_comparator);\n arg_indices->reserve(arg_nodes.size());\n for (const auto& pair : arg_nodes) arg_indices->push_back(pair.second);\n ret_indices->reserve(ret_nodes.size());\n for (const auto& pair : ret_nodes) ret_indices->push_back(pair.second);\n for (int i = 0; i < arg_nodes.size(); ++i) {\n Node* arg = arg_nodes[i].first;\n arg->AddAttr(\"index\", i);\n }\n if (arg_alloc_attrs != nullptr) {\n TF_RETURN_IF_ERROR(full_type::SingleDeviceSetAllocAttrsForArgs(\n arg_nodes, ints_on_device, *arg_alloc_attrs));\n }\n for (int i = 0; i < ret_nodes.size(); ++i) {\n Node* ret = ret_nodes[i].first;\n ret->AddAttr(\"index\", i);\n }\n if (ret_alloc_attrs) {\n TF_RETURN_IF_ERROR(full_type::SingleDeviceSetAllocAttrsForRets(\n ret_nodes, ints_on_device, *ret_alloc_attrs));\n }\n return absl::OkStatus();\n}\nstring FunctionNameGenerator::GetName() {\n while (true) {\n const string candidate = strings::StrCat(name_, \"_\", counter_++);\n if (flib_def_->Find(candidate) == nullptr) {\n return candidate;\n }\n }\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/common_runtime/partitioning_utils.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"tensorflow/cc/ops/array_ops.h\"\n#include \"tensorflow/cc/ops/function_ops.h\"\n#include \"tensorflow/core/common_runtime/device_factory.h\"\n#include \"tensorflow/core/common_runtime/device_mgr.h\"\n#include \"tensorflow/core/common_runtime/function_testlib.h\"\n#include \"tensorflow/core/common_runtime/int32_fulltype.h\"\n#include \"tensorflow/core/common_runtime/placer.h\"\n#include \"tensorflow/core/graph/graph.h\"\n#include \"tensorflow/core/lib/core/errors.h\"\n#include \"tensorflow/core/lib/core/status.h\"\n#include \"tensorflow/core/lib/core/status_test_util.h\"\n#include \"tensorflow/core/lib/gtl/array_slice.h\"\n#include \"tensorflow/core/platform/test.h\"\n#include \"tensorflow/core/public/session_options.h\"\nnamespace tensorflow {\nnamespace {\nusing ::testing::SizeIs;\nclass PartitioningUtilsTest : public ::testing::Test {\n public:\n void SetUp() override {\n SessionOptions options;\n auto* device_count = 
options.config.mutable_device_count();\n device_count->insert({\"CPU\", 2});\n std::vector> devices;\n TF_CHECK_OK(DeviceFactory::AddDevices(options, \"/job:a/replica:0/task:0\",\n &devices));\n device0_ = devices[0].get();\n device1_ = devices[1].get();\n device_mgr_ = std::make_unique(std::move(devices));\n for (auto d : device_mgr_->ListDevices()) {\n device_set_.AddDevice(d);\n }\n }\n void SwapGraph(Graph* graph, bool assign_device = false) {\n Scope s = Scope::NewRootScope();\n if (assign_device) {\n s = s.WithDevice(device0_->name());\n }\n auto x = ops::_Arg(s.WithOpName(\"x\"), DT_FLOAT, 0);\n auto y = ops::_Arg(s.WithOpName(\"y\"), DT_FLOAT, 1);\n auto id_x = ops::Identity(s.WithOpName(\"id_x\"), x);\n auto id_y = ops::Identity(s.WithOpName(\"id_y\"), y);\n auto dx_retval = ops::_Retval(s.WithOpName(\"retval1\"), id_y, 0);\n auto dy_retval = ops::_Retval(s.WithOpName(\"retval2\"), id_x, 1);\n TF_ASSERT_OK(s.ToGraph(graph));\n if (assign_device) {\n FunctionLibraryDefinition flib_def(OpRegistry::Global());\n Placer placer(graph, \"\", &flib_def, &device_set_, device0_);\n TF_ASSERT_OK(placer.Run());\n }\n }\n void TwoDeviceSwapGraph(Graph* graph) {\n Scope s = Scope::NewRootScope();\n Scope s1 = s.WithDevice(\"/job:a/replica:0/task:0/device:CPU:0\");\n Scope s2 = s.WithDevice(\"/job:a/replica:0/task:0/device:CPU:1\");\n auto x = ops::_Arg(s1.WithOpName(\"x\"), DT_FLOAT, 0);\n auto y = ops::_Arg(s2.WithOpName(\"y\"), DT_FLOAT, 1);\n auto id_x = ops::Identity(s1.WithOpName(\"id_x\"), x);\n auto id_y = ops::Identity(s2.WithOpName(\"id_y\"), y);\n auto dx_retval = ops::_Retval(s2.WithOpName(\"retval1\"), id_y, 0);\n auto dy_retval = ops::_Retval(s1.WithOpName(\"retval2\"), id_x, 1);\n TF_ASSERT_OK(s.ToGraph(graph));\n FunctionLibraryDefinition flib_def(OpRegistry::Global());\n Placer placer(graph, \"\", &flib_def, &device_set_, device0_);\n TF_ASSERT_OK(placer.Run());\n }\n void SubGraph(Graph* subgraph, DataType dtype,\n absl::Span arg_indices,\n absl::Span ret_indices) {\n Scope s = Scope::NewRootScope();\n Scope s1 = s.WithDevice(\"/job:a/replica:0/task:0/device:CPU:0\");\n CHECK_EQ(arg_indices.size(), ret_indices.size());\n for (size_t i = 0; i < arg_indices.size(); ++i) {\n auto x = ops::_Arg(s1.WithOpName(\"x\"), dtype, arg_indices[i]);\n auto id_x = ops::Identity(s1.WithOpName(\"id_x\"), x);\n auto dx_retval =\n ops::_Retval(s1.WithOpName(\"retval1\"), id_x, ret_indices[i]);\n }\n TF_ASSERT_OK(s.ToGraph(subgraph));\n FunctionLibraryDefinition flib_def(OpRegistry::Global());\n Placer placer(subgraph, \"\", &flib_def, &device_set_, device0_);\n TF_ASSERT_OK(placer.Run());\n }\n std::unique_ptr device_mgr_;\n Device* device0_ = nullptr; \n Device* device1_ = nullptr; \n DeviceSet device_set_;\n};\nTEST_F(PartitioningUtilsTest, GraphWithoutAssignedDevicesFails) {\n std::unique_ptr graph = std::make_unique(OpRegistry::Global());\n SwapGraph(graph.get());\n std::unordered_map> subgraphs;\n Status status =\n PartitionFunctionGraph(device_set_, std::move(graph), &subgraphs);\n ASSERT_TRUE(errors::IsInvalidArgument(status)) << status.ToString();\n}\nTEST_F(PartitioningUtilsTest, OneDevice) {\n std::unique_ptr graph = std::make_unique(OpRegistry::Global());\n SwapGraph(graph.get(), true);\n int num_nodes = graph->num_op_nodes();\n std::unordered_map> subgraphs;\n Status status =\n PartitionFunctionGraph(device_set_, std::move(graph), &subgraphs);\n ASSERT_TRUE(status.ok()) << status.ToString();\n ASSERT_EQ(1, subgraphs.size());\n const auto& pair = *subgraphs.begin();\n 
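{
  // A hedged aside: PartitionFunctionGraph's output is a map from full
  // device name to the per-device subgraph, so a generic consumer can walk
  // it without knowing how many devices participated. Illustrative only;
  // the assertions that matter for this test follow below.
  for (const auto& entry : subgraphs) {
    EXPECT_GT(entry.second->num_op_nodes(), 0);  // every partition is non-empty here
  }
}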
ASSERT_EQ(\"/job:a/replica:0/task:0/device:CPU:0\", pair.first);\n ASSERT_EQ(num_nodes, pair.second->num_op_nodes());\n}\nTEST_F(PartitioningUtilsTest, TwoDevices) {\n std::unique_ptr graph = std::make_unique(OpRegistry::Global());\n TwoDeviceSwapGraph(graph.get());\n std::unordered_map> subgraphs;\n Status status =\n PartitionFunctionGraph(device_set_, std::move(graph), &subgraphs);\n ASSERT_TRUE(status.ok()) << status.ToString();\n ASSERT_EQ(2, subgraphs.size());\n const auto& part1 = subgraphs[\"/job:a/replica:0/task:0/device:CPU:0\"];\n ASSERT_EQ(3, part1->num_op_nodes());\n const auto& part2 = subgraphs[\"/job:a/replica:0/task:0/device:CPU:1\"];\n ASSERT_EQ(3, part2->num_op_nodes());\n}\nTEST_F(PartitioningUtilsTest, InsertTransferOpsWithOneDevice) {\n auto graph = std::make_unique(OpRegistry::Global());\n Scope scope = Scope::NewRootScope().WithDevice(device0_->name());\n auto x = ops::_Arg(scope.WithOpName(\"x\"), DT_FLOAT, 0);\n auto id_x = ops::Identity(scope.WithOpName(\"id_x\"), x);\n auto ret_x = ops::_Retval(scope.WithOpName(\"ret_x\"), id_x, 0);\n TF_ASSERT_OK(scope.ToGraph(graph.get()));\n FunctionLibraryDefinition flib_def(OpRegistry::Global());\n Placer placer(graph.get(), \"\", &flib_def, &device_set_, device0_);\n TF_ASSERT_OK(placer.Run());\n EXPECT_EQ(graph->num_op_nodes(), 3);\n int send_count = 0, recv_count = 0;\n for (const auto* op : graph->op_nodes()) {\n if (op->IsSend())\n ++send_count;\n else if (op->IsRecv())\n ++recv_count;\n }\n ASSERT_EQ(send_count, 0);\n ASSERT_EQ(recv_count, 0);\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr new_graph,\n InsertTransferOps(device_set_, std::move(graph)));\n EXPECT_EQ(new_graph->num_op_nodes(), 3);\n send_count = recv_count = 0;\n for (const auto* op : new_graph->op_nodes()) {\n if (op->IsSend())\n ++send_count;\n else if (op->IsRecv())\n ++recv_count;\n }\n EXPECT_EQ(send_count, 0);\n EXPECT_EQ(recv_count, 0);\n}\nTEST_F(PartitioningUtilsTest, InsertTransferOpsWithTwoDevices) {\n auto graph = std::make_unique(OpRegistry::Global());\n Scope scope = Scope::NewRootScope();\n Scope scope1 = scope.WithDevice(device0_->name());\n Scope scope2 = scope.WithDevice(device1_->name());\n auto x = ops::_Arg(scope1.WithOpName(\"x\"), DT_FLOAT, 0);\n auto id_x = ops::Identity(scope2.WithOpName(\"id_x\"), x);\n auto ret_x = ops::_Retval(scope1.WithOpName(\"ret_x\"), id_x, 0);\n TF_ASSERT_OK(scope.ToGraph(graph.get()));\n FunctionLibraryDefinition flib_def(OpRegistry::Global());\n Placer placer(graph.get(), \"\", &flib_def, &device_set_, device0_);\n TF_ASSERT_OK(placer.Run());\n EXPECT_EQ(graph->num_op_nodes(), 3);\n int send_count = 0, recv_count = 0;\n for (const auto* op : graph->op_nodes()) {\n if (op->IsSend())\n ++send_count;\n else if (op->IsRecv())\n ++recv_count;\n }\n ASSERT_EQ(send_count, 0);\n ASSERT_EQ(recv_count, 0);\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr new_graph,\n InsertTransferOps(device_set_, std::move(graph)));\n EXPECT_EQ(new_graph->num_op_nodes(), 7);\n send_count = recv_count = 0;\n auto get_tensor_name_attr = [](const Node* node) -> std::string {\n auto tensor_name_it = node->def().attr().find(\"tensor_name\");\n return tensor_name_it->second.s();\n };\n absl::flat_hash_map> send_recv_pairs;\n for (auto* op : new_graph->op_nodes()) {\n if (op->IsSend()) {\n ++send_count;\n send_recv_pairs[get_tensor_name_attr(op)].first = op;\n } else if (op->IsRecv()) {\n ++recv_count;\n send_recv_pairs[get_tensor_name_attr(op)].second = op;\n }\n }\n EXPECT_EQ(send_count, 2);\n EXPECT_EQ(recv_count, 2);\n for (const auto& 
[tensor_name, send_recv_pair] : send_recv_pairs) {\n ASSERT_TRUE(send_recv_pair.first != nullptr &&\n send_recv_pair.second != nullptr);\n std::vector out_edges(\n send_recv_pair.first->out_edges().begin(),\n send_recv_pair.first->out_edges().end());\n ASSERT_THAT(out_edges, SizeIs(2));\n for (const Edge* out_edge : out_edges) {\n if (out_edge->dst() != new_graph->sink_node()) {\n EXPECT_TRUE(out_edge->IsControlEdge());\n EXPECT_EQ(out_edge->dst(), send_recv_pair.second);\n }\n }\n }\n}\nvoid CheckRetIndices(const std::vector& expected,\n const std::vector& actual) {\n ASSERT_EQ(expected.size(), actual.size());\n for (int i = 0; i < expected.size(); ++i) {\n ASSERT_EQ(expected[i], actual[i]) << \" at index \" << i;\n }\n}\nvoid CheckArgIndices(const std::vector& expected,\n const std::vector& actual) {\n ASSERT_EQ(expected.size(), actual.size());\n for (int i = 0; i < expected.size(); ++i) {\n ASSERT_EQ(expected[i].index, actual[i].index) << \" at index \" << i;\n ASSERT_EQ(expected[i].sub_index, actual[i].sub_index) << \" at index \" << i;\n }\n}\nvoid CheckAlloc(const std::vector& expected,\n const std::vector& actual) {\n ASSERT_EQ(expected.size(), actual.size());\n for (int i = 0; i < expected.size(); ++i) {\n ASSERT_EQ(expected[i], actual[i].on_host()) << \" at index \" << i;\n }\n}\nvoid CheckIndex(const Node& node, int expected_index) {\n const AttrValue* attr_value;\n TF_ASSERT_OK(node.attrs().Find(\"index\", &attr_value));\n int index = static_cast(attr_value->i());\n ASSERT_EQ(expected_index, index);\n}\nTEST_F(PartitioningUtilsTest, UpdateArgsAndRets) {\n auto graph = std::make_unique(OpRegistry::Global());\n SubGraph(graph.get(), DT_FLOAT, {3}, {5});\n std::vector arg_indices;\n std::vector ret_indices;\n std::vector arg_alloc_attrs;\n std::vector ret_alloc_attrs;\n Status status = UpdateArgAndRetvalMetadata(\n graph.get(), &arg_indices, &ret_indices, &arg_alloc_attrs,\n &ret_alloc_attrs, false);\n ASSERT_TRUE(status.ok()) << status.ToString();\n CheckArgIndices({{3, -1}}, arg_indices);\n CheckRetIndices({5}, ret_indices);\n CheckAlloc({false}, arg_alloc_attrs);\n CheckAlloc({false}, ret_alloc_attrs);\n std::unordered_map nodes = graph->BuildNodeNameIndex();\n ASSERT_EQ(1, nodes.count(\"x\"));\n CheckIndex(*nodes[\"x\"], 0);\n ASSERT_EQ(1, nodes.count(\"retval1\"));\n CheckIndex(*nodes[\"retval1\"], 0);\n}\nTEST_F(PartitioningUtilsTest, UpdateArgsAndRetsIntsNotOnDevice) {\n auto graph = std::make_unique(OpRegistry::Global());\n SubGraph(graph.get(), DT_INT32, {3}, {5});\n std::vector arg_indices;\n std::vector ret_indices;\n std::vector arg_alloc_attrs;\n std::vector ret_alloc_attrs;\n Int32FulltypePass int32_fulltype;\n TF_ASSERT_OK(\n int32_fulltype.ProcessGraph(graph.get(), false));\n Status status = UpdateArgAndRetvalMetadata(\n graph.get(), &arg_indices, &ret_indices, &arg_alloc_attrs,\n &ret_alloc_attrs, false);\n ASSERT_TRUE(status.ok()) << status.ToString();\n CheckAlloc({true}, arg_alloc_attrs);\n CheckAlloc({true}, ret_alloc_attrs);\n}\nTEST_F(PartitioningUtilsTest, UpdateArgsAndRetsIntsOnDevice) {\n auto graph = std::make_unique(OpRegistry::Global());\n SubGraph(graph.get(), DT_INT32, {3}, {5});\n std::vector arg_indices;\n std::vector ret_indices;\n std::vector arg_alloc_attrs;\n std::vector ret_alloc_attrs;\n Status status = UpdateArgAndRetvalMetadata(\n graph.get(), &arg_indices, &ret_indices, &arg_alloc_attrs,\n &ret_alloc_attrs, true);\n ASSERT_TRUE(status.ok()) << status.ToString();\n CheckAlloc({false}, arg_alloc_attrs);\n CheckAlloc({false}, 
ret_alloc_attrs);\n}\nTEST_F(PartitioningUtilsTest, UpdateArgsAndRets_Order) {\n auto graph = std::make_unique(OpRegistry::Global());\n SubGraph(graph.get(), DT_FLOAT, {9, 7, 5, 3, 1}, {2, 4, 6, 8, 10});\n const std::map sub_indices = {\n {7, 2}, {3, 1}, {1, 0}, {5, 2}, {9, 0}};\n const AttrValue* attr_value;\n for (Node* n : graph->op_nodes()) {\n if (n->IsArg()) {\n TF_ASSERT_OK(n->attrs().Find(\"index\", &attr_value));\n n->AddAttr(\"sub_index\",\n sub_indices.at(static_cast(attr_value->i())));\n }\n }\n std::vector arg_indices;\n std::vector ret_indices;\n std::vector arg_alloc_attrs;\n std::vector ret_alloc_attrs;\n Status status = UpdateArgAndRetvalMetadata(\n graph.get(), &arg_indices, &ret_indices, &arg_alloc_attrs,\n &ret_alloc_attrs, false);\n ASSERT_TRUE(status.ok()) << status.ToString();\n CheckArgIndices({{1, 0}, {3, 1}, {5, 2}, {7, 2}, {9, 0}}, arg_indices);\n CheckRetIndices({2, 4, 6, 8, 10}, ret_indices);\n CheckAlloc({false, false, false, false, false}, arg_alloc_attrs);\n CheckAlloc({false, false, false, false, false}, ret_alloc_attrs);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/partitioning_utils.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/partitioning_utils_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":235,"cells":{"ID":{"kind":"string","value":"908bf121-508e-4c4a-9d0d-8904854bb57f"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"ifrt_ops_kernel"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/tfrt/mlrt/kernel/ifrt_ops_kernel.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/tfrt/mlrt/kernel/ifrt_ops_kernel_test.cc"},"Code":{"kind":"string","value":"#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/status.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/python/ifrt/future.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tensorflow/core/framework/attr_value.pb.h\"\n#include \"tensorflow/core/framework/device_base.h\"\n#include \"tensorflow/core/framework/op_kernel.h\"\n#include \"tensorflow/core/framework/resource_handle.h\"\n#include \"tensorflow/core/framework/resource_var.h\"\n#include \"tensorflow/core/framework/tensor.h\"\n#include \"tensorflow/core/framework/tensor_shape.h\"\n#include \"tensorflow/core/framework/types.pb.h\"\n#include \"tensorflow/core/platform/protobuf.h\" \n#include \"tensorflow/core/tfrt/ifrt/checkpoint_loader.h\"\n#include \"tensorflow/core/tfrt/ifrt/ifrt_config.pb.h\"\n#include \"tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.h\"\n#include \"tensorflow/core/tfrt/ifrt/ifrt_model_context.h\"\n#include \"tensorflow/core/tfrt/ifrt/ifrt_model_restore_context.h\"\n#include \"tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h\"\n#include \"tensorflow/core/tfrt/mlrt/bytecode/bytecode.h\"\n#include \"tensorflow/core/tfrt/mlrt/interpreter/context.h\"\n#include \"tensorflow/core/tfrt/mlrt/interpreter/future.h\"\n#include \"tensorflow/core/tfrt/mlrt/kernel/context.h\"\n#include \"tensorflow/core/tfrt/mlrt/kernel/kernel.h\"\n#include 
\"tensorflow/core/tfrt/utils/fallback_tensor.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/tstring.h\"\nusing tensorflow::ifrt_serving::IfrtModelContext;\nnamespace tensorflow {\nnamespace tf_mlrt {\nnamespace {\nstruct MlrtIfrtRestoreVariableKernel : mlrt::KernelFrame {\n using KernelFrame::KernelFrame;\n static constexpr char kName[] = \"tf_mlrt.ifrt_restore_variable\";\n tensorflow::tfrt_stub::FallbackTensor prefix() const {\n DCHECK_GT(arguments().size(), 3);\n return arguments()[0].Get();\n }\n tensorflow::tfrt_stub::FallbackTensor tensor_names() const {\n DCHECK_GT(arguments().size(), 3);\n return arguments()[1].Get();\n }\n tensorflow::tfrt_stub::FallbackTensor shape_and_slices() const {\n DCHECK_GT(arguments().size(), 3);\n return arguments()[2].Get();\n }\n mlrt::bc::Vector restored_dtypes() const {\n return attributes().GetAs>(0);\n }\n mlrt::bc::Vector truncate_in_cast() const {\n return attributes().GetAs>(1);\n }\n std::vector var_handles() const {\n DCHECK_GT(arguments().size(), 3);\n std::vector result;\n result.reserve(arguments().size() - 3);\n for (int i = 3; i < arguments().size(); ++i) {\n result.push_back(\n arguments()[i].Get());\n }\n return result;\n }\n Context& context() { return execution_context().GetUserContext(); }\n void Invoke();\n private:\n static constexpr int kNumRestoreClusters = 4;\n absl::Status InvokeHelper();\n absl::Status ValidateInput();\n};\nvoid MlrtIfrtRestoreVariableKernel::Invoke() {\n absl::Status status = InvokeHelper();\n if (!status.ok()) {\n execution_context().Fail(std::move(status));\n return;\n }\n}\nabsl::Status MlrtIfrtRestoreVariableKernel::ValidateInput() {\n if (prefix().tensor().NumElements() != 1) {\n return absl::InvalidArgumentError(\n \"The prefix tensor must be a scalar tensor.\");\n }\n if (!TensorShapeUtils::IsVector(tensor_names().tensor().shape()) ||\n !TensorShapeUtils::IsVector(shape_and_slices().tensor().shape())) {\n return absl::InvalidArgumentError(\n absl::StrCat(\"Input tensor_names and shape_and_slices \"\n \"should be an 1-D tensors, got \",\n tensor_names().tensor().shape().DebugString(), \" and \",\n shape_and_slices().tensor().shape().DebugString()));\n }\n if (tensor_names().tensor().NumElements() !=\n shape_and_slices().tensor().NumElements()) {\n return absl::InvalidArgumentError(\n \"The tensor_names and shape_and_slices tensors must have the same \"\n \"number of elements.\");\n }\n if (tensor_names().tensor().NumElements() != var_handles().size()) {\n return absl::InvalidArgumentError(\n \"The tensor_names and var_handles must have the same number of \"\n \"elements.\");\n }\n if (tensor_names().tensor().NumElements() != restored_dtypes().size()) {\n return absl::InvalidArgumentError(\n \"The tensor_names and restored_dtypes must have the same number of \"\n \"elements.\");\n }\n if (tensor_names().tensor().NumElements() != truncate_in_cast().size()) {\n return absl::InvalidArgumentError(\n \"The tensor_names and truncate_in_cast must have the same number of \"\n \"elements.\");\n }\n return absl::OkStatus();\n}\nabsl::Status MlrtIfrtRestoreVariableKernel::InvokeHelper() {\n std::optional model_restore_context =\n context()\n .resource_context()\n .GetResource(\n ifrt_serving::kIfrtModelRestoreContextName);\n if (!model_restore_context.has_value()) {\n return absl::InternalError(\n \"Did not find IfrtModelRestoreContext resource.\");\n }\n if (*model_restore_context == nullptr) {\n return absl::InternalError(\"IfrtModelRestoreContext must not be null.\");\n }\n 
ifrt_serving::CheckpointLoader* checkpoint_loader =\n (*model_restore_context)->checkpoint_loader();\n if (!checkpoint_loader) {\n return absl::InternalError(\"CheckpointLoader must not be null.\");\n }\n TF_RETURN_IF_ERROR(ValidateInput());\n std::vector restored_dtypes_vec(\n restored_dtypes().begin(), restored_dtypes().end());\n std::vector truncate_in_cast_vec(truncate_in_cast().begin(),\n truncate_in_cast().end());\n return checkpoint_loader->Load(prefix(), var_handles(), tensor_names(),\n shape_and_slices(), restored_dtypes_vec,\n truncate_in_cast_vec, context());\n}\nclass MlrtIfrtLoadVariableKernel : public mlrt::KernelFrame {\n public:\n using KernelFrame::KernelFrame;\n static constexpr char kName[] = \"tf_mlrt.ifrt_load_variable\";\n const tensorflow::Tensor& variable_handler_tensor() const {\n DCHECK_GE(arguments().size(), 1);\n const tensorflow::Tensor& ret =\n arguments()[0].Get().tensor();\n DCHECK_EQ(ret.NumElements(), 1);\n return ret;\n }\n bool used_by_host() const {\n DCHECK_EQ(attributes().size(), 1);\n return attributes().GetAs(0);\n }\n Context& context() { return execution_context().GetUserContext(); }\n void Invoke();\n private:\n absl::Status InvokeHelper();\n};\nvoid MlrtIfrtLoadVariableKernel::Invoke() {\n absl::Status status = InvokeHelper();\n if (!status.ok()) {\n execution_context().Fail(std::move(status));\n return;\n }\n}\nabsl::Status MlrtIfrtLoadVariableKernel::InvokeHelper() {\n DCHECK_EQ(2, results().size());\n std::optional ifrt_model_context =\n context().resource_context().GetResource(\n \"IfrtModelContext\");\n if (!ifrt_model_context.has_value()) {\n return absl::FailedPreconditionError(\n \"LoadVariableOp: failed to fetch IfrtModelContext: \");\n }\n auto tensor_promise =\n mlrt::Promise::Allocate();\n auto tensor_future = tensor_promise.GetFuture();\n ifrt_serving::IfrtRestoreTensorRegistry& ifrt_restore_tensor_registry =\n (*ifrt_model_context)->GetRestoreTensorRegistry();\n auto& resource_handle = variable_handler_tensor().scalar()();\n std::string runtime_name =\n ifrt_serving::GetRuntimeNameFromVarHandle(resource_handle);\n if (used_by_host()) {\n if (ifrt_restore_tensor_registry.SetUsedByHost(runtime_name).ok()) {\n xla::ifrt::Future restored_tensor_future =\n ifrt_restore_tensor_registry.GetRestoredTensor(runtime_name);\n restored_tensor_future.OnReady(\n [tensor_promise = std::move(tensor_promise)](\n absl::StatusOr restored_tensor) mutable {\n if (!restored_tensor.ok()) {\n std::move(tensor_promise).SetError(restored_tensor.status());\n return;\n }\n std::move(tensor_promise)\n .Set(\n tensorflow::tfrt_stub::FallbackTensor(*restored_tensor));\n });\n } else {\n auto resource_manager = context()\n .fallback_request_state()\n .device_manager()\n .HostCPU()\n ->resource_manager();\n DCHECK(resource_manager);\n Var* variable;\n TF_RETURN_IF_ERROR(resource_manager->Lookup(\n resource_handle.container(), resource_handle.name(), &variable));\n if (tensorflow::Tensor* t = variable->tensor(); t != nullptr) {\n std::move(tensor_promise)\n .Set(\n tensorflow::tfrt_stub::FallbackTensor(*t));\n } else {\n std::move(tensor_promise)\n .SetError(absl::InternalError(\n absl::StrCat(\"Variable \", resource_handle.name(),\n \" is not found in either \"\n \"IfrtRestoreTensorRegistry or ResourceManager\")));\n }\n }\n } else {\n std::move(tensor_promise)\n .Set(\n tensorflow::tfrt_stub::FallbackTensor());\n }\n tensorflow::Tensor key_tensor(tensorflow::DT_STRING, {});\n key_tensor.scalar()() = runtime_name;\n 
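// A hedged sketch of the caller-visible contract assembled just below,
// matching how the unit test further down consumes it:
//
//   // result 0: scalar DT_STRING tensor holding the variable's runtime name
//   auto key = results[0].Get<tensorflow::tfrt_stub::FallbackTensor>();
//   // result 1: future resolving to the restored tensor, or to an empty
//   // FallbackTensor when used_by_host is false
//   auto future = results[1].Get<mlrt::Future>();
//   if (future.IsReady()) {
//     const auto& tensor =
//         future.Get<tensorflow::tfrt_stub::FallbackTensor>().tensor();
//   }
//
// `results` here stands for the caller's mlrt::Value span; the name is
// illustrative, not part of this file.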
results()[0].Set(tensorflow::tfrt_stub::FallbackTensor(key_tensor));\n results()[1].Set(std::move(tensor_future));\n return absl::OkStatus();\n}\nvoid RegisterTfMlrtIfrtKernels(mlrt::KernelRegistry& registry) {\n registry.Register();\n registry.Register();\n}\n} \nconst bool kUnused = [] {\n RegisterTfMlrtIfrtKernels(GetTfMlrtOptionalKernelRegistry());\n return true;\n}();\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/log/check.h\"\n#include \"absl/status/status.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/strings/substitute.h\"\n#include \"absl/synchronization/notification.h\"\n#include \"absl/types/span.h\"\n#include \"xla/python/ifrt/client.h\"\n#include \"xla/python/ifrt/future.h\"\n#include \"xla/python/ifrt/test_util.h\"\n#include \"xla/tsl/framework/test_util/mock_serving_device_selector.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"tensorflow/core/framework/resource_var.h\"\n#include \"tensorflow/core/framework/tensor.h\"\n#include \"tensorflow/core/framework/tensor_matcher.h\"\n#include \"tensorflow/core/framework/tensor_testutil.h\"\n#include \"tensorflow/core/platform/protobuf.h\" \n#include \"tensorflow/core/platform/resource_loader.h\"\n#include \"tensorflow/core/public/session_options.h\"\n#include \"tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h\"\n#include \"tensorflow/core/tfrt/fallback/fallback_state.h\"\n#include \"tensorflow/core/tfrt/fallback/op_kernel_runner.h\"\n#include \"tensorflow/core/tfrt/ifrt/checkpoint_loader.h\"\n#include \"tensorflow/core/tfrt/ifrt/ifrt_config.pb.h\"\n#include \"tensorflow/core/tfrt/ifrt/ifrt_model_context.h\"\n#include \"tensorflow/core/tfrt/ifrt/ifrt_model_restore_context.h\"\n#include \"tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h\"\n#include \"tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h\"\n#include \"tensorflow/core/tfrt/mlrt/bytecode/bytecode.h\"\n#include \"tensorflow/core/tfrt/mlrt/bytecode/executable.h\"\n#include \"tensorflow/core/tfrt/mlrt/interpreter/builtin_kernels.h\"\n#include \"tensorflow/core/tfrt/mlrt/interpreter/context.h\"\n#include \"tensorflow/core/tfrt/mlrt/interpreter/execute.h\"\n#include \"tensorflow/core/tfrt/mlrt/interpreter/interpreter_testutil.h\"\n#include \"tensorflow/core/tfrt/mlrt/interpreter/value.h\"\n#include \"tensorflow/core/tfrt/mlrt/kernel/context.h\"\n#include \"tensorflow/core/tfrt/mlrt/kernel/kernel.h\"\n#include \"tensorflow/core/tfrt/utils/fallback_tensor.h\"\n#include \"tsl/platform/env.h\"\n#include \"tsl/platform/refcount.h\"\n#include \"tsl/platform/status.h\"\n#include \"tsl/platform/status_matchers.h\"\n#include \"tsl/platform/statusor.h\"\n#include \"tsl/platform/threadpool.h\"\n#include \"tsl/platform/tstring.h\"\n#include \"tfrt/host_context/concurrent_work_queue.h\" \n#include \"tfrt/host_context/resource_context.h\" \nnamespace tensorflow {\nnamespace tf_mlrt {\nnamespace {\nusing tensorflow::test::AsScalar;\nusing tensorflow::test::AsTensor;\nusing tensorflow::test::ExpectEqual;\nusing tensorflow::test::TensorEq;\nconstexpr absl::string_view kContainer = \"test\";\nconstexpr absl::string_view kSharedName = \"y\";\nconstexpr absl::string_view kVariableRuntimeName = \"test__y\";\ntsl::thread::ThreadPool& GetThreadPool() {\n constexpr int kMaxParallelism = 16;\n static tsl::thread::ThreadPool* thread_pool =\n new 
tsl::thread::ThreadPool(tsl::Env::Default(), tsl::ThreadOptions(),\n \"IfrtSharding\", kMaxParallelism);\n return *thread_pool;\n}\nstd::string EncodeRestoreDtypesInt32(int num_outputs) {\n mlrt::bc::Buffer buffer;\n mlrt::bc::Allocator allocator(&buffer);\n auto ctor = mlrt::bc::New>(\n &allocator, num_outputs);\n for (int i = 0; i < num_outputs; ++i) {\n ctor.ConstructAt(i, tensorflow::DT_INT32);\n }\n return std::string(buffer.data(), buffer.size());\n}\nstd::string EncodeTruncateInCast(int num_outputs) {\n mlrt::bc::Buffer buffer;\n mlrt::bc::Allocator allocator(&buffer);\n auto ctor = mlrt::bc::New>(&allocator, num_outputs);\n for (int i = 0; i < num_outputs; ++i) {\n ctor.ConstructAt(i, false);\n }\n return std::string(buffer.data(), buffer.size());\n}\nmlrt::bc::Buffer CreateExecutableForIfrtRestoreVariableOp(\n int num_variables = 1) {\n mlrt::bc::Buffer buffer;\n mlrt::bc::Allocator allocator(&buffer);\n auto executable_ctor = mlrt::bc::New(&allocator);\n mlrt::testing::SymbolTable kernels;\n std::vector kernel_names = {\n \"tf_mlrt.createop\", \"tf_mlrt.executeop\", \"tf_mlrt.ifrt_restore_variable\",\n \"return\"};\n executable_ctor.construct_kernel_names(kernel_names.size())\n .Assign(kernel_names);\n kernels.Def(kernel_names);\n static constexpr int kNumAttributes =\n 5; \n mlrt::testing::AttributeTable attributes(executable_ctor.construct_attributes(\n kNumAttributes + 2 * (num_variables - 1)));\n std::string restore_dtypes = EncodeRestoreDtypesInt32(num_variables);\n attributes.Add(\"restore_dtypes\", restore_dtypes);\n std::vector truncate_in_cast(num_variables, false);\n attributes.Add(\"truncate_in_cast\", EncodeTruncateInCast(num_variables));\n for (int i = 0; i < num_variables; ++i) {\n attributes.Add(\n absl::StrCat(\"var_handle_op_node_def\", i),\n absl::Substitute(\n R\"pb(name: \"$0\"\n op: \"VarHandleOp\"\n device: \"/job:localhost/replica:0/task:0/device:CPU:0\"\n attr {\n key: \"container\"\n value { s: \"$1\" }\n }\n attr {\n key: \"shared_name\"\n value { s: \"$2\" }\n }\n attr {\n key: \"dtype\"\n value { type: DT_INT16 }\n }\n attr {\n key: \"shape\"\n value { shape { dim { size: 3 } } }\n }\n )pb\",\n absl::StrCat(\"VarHandleOp\", i), kContainer,\n absl::StrCat(kSharedName, i)));\n attributes.Add(absl::StrCat(\"var_handle_op_key\", i), i);\n }\n auto functions_ctor = executable_ctor.construct_functions(1);\n {\n auto function_ctor = functions_ctor.ConstructAt(0);\n function_ctor.construct_name(\"main\");\n mlrt::testing::SymbolTable regs;\n function_ctor.construct_input_regs(3).Assign(\n regs.Def({\"prefix_tensor\", \"name_tensor\", \"slice_tensor\"}));\n const int kNumKernels = 4;\n auto kernels_ctor =\n function_ctor.construct_kernels(kNumKernels + 2 * (num_variables - 1));\n int kernel_index = 0;\n std::vector variable_handle_names;\n variable_handle_names.reserve(num_variables);\n for (int i = 0; i < num_variables; ++i) {\n variable_handle_names.push_back(absl::StrCat(\"variable_handle\", i));\n std::string variable_handle_op_node_def =\n absl::StrCat(\"var_handle_op_node_def\", i);\n std::string variable_handle_op_key = absl::StrCat(\"var_handle_op_key\", i);\n {\n auto createop_ctor = kernels_ctor.ConstructAt(kernel_index);\n createop_ctor.set_code(kernels.Use(\"tf_mlrt.createop\"));\n createop_ctor.construct_arguments(0);\n createop_ctor.construct_results(0);\n createop_ctor.construct_attributes(2).Assign(\n {attributes.GetHandle(variable_handle_op_node_def),\n attributes.GetHandle(variable_handle_op_key)});\n kernel_index++;\n }\n {\n auto 
executeop_ctor = kernels_ctor.ConstructAt(kernel_index);
      executeop_ctor.set_code(kernels.Use("tf_mlrt.executeop"));
      executeop_ctor.construct_arguments(0);
      executeop_ctor.construct_results(1).Assign(
          {regs.Def(variable_handle_names.back())});
      executeop_ctor.construct_attributes(2).Assign(
          {attributes.GetHandle(variable_handle_op_node_def),
           attributes.GetHandle(variable_handle_op_key)});
      executeop_ctor.construct_last_uses(1).Assign({0});
      kernel_index++;
    }
  }
  {
    std::vector args;
    args.reserve(3 + num_variables);
    args.push_back("prefix_tensor");
    args.push_back("name_tensor");
    args.push_back("slice_tensor");
    for (int i = 0; i < num_variables; ++i) {
      args.push_back(variable_handle_names[i]);
    }
    auto restore_ctor = kernels_ctor.ConstructAt(kernel_index);
    restore_ctor.set_code(kernels.Use("tf_mlrt.ifrt_restore_variable"));
    restore_ctor.construct_arguments(args.size()).Assign(regs.Use(args));
    restore_ctor.construct_results(0);
    restore_ctor.construct_attributes(2).Assign(
        {attributes.GetHandle("restore_dtypes"),
         attributes.GetHandle("truncate_in_cast")});
    kernel_index++;
  }
  {
    auto return_ctor = kernels_ctor.ConstructAt(kernel_index);
    return_ctor.set_code(kernels.Use("return"));
    return_ctor.construct_arguments(0);
    kernel_index++;
  }
  function_ctor.set_num_regs(regs.size());
  }
  return buffer;
}

mlrt::bc::Buffer CreateExecutableForIfrtLoadVariableOp(
    bool redundant_ifrt_load_variable_op = false, bool used_by_host = false) {
  mlrt::bc::Buffer buffer;
  mlrt::bc::Allocator allocator(&buffer);
  auto executable_ctor = mlrt::bc::New(&allocator);
  mlrt::testing::SymbolTable kernels;
  std::vector kernel_names = {
      "tf_mlrt.createop", "tf_mlrt.executeop", "tf_mlrt.ifrt_load_variable",
      "return"};
  executable_ctor.construct_kernel_names(kernel_names.size())
      .Assign(kernel_names);
  kernels.Def(kernel_names);
  mlrt::testing::AttributeTable attributes(
      executable_ctor.construct_attributes(3));
  attributes.Add("var_handle_op_node_def",
                 absl::Substitute(
                     R"pb(name: "VarHandleOp"
                          op: "VarHandleOp"
                          device: "/job:localhost/replica:0/task:0/device:CPU:0"
                          attr {
                            key: "container"
                            value { s: "$0" }
                          }
                          attr {
                            key: "shared_name"
                            value { s: "$1" }
                          }
                          attr {
                            key: "dtype"
                            value { type: DT_INT32 }
                          }
                          attr {
                            key: "shape"
                            value { shape { dim { size: 1 } } }
                          }
                     )pb",
                     kContainer, kSharedName));
  attributes.Add("var_handle_op_key", 0);
  attributes.Add("used_by_host", used_by_host);
  auto functions_ctor = executable_ctor.construct_functions(1);
  {
    auto function_ctor = functions_ctor.ConstructAt(0);
    function_ctor.construct_name("main");
    mlrt::testing::SymbolTable regs;
    function_ctor.construct_output_regs(2).Assign(
        {regs.Def("output_tensor"), regs.Def("output_future")});
    const int kNumKernels = 4 + (redundant_ifrt_load_variable_op ? 1 : 0);
    auto kernels_ctor = function_ctor.construct_kernels(kNumKernels);
    int kernel_index = 0;
    {
      auto createop_ctor = kernels_ctor.ConstructAt(kernel_index);
      createop_ctor.set_code(kernels.Use("tf_mlrt.createop"));
      createop_ctor.construct_arguments(0);
      createop_ctor.construct_results(0);
      createop_ctor.construct_attributes(2).Assign(
          {attributes.GetHandle("var_handle_op_node_def"),
           attributes.GetHandle("var_handle_op_key")});
      kernel_index++;
    }
    {
      auto executeop_ctor = kernels_ctor.ConstructAt(kernel_index);
      executeop_ctor.set_code(kernels.Use("tf_mlrt.executeop"));
      executeop_ctor.construct_arguments(0);
      executeop_ctor.construct_results(1).Assign({regs.Def("variable_handle")});
      executeop_ctor.construct_attributes(2).Assign(
          {attributes.GetHandle("var_handle_op_node_def"),
           attributes.GetHandle("var_handle_op_key")});
      kernel_index++;
    }
    {
      auto kernel_ctor = kernels_ctor.ConstructAt(kernel_index);
      kernel_ctor.set_code(kernels.Use("tf_mlrt.ifrt_load_variable"));
      kernel_ctor.construct_results(2).Assign(
          {regs.Use("output_tensor"), regs.Use("output_future")});
      kernel_ctor.construct_arguments(1).Assign({regs.Use("variable_handle")});
      kernel_ctor.construct_attributes(1).Assign(
          {attributes.GetHandle("used_by_host")});
      kernel_ctor.construct_last_uses(1).Assign(
          {redundant_ifrt_load_variable_op ? 0 : 1});
      kernel_index++;
    }
    if (redundant_ifrt_load_variable_op) {
      auto kernel_ctor = kernels_ctor.ConstructAt(kernel_index);
      kernel_ctor.set_code(kernels.Use("tf_mlrt.ifrt_load_variable"));
      kernel_ctor.construct_results(2).Assign(
          {regs.Def("dummy"), regs.Def("dummy_future2")});
      kernel_ctor.construct_attributes(1).Assign(
          {attributes.GetHandle("used_by_host")});
      kernel_ctor.construct_arguments(1).Assign({regs.Use("variable_handle")});
      kernel_ctor.construct_last_uses(1).Assign({1});
      kernel_index++;
    }
    {
      auto kernel_ctor = kernels_ctor.ConstructAt(kernel_index);
      kernel_ctor.set_code(kernels.Use("return"));
      kernel_ctor.construct_arguments(2).Assign(
          {regs.Use("output_tensor"), regs.Use("output_future")});
      kernel_index++;
    }
    DCHECK_EQ(kernel_index, kNumKernels);
    function_ctor.set_num_regs(regs.size());
  }
  return buffer;
}

class KernelTest : public ::testing::Test {
 protected:
  void SetUp() override {
    mlrt::RegisterBuiltinKernels(registry_);
    RegisterTfMlrtKernels(registry_);
    execution_work_queue_ = tfrt::CreateMultiThreadedWorkQueue(4, 4);
    restore_work_queue_ = tfrt::CreateMultiThreadedWorkQueue(4, 4);
    TF_ASSERT_OK_AND_ASSIGN(fallback_state_, tfrt_stub::FallbackState::Create(
                                                 session_options_, fdef_lib_));
    runner_ = [](const std::function& f) { f(); };
    fallback_request_state_ =
        std::make_unique(
            &runner_, &fallback_state_->device_manager(), 0,
            &runner_table_, &resource_array_,
            nullptr,
            std::nullopt,
            &fallback_state_->process_function_library_runtime());
    TF_ASSERT_OK_AND_ASSIGN(client_, xla::ifrt::test_util::GetClient());
    resource_context_
        .CreateResource(
            "IfrtModelContext", client_, ifrt_core_selector_.get(),
            &GetThreadPool(), nullptr);
    tf_context_ = std::make_unique(fallback_request_state_.get(),
                                   &resource_context_);
    ifrt_model_context_ =
        resource_context_
            .GetResource(
                "IfrtModelContext")
            .value();
    ifrt_model_context_->set_checkpoint_loader_queue(restore_work_queue_.get());
    resource_context_
        .CreateResource(
            ifrt_serving::kIfrtModelRestoreContextName,
            std::make_unique(
                &ifrt_model_context_->GetRestoreTensorRegistry(),
                ifrt_model_context_->checkpoint_loader_queue()));
    serving_device_selector_ = std::make_unique();
    ifrt_core_selector_ =
        std::make_unique(
            serving_device_selector_.get(),
            client_->addressable_device_count());
  }
  std::unique_ptr serving_device_selector_;
  std::unique_ptr ifrt_core_selector_;
  mlrt::KernelRegistry registry_;
  std::unique_ptr execution_work_queue_;
  std::unique_ptr restore_work_queue_;
  tensorflow::SessionOptions session_options_;
  tensorflow::FunctionDefLibrary fdef_lib_;
  std::function)> runner_;
  tfrt_stub::OpKernelRunnerTable runner_table_;
  tfd::FallbackResourceArray resource_array_;
  std::unique_ptr fallback_state_;
  tfrt::ResourceContext resource_context_;
  std::shared_ptr client_;
  std::unique_ptr fallback_request_state_;
  std::unique_ptr tf_context_;
  tensorflow::ifrt_serving::IfrtModelContext* ifrt_model_context_;
};

TEST_F(KernelTest, IfrtLoadVariableOpCanGetTensorFromResourceManager) {
  auto buffer = CreateExecutableForIfrtLoadVariableOp(false, true);
  mlrt::bc::Executable executable(buffer.data());
  mlrt::LoadedExecutable loaded_executable(executable, registry_);
  mlrt::ExecutionContext execution_context(&loaded_executable);
  execution_context.set_work_queue(execution_work_queue_.get());
  execution_context.AddUserContext(std::move(tf_context_));
  tensorflow::Tensor input_tensor;
  TF_CHECK_OK(tensorflow::Tensor::BuildTensor(DT_INT32, {}, &input_tensor));
  input_tensor.scalar()() = 1234;
  tsl::core::RefCountPtr variable(new Var(DT_INT32));
  *variable->tensor() = input_tensor;
  variable->is_initialized = true;
  ASSERT_OK(
      fallback_state_->device_manager().HostCPU()->resource_manager()->Create(
          std::string(kContainer), std::string(kSharedName), &(*variable)));
  std::vector args;
  std::vector last_uses;
  std::vector results;
  results.resize(2);
  absl::Notification notification;
  execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
  execution_context.Call(executable.functions()[0], last_uses,
                         absl::MakeSpan(args), absl::MakeSpan(results));
  mlrt::Execute(execution_context);
  notification.WaitForNotification();
  TF_ASSERT_OK(execution_context.status());
  ExpectEqual(results[0].Get().tensor(),
              AsScalar(tsl::tstring(kVariableRuntimeName)));
  auto returned_future = results[1].Get();
  ASSERT_TRUE(returned_future.IsReady());
  EXPECT_THAT(returned_future.Get().tensor(), TensorEq(input_tensor));
}

TEST_F(KernelTest, IfrtLoadVariableOp) {
  auto buffer = CreateExecutableForIfrtLoadVariableOp();
  mlrt::bc::Executable executable(buffer.data());
  mlrt::LoadedExecutable loaded_executable(executable, registry_);
  mlrt::ExecutionContext execution_context(&loaded_executable);
  execution_context.set_work_queue(execution_work_queue_.get());
  execution_context.AddUserContext(std::move(tf_context_));
  tensorflow::Tensor input_tensor;
  TF_CHECK_OK(tensorflow::Tensor::BuildTensor(DT_INT32, {}, &input_tensor));
  input_tensor.scalar()() = 1234;
  auto input_tensor_promise = xla::ifrt::Future::CreatePromise();
  auto input_tensor_future = xla::ifrt::Future(input_tensor_promise);
  ifrt_serving::IfrtRestoreTensorRegistry::RestoredTensorInfo
      restore_tensor_info{.dtype_and_shape = {.dtype = input_tensor.dtype(),
                                              .shape = input_tensor.shape()},
                          .tensor_future = input_tensor_future};
  input_tensor_promise.Set(input_tensor);
  TF_ASSERT_OK(ifrt_model_context_->GetRestoreTensorRegistry().TryRegister(
      kVariableRuntimeName, restore_tensor_info));
  std::vector args;
  std::vector last_uses;
  std::vector results;
  results.resize(2);
  absl::Notification notification;
  execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
  execution_context.Call(executable.functions()[0], last_uses,
                         absl::MakeSpan(args), absl::MakeSpan(results));
  mlrt::Execute(execution_context);
  notification.WaitForNotification();
  TF_ASSERT_OK(execution_context.status());
  ExpectEqual(results[0].Get().tensor(),
              AsScalar(tsl::tstring(kVariableRuntimeName)));
  auto returned_future = results[1].Get();
  ASSERT_TRUE(returned_future.IsReady());
  EXPECT_THAT(returned_future.Get().tensor(), TensorEq(tensorflow::Tensor()));
}

TEST_F(KernelTest, DuplicateIfrtLoadVariableOpShallSucceed) {
  auto buffer = CreateExecutableForIfrtLoadVariableOp(true);
  mlrt::bc::Executable executable(buffer.data());
  mlrt::LoadedExecutable loaded_executable(executable, registry_);
  mlrt::ExecutionContext execution_context(&loaded_executable);
  execution_context.set_work_queue(execution_work_queue_.get());
  execution_context.AddUserContext(std::move(tf_context_));
  tensorflow::Tensor input_tensor;
  TF_CHECK_OK(tensorflow::Tensor::BuildTensor(DT_INT32, {}, &input_tensor));
  input_tensor.scalar()() = 1234;
  auto input_tensor_promise = xla::ifrt::Future::CreatePromise();
  auto input_tensor_future = xla::ifrt::Future(input_tensor_promise);
  ifrt_serving::IfrtRestoreTensorRegistry::RestoredTensorInfo
      restore_tensor_info{.dtype_and_shape = {.dtype = input_tensor.dtype(),
                                              .shape = input_tensor.shape()},
                          .tensor_future = input_tensor_future};
  input_tensor_promise.Set(input_tensor);
  TF_ASSERT_OK(ifrt_model_context_->GetRestoreTensorRegistry().TryRegister(
      kVariableRuntimeName, restore_tensor_info));
  std::vector args;
  std::vector last_uses;
  std::vector results;
  results.resize(2);
  absl::Notification notification;
  execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
  execution_context.Call(executable.functions()[0], last_uses,
                         absl::MakeSpan(args), absl::MakeSpan(results));
  mlrt::Execute(execution_context);
  notification.WaitForNotification();
  TF_ASSERT_OK(execution_context.status());
  ExpectEqual(results[0].Get().tensor(),
              AsScalar(tsl::tstring(kVariableRuntimeName)));
  auto returned_future = results[1].Get();
  ASSERT_TRUE(returned_future.IsReady());
  EXPECT_THAT(returned_future.Get().tensor(), TensorEq(tensorflow::Tensor()));
}

TEST_F(KernelTest, IfrtRestoreVariableOp) {
  std::string checkpoint_prefix =
      tensorflow::GetDataDependencyFilepath(
          "tensorflow/core/tfrt/mlrt/kernel/testdata/"
          "gen_checkpoint_data/variables") +
      "/variables";
  auto buffer = CreateExecutableForIfrtRestoreVariableOp();
  mlrt::bc::Executable executable(buffer.data());
  mlrt::LoadedExecutable loaded_executable(executable, registry_);
  mlrt::ExecutionContext execution_context(&loaded_executable);
  execution_context.set_work_queue(execution_work_queue_.get());
  execution_context.AddUserContext(std::move(tf_context_));
  xla::ifrt::Future uninitialized_entry =
      ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
          kVariableRuntimeName);
  ASSERT_TRUE(uninitialized_entry.IsReady());
  EXPECT_THAT(uninitialized_entry.Await().status(),
              ::tsl::testing::StatusIs(absl::StatusCode::kNotFound));
  std::vector args;
  args.resize(3);
  tensorflow::Tensor prefix_tensor =
      AsTensor({tsl::tstring(checkpoint_prefix)});
  args.at(0).Set(tfrt_stub::FallbackTensor(std::move(prefix_tensor)));
  tensorflow::Tensor name_tensor =
      AsTensor({tsl::tstring("w/.ATTRIBUTES/VARIABLE_VALUE")});
  args.at(1).Set(tfrt_stub::FallbackTensor(std::move(name_tensor)));
  tensorflow::Tensor slice_tensor = AsTensor({tsl::tstring("")});
  args.at(2).Set(tfrt_stub::FallbackTensor(std::move(slice_tensor)));
  std::vector last_uses = {true, true, true};
  std::vector results;
  absl::Notification notification;
  execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
  execution_context.Call(executable.functions()[0], last_uses,
                         absl::MakeSpan(args), absl::MakeSpan(results));
  mlrt::Execute(execution_context);
  notification.WaitForNotification();
  TF_ASSERT_OK(execution_context.status());
  xla::ifrt::Future restored_future =
      ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
          absl::StrCat(kVariableRuntimeName, 0));
  absl::StatusOr restored_tensor = restored_future.Await();
  TF_ASSERT_OK(restored_tensor.status());
  EXPECT_THAT(*restored_tensor, TensorEq(AsTensor({1, 2, 3}, {3})));
}

TEST_F(KernelTest, IfrtRestoreVariableOp4Variables) {
  std::string checkpoint_prefix =
      tensorflow::GetDataDependencyFilepath(
          "tensorflow/core/tfrt/mlrt/kernel/testdata/"
          "gen_checkpoint_data/variables") +
      "/variables";
  static constexpr int kNumVariables = 4;
  auto buffer = CreateExecutableForIfrtRestoreVariableOp(kNumVariables);
  mlrt::bc::Executable executable(buffer.data());
  mlrt::LoadedExecutable loaded_executable(executable, registry_);
  mlrt::ExecutionContext execution_context(&loaded_executable);
  execution_context.set_work_queue(execution_work_queue_.get());
  execution_context.AddUserContext(std::move(tf_context_));
  xla::ifrt::Future uninitialized_entry =
      ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
          kVariableRuntimeName);
  ASSERT_TRUE(uninitialized_entry.IsReady());
  EXPECT_THAT(uninitialized_entry.Await().status(),
              ::tsl::testing::StatusIs(absl::StatusCode::kNotFound));
  std::vector args;
  args.resize(3);
  tensorflow::Tensor prefix_tensor =
      AsTensor({tsl::tstring(checkpoint_prefix)});
  args.at(0).Set(tfrt_stub::FallbackTensor(std::move(prefix_tensor)));
  tensorflow::Tensor name_tensor =
      AsTensor({tsl::tstring("w/.ATTRIBUTES/VARIABLE_VALUE"),
                tsl::tstring("w1/.ATTRIBUTES/VARIABLE_VALUE"),
                tsl::tstring("w2/.ATTRIBUTES/VARIABLE_VALUE"),
                tsl::tstring("w3/.ATTRIBUTES/VARIABLE_VALUE")});
  args.at(1).Set(tfrt_stub::FallbackTensor(std::move(name_tensor)));
  tensorflow::Tensor slice_tensor = AsTensor(
      {tsl::tstring(""), tsl::tstring(""), tsl::tstring(""), tsl::tstring("")});
  args.at(2).Set(tfrt_stub::FallbackTensor(std::move(slice_tensor)));
  std::vector last_uses = {true, true, true};
  std::vector results;
  absl::Notification notification;
  execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
  execution_context.Call(executable.functions()[0], last_uses,
                         absl::MakeSpan(args), absl::MakeSpan(results));
  mlrt::Execute(execution_context);
  notification.WaitForNotification();
  TF_ASSERT_OK(execution_context.status());
  xla::ifrt::Future restored_future =
      ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
          absl::StrCat(kVariableRuntimeName, 0));
  absl::StatusOr restored_tensor = restored_future.Await();
  TF_ASSERT_OK(restored_tensor.status());
  EXPECT_THAT(*restored_tensor, TensorEq(AsTensor({1, 2, 3}, {3})));
  xla::ifrt::Future restored_future1 =
      ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
          absl::StrCat(kVariableRuntimeName, 1));
  absl::StatusOr restored_tensor1 = restored_future1.Await();
  TF_ASSERT_OK(restored_tensor1.status());
  EXPECT_THAT(*restored_tensor1, TensorEq(AsTensor({4, 5, 6}, {3})));
  xla::ifrt::Future restored_future2 =
      ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
          absl::StrCat(kVariableRuntimeName, 2));
  absl::StatusOr restored_tensor2 = restored_future2.Await();
  TF_ASSERT_OK(restored_tensor2.status());
  EXPECT_THAT(*restored_tensor2, TensorEq(AsTensor({7, 8, 9}, {3})));
  xla::ifrt::Future restored_future3 =
      ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
          absl::StrCat(kVariableRuntimeName, 3));
  absl::StatusOr restored_tensor3 = restored_future3.Await();
  TF_ASSERT_OK(restored_tensor3.status());
  EXPECT_THAT(*restored_tensor3, TensorEq(AsTensor({10, 11, 12}, {3})));
}

TEST_F(KernelTest, IfrtRestoreVariableOpInValidInput) {
  std::string checkpoint_prefix =
      tensorflow::GetDataDependencyFilepath(
          "tensorflow/core/tfrt/mlrt/kernel/testdata/"
          "gen_checkpoint_data/variables") +
      "/variables";
  static constexpr int kNumVariables = 4;
  auto buffer = CreateExecutableForIfrtRestoreVariableOp(kNumVariables);
  mlrt::bc::Executable executable(buffer.data());
  mlrt::LoadedExecutable loaded_executable(executable, registry_);
  mlrt::ExecutionContext execution_context(&loaded_executable);
  execution_context.set_work_queue(execution_work_queue_.get());
  execution_context.AddUserContext(std::move(tf_context_));
  xla::ifrt::Future uninitialized_entry =
      ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
          kVariableRuntimeName);
  ASSERT_TRUE(uninitialized_entry.IsReady());
  EXPECT_THAT(uninitialized_entry.Await().status(),
              ::tsl::testing::StatusIs(absl::StatusCode::kNotFound));
  std::vector args;
  args.resize(3);
  tensorflow::Tensor prefix_tensor =
      AsTensor({tsl::tstring(checkpoint_prefix)});
  args.at(0).Set(tfrt_stub::FallbackTensor(std::move(prefix_tensor)));
  tensorflow::Tensor name_tensor =
      AsTensor({tsl::tstring("w/.ATTRIBUTES/VARIABLE_VALUE"),
                tsl::tstring("w1/.ATTRIBUTES/VARIABLE_VALUE"),
                tsl::tstring("w2/.ATTRIBUTES/VARIABLE_VALUE"),
                tsl::tstring("w3/.ATTRIBUTES/VARIABLE_VALUE")});
  args.at(1).Set(tfrt_stub::FallbackTensor(std::move(name_tensor)));
  tensorflow::Tensor slice_tensor = AsTensor(
      {tsl::tstring(""), tsl::tstring(""), tsl::tstring("")});
  args.at(2).Set(tfrt_stub::FallbackTensor(std::move(slice_tensor)));
  std::vector last_uses = {true, true, true};
  std::vector results;
  absl::Notification notification;
  execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
  execution_context.Call(executable.functions()[0], last_uses,
                         absl::MakeSpan(args), absl::MakeSpan(results));
  mlrt::Execute(execution_context);
  notification.WaitForNotification();
  EXPECT_THAT(execution_context.status(),
              ::tsl::testing::StatusIs(absl::StatusCode::kInvalidArgument));
}

}  // namespace
}  // namespace
}  // namespace

Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/kernel/ifrt_ops_kernel.cc
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/kernel/ifrt_ops_kernel_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
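For orientation, the buffers built by the helpers above are consumed the same way in every test. A minimal sketch of that driver flow, assuming only the mlrt testing APIs already visible in the tests (registry and work-queue setup as in KernelTest::SetUp(); some template arguments were stripped from the dump above and are not reconstructed here):

  // Sketch only: mirrors the Call/Execute pattern used in the tests above.
  mlrt::bc::Buffer buffer = CreateExecutableForIfrtLoadVariableOp();
  mlrt::bc::Executable executable(buffer.data());
  mlrt::LoadedExecutable loaded(executable, registry);  // resolves kernel names
  mlrt::ExecutionContext ctx(&loaded);
  ctx.set_work_queue(work_queue);  // threads for async kernels
  absl::Notification done;
  ctx.set_exit_handler([&] { done.Notify(); });
  // Two results: "output_tensor" and "output_future", per construct_output_regs.
  std::vector args, last_uses, results(2);  // element types as in the tests
  ctx.Call(executable.functions()[0], last_uses, absl::MakeSpan(args),
           absl::MakeSpan(results));
  mlrt::Execute(ctx);
  done.WaitForNotification();  // exit handler fires when the function returns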
Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":236,"cells":{"ID":{"kind":"string","value":"9fb23443-70bb-4a1a-b176-8ffde412eac7"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"clamp"},"File Path in Repository":{"kind":"string","value":"tensorflow/lite/experimental/shlo/legacy/src/clamp.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/lite/experimental/shlo/legacy/test/clamp_test.cc"},"Code":{"kind":"string","value":"#include \n#include \n#include \n#include \"absl/status/status.h\"\n#include \"tensorflow/lite/experimental/shlo/legacy/include/shlo.h\"\n#include \"tensorflow/lite/experimental/shlo/legacy/src/dispatch.h\"\n#include \"tensorflow/lite/experimental/shlo/legacy/src/storage.h\"\n#include \"tensorflow/lite/experimental/shlo/legacy/src/util.h\"\nnamespace stablehlo {\nnamespace {\ntemplate \nabsl::Status CheckParameters(const Value& min, const Value& operand,\n const Value& max, Value& result) {\n if (!(min.rank() == 0 or min.shape() == operand.shape())) {\n return absl::InvalidArgumentError(\n \"Constraint violation: rank(min) = 0 or shape(min) = shape(operand)\");\n } else if (!(max.rank() == 0 or max.shape() == operand.shape())) {\n return absl::InvalidArgumentError(\n \"Constraint violation: rank(max) = 0 or shape(max) = shape(operand)\");\n } else if (!(min.baseline_element_type() ==\n operand.baseline_element_type() and\n min.baseline_element_type() == max.baseline_element_type())) {\n return absl::InvalidArgumentError(\n \"Constraint violation: baseline_element_type(min) = \"\n \"baseline_element_type(operand) = baseline_element_type(max)\");\n } else if (!(operand.baseline_type() == result.baseline_type())) {\n return absl::InvalidArgumentError(\n \"Constraint violation: baseline_type(operand) = baseline_type(result)\");\n }\n if constexpr (std::is_same_v) {\n if (!(min.is_per_tensor_quantized() and max.is_per_tensor_quantized() and\n operand.is_per_tensor_quantized() and\n result.is_per_tensor_quantized())) {\n return absl::InvalidArgumentError(\"Expected per-tensor quantization\");\n }\n }\n if (operand.layout().has_strides() || result.layout().has_strides()) {\n return absl::InvalidArgumentError(\"Stides not supported yet\");\n }\n return absl::OkStatus();\n}\ntemplate \nabsl::Status Clamp(const Value& min, const Value& operand, const Value& max,\n Value& result) {\n if (auto check = CheckParameters(min, operand, max, result); !check.ok()) {\n return check;\n }\n using S = Storage;\n const bool min_is_tensor = (min.rank() > 0);\n const bool max_is_tensor = (max.rank() > 0);\n const size_t n = result.num_elements();\n auto operand_buffer = operand.buffer();\n auto min_buffer = min.buffer();\n auto max_buffer = max.buffer();\n auto result_buffer = result.buffer();\n if constexpr (std::is_same_v) {\n if (storage_type != result.element_type()) {\n return absl::InvalidArgumentError(\"Unexpected tensor element type\");\n }\n typename S::Type min_value;\n typename S::Type max_value;\n for (size_t i = 0; i < n; ++i) {\n if (min_is_tensor || (i == 0)) {\n min_value = S::Get(min_buffer, i);\n }\n if (max_is_tensor || (i == 0)) {\n max_value = S::Get(max_buffer, i);\n }\n auto operand_value = S::Get(operand_buffer, i);\n auto result_value =\n std::min(max_value, std::max(min_value, operand_value));\n S::Set(result_buffer, i, result_value);\n }\n } else {\n static_assert(std::is_same_v);\n if (storage_type != 
result.storage_type()) {\n return absl::InvalidArgumentError(\"Unexpected storage type\");\n } else if (expressed_type != result.expressed_type()) {\n return absl::InvalidArgumentError(\"Unexpected expressed type\");\n }\n using ET = typename Storage::Type;\n const QuantizedParameter& min_quant_param =\n min.type().element_type().parameters(0);\n const QuantizedParameter& max_quant_param =\n max.type().element_type().parameters(0);\n const QuantizedParameter& operand_quant_param =\n operand.type().element_type().parameters(0);\n const QuantizedParameter& result_quant_param =\n result.type().element_type().parameters(0);\n ET result_scale_inv = ET(1.0) / static_cast(result_quant_param.scale);\n ET min_expressed;\n ET max_expressed;\n for (size_t i = 0; i < n; ++i) {\n if (min_is_tensor || (i == 0)) {\n auto min_storage = S::Get(min_buffer, i);\n min_expressed = Dequantize(\n min_storage, min_quant_param);\n }\n if (max_is_tensor || (i == 0)) {\n auto max_storage = S::Get(max_buffer, i);\n max_expressed = Dequantize(\n max_storage, max_quant_param);\n }\n auto operand_storage = S::Get(operand_buffer, i);\n auto result_storage =\n DequantizeOpQuantizePartial(\n operand_storage, operand_quant_param, result_scale_inv,\n result_quant_param.zero_point, [=](auto x) {\n return std::min(max_expressed, std::max(min_expressed, x));\n });\n S::Set(result_buffer, i, result_storage);\n }\n if (auto status = CompleteQuantization(result);\n !status.ok()) {\n return status;\n }\n }\n return absl::OkStatus();\n}\n} \nabsl::Status Clamp(const Tensor& min, const Tensor& operand, const Tensor& max,\n Tensor& result) {\n DISPATCH_INT_FLOAT(Clamp, result.element_type(), min, operand, max, result);\n}\nabsl::Status Clamp(const QuantizedTensor& min, const QuantizedTensor& operand,\n const QuantizedTensor& max, QuantizedTensor& result) {\n DISPATCH_QUANTIZED(Clamp, result.storage_type(), result.expressed_type(), min,\n operand, max, result);\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \n#include \n#include \n#include \n#include \n#include \"tensorflow/lite/experimental/shlo/legacy/include/shlo.h\"\n#include \"tensorflow/lite/experimental/shlo/legacy/src/debug.h\" \n#include \"tensorflow/lite/experimental/shlo/legacy/src/storage.h\"\n#include \"tensorflow/lite/experimental/shlo/legacy/test/util.h\"\nnamespace stablehlo {\nnamespace testing {\ntemplate \nvoid test(std::initializer_list&& shape,\n std::vector::Type>&& min_values,\n std::vector::Type>&& operand_values,\n std::vector::Type>&& max_values,\n std::vector::Type>&& expected_values) {\n Shape min_shape = (min_values.size() > 1) ? Shape(shape) : Shape();\n Tensor min(TensorType(std::move(min_shape), element_type), min_values.data());\n Shape max_shape = (max_values.size() > 1) ? 
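The quantized branch above never clamps raw storage values: it dequantizes into the expressed (floating-point) domain, clamps there, and requantizes with the result's parameters. A minimal standalone sketch of that per-element arithmetic; the names here are ad hoc for illustration, not the library's API:

  // Illustrative only: dequantize -> clamp -> requantize, as the quantized
  // path above does via Dequantize() and DequantizeOpQuantizePartial().
  #include <algorithm>
  #include <cmath>
  #include <cstdint>

  struct QuantParam { float scale; int32_t zero_point; };

  inline int32_t QuantizedClamp(int32_t min_q, QuantParam min_p,
                                int32_t x_q, QuantParam x_p,
                                int32_t max_q, QuantParam max_p,
                                QuantParam out_p) {
    // Dequantize each operand with its own parameters.
    float mn = (min_q - min_p.zero_point) * min_p.scale;
    float mx = (max_q - max_p.zero_point) * max_p.scale;
    float x = (x_q - x_p.zero_point) * x_p.scale;
    // Apply the op in the expressed domain.
    float y = std::min(mx, std::max(mn, x));
    // Requantize; 1/out_p.scale is what the code above precomputes as
    // result_scale_inv.
    return static_cast<int32_t>(std::round(y / out_p.scale)) + out_p.zero_point;
  }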
Unit Test - (Ground Truth):

#include
#include
#include
#include
#include
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/debug.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/test/util.h"

namespace stablehlo {
namespace testing {

template
void test(std::initializer_list&& shape,
          std::vector::Type>&& min_values,
          std::vector::Type>&& operand_values,
          std::vector::Type>&& max_values,
          std::vector::Type>&& expected_values) {
  Shape min_shape = (min_values.size() > 1) ? Shape(shape) : Shape();
  Tensor min(TensorType(std::move(min_shape), element_type), min_values.data());
  Shape max_shape = (max_values.size() > 1) ? Shape(shape) : Shape();
  Tensor max(TensorType(std::move(max_shape), element_type), max_values.data());
  Tensor operand(TensorType(Shape(shape), element_type), operand_values.data());
  Tensor expected(TensorType(Shape(shape), element_type),
                  expected_values.data());
  std::vector::Type> result_values(expected_values.size());
  Tensor result(TensorType(Shape(shape), element_type), result_values.data());
  ASSERT_OK(Clamp(min, operand, max, result));
  EXPECT_EQ(result, expected)
      << "min: " << min << "\nmax: " << max << "\noperand: " << operand;
}

template
void test(
    QuantizedParameter&& quantized_parameter,
    std::initializer_list&& shape,
    std::vector::Type>&& min_values,
    std::vector::Type>&& operand_values,
    std::vector::Type>&& max_values,
    std::vector::Type>&& expected_values) {
  auto min_quant_values = QuantizeVector(min_values, quantized_parameter);
  auto operand_quant_values = QuantizeVector(operand_values, quantized_parameter);
  auto max_quant_values = QuantizeVector(max_values, quantized_parameter);
  auto expected_quant_values = QuantizeVector(expected_values, quantized_parameter);
  std::vector::Type> result_quant_values(
      expected_quant_values.size());
  QuantizedTensorElementType element_type(storage_type, expressed_type,
                                          std::move(quantized_parameter));
  Shape min_shape = (min_values.size() > 1) ? Shape(shape) : Shape();
  QuantizedTensor min(
      QuantizedTensorType(std::move(min_shape),
                          QuantizedTensorElementType(element_type)),
      min_quant_values.data());
  Shape max_shape = (max_values.size() > 1) ? Shape(shape) : Shape();
  QuantizedTensor max(
      QuantizedTensorType(std::move(max_shape),
                          QuantizedTensorElementType(element_type)),
      max_quant_values.data());
  QuantizedTensor operand(
      QuantizedTensorType(Shape(shape),
                          QuantizedTensorElementType(element_type)),
      operand_quant_values.data());
  QuantizedTensor expected(
      QuantizedTensorType(Shape(shape),
                          QuantizedTensorElementType(element_type)),
      expected_quant_values.data());
  QuantizedTensor result(
      QuantizedTensorType(Shape(shape),
                          QuantizedTensorElementType(element_type)),
      result_quant_values.data());
  ASSERT_OK(Clamp(min, operand, max, result));
  EXPECT_EQ(result, expected)
      << "min: " << min << "\nmax: " << max << "\noperand: " << operand;
}

TEST(Clamp, Unquantized) {
  test({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
  test({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
  test({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
  test({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
  test({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
  test({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
  test({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
  test({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
  test({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
  test({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
  test({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
  test({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
}

TEST(Clamp, Quantized) {
  test({.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
  test({.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
  test({.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
  test({.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
  test({.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
  test({.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
  test({.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
  test({.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
  test({.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
  test({.scale = 0.1, .zero_point = 0},
       {3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
  test({.scale = 0.1, .zero_point = 0},
       {3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
  test({.scale = 0.1, .zero_point = 0},
       {3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
  test({.scale = 0.1, .zero_point = 0},
       {3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
  test({.scale = 0.1, .zero_point = 0},
       {3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
  test({.scale = 0.1, .zero_point = 0},
       {3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
  test({.scale = 0.1, .zero_point = 0},
       {3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
  test({.scale = 0.1, .zero_point = 0},
       {3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
  test({.scale = 0.1, .zero_point = 0},
       {3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
}

}  // namespace testing
}  // namespace stablehlo

Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/src/clamp.cc
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/test/clamp_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
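Note on the quantized test vectors above: with scale = 0.1 and zero_point = 0, QuantizeVector maps each expressed value x to its storage value round(x / scale) + zero_point, so -2 becomes -20 and 1 becomes 10. A hypothetical one-value helper, for illustration only:

  // Sketch of the quantization the test harness applies to its literals.
  #include <cmath>
  #include <cstdint>

  int32_t Quantize(float x, float scale, int32_t zero_point) {
    return static_cast<int32_t>(std::round(x / scale)) + zero_point;
  }
  // Quantize(2.0f, 0.1f, 0) == 20; Quantize(-2.0f, 0.1f, 0) == -20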
\"tensorflow/lite/tools/model_loader.h\"\n#ifndef TEMP_FAILURE_RETRY\n#ifdef __ANDROID__\n#error \"TEMP_FAILURE_RETRY not set although on Android\"\n#else \n#define TEMP_FAILURE_RETRY(exp) exp\n#endif \n#endif \nnamespace tflite {\nnamespace acceleration {\nnamespace {\nstd::unique_ptr LoadDelegatePlugin(\n const std::string& name, const tflite::TFLiteSettings& tflite_settings) {\n return tflite::delegates::DelegatePluginRegistry::CreateByName(\n name + \"Plugin\", tflite_settings);\n}\nvoid AppendTensorDataToVector(const TfLiteTensor* tensor,\n std::vector>& output_vector) {\n std::vector char_output(TfLiteTensorByteSize(tensor));\n memcpy(char_output.data(), TfLiteTensorData(tensor),\n TfLiteTensorByteSize(tensor));\n output_vector.emplace_back(std::move(char_output));\n}\ninline bool HasTensorData(tools::ModelLoader* model_loader,\n const Subgraph& graph, int index) {\n const TfLiteTensor* tensor = graph.tensor(index);\n return tensor->allocation != nullptr ||\n (model_loader->type() == tools::ModelLoader::Type::kPipeModelLoader &&\n tensor->data.data != nullptr);\n}\nconstexpr int64_t kMicrosInSecond = 1000 * 1000;\nconstexpr int64_t kNanosInMicro = 1000;\nint64_t ElapsedTimeMicros() {\n struct timespec ts;\n#if defined(__ANDROID__)\n int err = clock_gettime(CLOCK_BOOTTIME, &ts);\n#elif defined(_WIN32)\n int err = 1;\n#else\n int err = clock_gettime(CLOCK_MONOTONIC, &ts);\n#endif\n if (err) {\n return -1;\n }\n return ts.tv_sec * kMicrosInSecond + ts.tv_nsec / kNanosInMicro;\n}\nclass ValidatorProfiler : public ::tflite::Profiler {\n public:\n struct EventData {\n std::string tag;\n int64_t start_time_us = -1;\n int64_t end_time_us = -1;\n };\n const std::vector& events() { return events_; }\n uint32_t BeginEvent(const char* tag, EventType event_type,\n int64_t event_metadata1,\n int64_t event_metadata2) override {\n if (event_type != EventType::DEFAULT) {\n return 0;\n }\n events_.push_back({tag, ElapsedTimeMicros(), -1});\n return events_.size();\n }\n void EndEvent(uint32_t event_handle) override {\n if (event_handle == 0) {\n return;\n }\n events_[event_handle - 1].end_time_us = ElapsedTimeMicros();\n }\n private:\n std::vector events_;\n};\n} \nMinibenchmarkStatus Validator::CheckGoldenOutput(Results* results_out) {\n if (!interpreter_ || !model_loader_->GetModel()) {\n return kMinibenchmarkPreconditionNotMet;\n }\n if (validation_entrypoint_->inputs().size() <= 1) {\n return kMinibenchmarkValidationSubgraphHasTooFewInputs;\n }\n if (validation_entrypoint_->inputs().size() >\n validation_entrypoint_->outputs().size()) {\n return kMinibenchmarkValidationSubgraphHasTooFewOutputs;\n }\n if (HasTensorData(model_loader_.get(), *validation_entrypoint_,\n validation_entrypoint_->inputs()[0])) {\n return kMinibenchmarkSuccess;\n }\n TFLITE_LOG_PROD(TFLITE_LOG_INFO,\n \"Running on CPU to get golden output for comparison.\");\n tflite::InterpreterBuilder(*model_loader_->GetModel(),\n *resolver_)(&golden_interpreter_);\n if (!golden_interpreter_) {\n return kMinibenchmarkInterpreterBuilderFailed;\n }\n Subgraph* golden_validation_entrypoint =\n golden_interpreter_->subgraph(validation_entrypoint_index_);\n if (golden_validation_entrypoint->AllocateTensors() != kTfLiteOk) {\n return kMinibenchmarkAllocateTensorsFailed;\n }\n for (int i = 0; i < golden_validation_entrypoint->inputs().size() - 1; i++) {\n TfLiteTensor* input_tensor = golden_validation_entrypoint->tensor(\n golden_validation_entrypoint->inputs()[i]);\n memset(input_tensor->data.data, 0, input_tensor->bytes);\n }\n if 
(golden_validation_entrypoint->Invoke() != kTfLiteOk) {\n return kMinibenchmarkInvokeFailed;\n }\n for (int i = 0; i < validation_entrypoint_->inputs().size() - 1; i++) {\n TfLiteTensor* input_tensor =\n validation_entrypoint_->tensor(validation_entrypoint_->inputs()[i]);\n TfLiteTensor* golden_output_tensor = golden_validation_entrypoint->tensor(\n golden_validation_entrypoint->outputs()[i]);\n if (input_tensor->bytes != golden_output_tensor->bytes) {\n return kMinibenchmarkValidationSubgraphInputsDontMatchOutputs;\n }\n memcpy(input_tensor->data.data, golden_output_tensor->data.data,\n golden_output_tensor->bytes);\n }\n return kMinibenchmarkSuccess;\n}\nMinibenchmarkStatus Validator::LoadDelegate() {\n if (!compute_settings_) {\n return kMinibenchmarkPreconditionNotMet;\n }\n if (opaque_delegate_) {\n return kMinibenchmarkSuccess;\n }\n Delegate which_delegate = Delegate_NONE;\n bool is_stable_delegate_path_provided = false;\n auto tflite_settings = compute_settings_->tflite_settings();\n if (tflite_settings) {\n which_delegate = compute_settings_->tflite_settings()->delegate();\n if (tflite_settings->stable_delegate_loader_settings()) {\n is_stable_delegate_path_provided =\n tflite_settings->stable_delegate_loader_settings()->delegate_path() &&\n !tflite_settings->stable_delegate_loader_settings()\n ->delegate_path()\n ->str()\n .empty();\n }\n }\n std::string delegate_name;\n if (is_stable_delegate_path_provided && which_delegate == Delegate_GPU) {\n delegate_name = \"GpuModule\";\n } else if (is_stable_delegate_path_provided) {\n delegate_name = \"StableDelegate\";\n } else {\n switch (which_delegate) {\n case Delegate_NONE:\n return kMinibenchmarkSuccess;\n case Delegate_NNAPI:\n delegate_name = \"Nnapi\";\n break;\n case Delegate_GPU:\n delegate_name = \"Gpu\";\n break;\n case Delegate_XNNPACK:\n delegate_name = \"XNNPack\";\n break;\n case Delegate_EDGETPU:\n delegate_name = \"EdgeTpu\";\n break;\n default:\n return kMinibenchmarkDelegateNotSupported;\n }\n }\n TFLITE_LOG_PROD(TFLITE_LOG_INFO, \"Running mini-benchmark on %s\",\n delegate_name.c_str());\n if (!(delegate_plugin_ = LoadDelegatePlugin(\n delegate_name, *compute_settings_->tflite_settings()))) {\n return kMinibenchmarkDelegatePluginNotFound;\n }\n if (!(delegate_ = delegate_plugin_->Create())) {\n return kMinibenchmarkDelegateCreateFailed;\n }\n return kMinibenchmarkSuccess;\n}\nMinibenchmarkStatus Validator::LoadOpaqueDelegate() {\n if (!compute_settings_) {\n return kMinibenchmarkPreconditionNotMet;\n }\n bool is_stable_delegate_name_provided = false;\n auto tflite_settings = compute_settings_->tflite_settings();\n if (!tflite_settings) {\n return kMinibenchmarkSuccess;\n }\n auto stable_delegate_settings =\n tflite_settings->stable_delegate_loader_settings();\n is_stable_delegate_name_provided =\n stable_delegate_settings && stable_delegate_settings->delegate_name() &&\n !stable_delegate_settings->delegate_name()->str().empty();\n if (!is_stable_delegate_name_provided) {\n return kMinibenchmarkSuccess;\n }\n std::string delegate_name = stable_delegate_settings->delegate_name()->str();\n TFLITE_LOG_PROD(TFLITE_LOG_INFO, \"Running mini-benchmark on %s\",\n delegate_name.c_str());\n const TfLiteStableDelegate* stable_delegate =\n delegates::StableDelegateRegistry::RetrieveStableDelegate(delegate_name);\n if (!stable_delegate) {\n TFLITE_LOG_PROD(TFLITE_LOG_ERROR,\n \"Failed to load stable delegate plugin %s\",\n delegate_name.c_str());\n return kMinibenchmarkDelegatePluginNotFound;\n }\n const 
TfLiteOpaqueDelegatePlugin* delegate_plugin =\n stable_delegate->delegate_plugin;\n opaque_delegate_ = TfLiteOpaqueDelegatePtr(\n delegate_plugin->create(tflite_settings), delegate_plugin->destroy);\n return kMinibenchmarkSuccess;\n}\nMinibenchmarkStatus Validator::CreateInterpreter(int* delegate_error_out,\n int* delegated_kernels_out) {\n if (!delegate_error_out || !delegated_kernels_out ||\n !model_loader_->GetModel()) {\n return kMinibenchmarkPreconditionNotMet;\n }\n if (interpreter_) {\n return kMinibenchmarkSuccess;\n }\n *delegate_error_out = 0;\n if (compute_settings_->tflite_settings() &&\n compute_settings_->tflite_settings()->disable_default_delegates()) {\n resolver_ = std::make_unique<\n ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates>();\n } else {\n resolver_ = std::make_unique<::tflite::ops::builtin::BuiltinOpResolver>();\n }\n resolver_->AddCustom(\"validation/call\",\n ::tflite::acceleration::ops::Register_CALL(), 1);\n resolver_->AddCustom(\n \"validation/decode_jpeg\",\n ::tflite::acceleration::decode_jpeg_kernel::Register_DECODE_JPEG(), 1);\n RegisterSelectedOps(resolver_.get());\n tflite::InterpreterBuilder builder(*model_loader_->GetModel(), *resolver_);\n if (delegate_ != nullptr) {\n builder.AddDelegate(delegate_.get());\n }\n if (opaque_delegate_ != nullptr) {\n builder.AddDelegate(opaque_delegate_.get());\n }\n TfLiteStatus status = builder(&interpreter_);\n if (!interpreter_) {\n *delegate_error_out =\n delegate_plugin_ ? delegate_plugin_->GetDelegateErrno(delegate_.get())\n : 0;\n TFLITE_LOG_PROD(TFLITE_LOG_ERROR,\n \"Creating Interpreter failed with error code %d.\", status);\n return kMinibenchmarkInterpreterBuilderFailed;\n }\n main_model_ = interpreter_->subgraph(0);\n validation_entrypoint_index_ = -1;\n for (int i = 0; i < interpreter_->subgraphs_size(); i++) {\n Subgraph* subgraph = interpreter_->subgraph(i);\n if (subgraph->GetName() == kValidationGraphName) {\n validation_entrypoint_index_ = i;\n validation_entrypoint_ = subgraph;\n } else if (subgraph->GetName() == \"VALIDATION:metrics\") {\n has_accuracy_validation_ = true;\n }\n }\n if (!validation_entrypoint_) {\n return kMinibenchmarkValidationSubgraphNotFound;\n }\n if (validation_entrypoint_->inputs().empty()) {\n return kMinibenchmarkValidationSubgraphHasTooFewInputs;\n }\n if (!HasTensorData(model_loader_.get(), *validation_entrypoint_,\n validation_entrypoint_->inputs().back())) {\n return kMinibenchmarkValidationInputMissing;\n }\n if (validation_entrypoint_->AllocateTensors() != kTfLiteOk) {\n return kMinibenchmarkAllocateTensorsFailed;\n }\n absl::flat_hash_set checked_node_ids;\n int num_delegated_kernels = 0;\n for (int i = 0; i < interpreter_->execution_plan().size(); ++i) {\n int node_id = interpreter_->execution_plan()[i];\n if (checked_node_ids.find(node_id) != checked_node_ids.end()) {\n continue;\n }\n const TfLiteNode& node =\n interpreter_->node_and_registration(node_id)->first;\n if (node.delegate != nullptr) {\n num_delegated_kernels++;\n checked_node_ids.insert(node_id);\n }\n }\n *delegated_kernels_out = num_delegated_kernels;\n bool fully_delegated = (num_delegated_kernels == 1 &&\n interpreter_->execution_plan().size() == 1);\n if (!fully_delegated) {\n TFLITE_LOG_PROD(TFLITE_LOG_WARNING,\n \"The model will be %s executed by the delegate.\",\n num_delegated_kernels > 0 ? 
\"partially\" : \"not\");\n }\n return kMinibenchmarkSuccess;\n}\nValidator::Status Validator::RunValidation(Results* results_out) {\n BenchmarkStage stage = BenchmarkStage_INITIALIZATION;\n if (!results_out) {\n return Validator::Status{kMinibenchmarkPreconditionNotMet, stage};\n }\n if (!model_loader_) {\n return Validator::Status{kMinibenchmarkModelReadFailed, stage};\n }\n if (!model_loader_->Init()) {\n return Validator::Status{kMinibenchmarkModelInitFailed, stage};\n }\n#define MB_RETURN_IF_ERROR(s, bs) \\\n { \\\n MinibenchmarkStatus c = (s); \\\n if (c != kMinibenchmarkSuccess) return Validator::Status{c, (bs)}; \\\n }\n int64_t delegate_load_start_time_us = ElapsedTimeMicros();\n MB_RETURN_IF_ERROR(LoadOpaqueDelegate(), stage);\n MB_RETURN_IF_ERROR(LoadDelegate(), stage);\n MB_RETURN_IF_ERROR(CreateInterpreter(&results_out->delegate_error,\n &results_out->delegated_kernels),\n stage);\n int64_t delegate_load_end_time_us = ElapsedTimeMicros();\n ValidatorProfiler profiler;\n stage = BenchmarkStage_INFERENCE;\n if (has_accuracy_validation_) {\n MB_RETURN_IF_ERROR(CheckGoldenOutput(results_out), stage);\n }\n main_model_->SetProfiler(&profiler, 0);\n TfLiteStatus status = validation_entrypoint_->Invoke();\n main_model_->SetProfiler(nullptr, 0);\n if (status != kTfLiteOk) {\n MB_RETURN_IF_ERROR(kMinibenchmarkInvokeFailed, stage);\n }\n int model_output_size = main_model_->outputs().size();\n if (has_accuracy_validation_) {\n const std::string kMetricPrefix = \"metrics/\";\n const std::string kOk(\"ok\");\n for (int i = model_output_size;\n i < validation_entrypoint_->outputs().size(); i++) {\n TfLiteTensor* tensor =\n validation_entrypoint_->tensor(validation_entrypoint_->outputs()[i]);\n std::string name = tensor->name;\n if (name.find(kMetricPrefix) != 0) { \n continue;\n }\n name = name.substr(kMetricPrefix.size());\n if (kOk == name) {\n results_out->ok = *(tensor->data.b);\n } else {\n std::vector values;\n int count = 1;\n for (int j = 0; j < tensor->dims->size; j++) {\n count *= tensor->dims->data[j];\n }\n values.reserve(count);\n for (int j = 0; j < count; j++) {\n values.push_back(tensor->data.f[j]);\n TFLITE_LOG_PROD(TFLITE_LOG_INFO, \" %s %.4f\", name.c_str(),\n tensor->data.f[j]);\n }\n results_out->metrics[name] = values;\n }\n }\n TFLITE_LOG_PROD(TFLITE_LOG_INFO, \" accuracy: %s\",\n results_out->ok ? \"ok\" : \"not ok\");\n } else {\n results_out->actual_inference_output.clear();\n results_out->actual_inference_output.reserve(model_output_size);\n for (int i = 0; i < model_output_size; i++) {\n AppendTensorDataToVector(\n validation_entrypoint_->tensor(validation_entrypoint_->outputs()[i]),\n results_out->actual_inference_output);\n }\n }\n results_out->delegate_prep_time_us =\n (delegate_load_end_time_us == -1 || delegate_load_start_time_us == -1)\n ? 
-1\n : delegate_load_end_time_us - delegate_load_start_time_us;\n TFLITE_LOG_PROD(TFLITE_LOG_INFO, \" Delegate preparation took %d us\",\n static_cast(results_out->delegate_prep_time_us));\n for (const auto& e : profiler.events()) {\n if (e.tag == \"Invoke\" && e.start_time_us != -1 && e.end_time_us != -1) {\n results_out->execution_time_us.push_back(e.end_time_us - e.start_time_us);\n TFLITE_LOG_PROD(TFLITE_LOG_INFO, \" Inference took %d us\",\n static_cast(e.end_time_us - e.start_time_us));\n }\n }\n#undef MB_RETURN_IF_ERROR\n return Validator::Status{kMinibenchmarkSuccess};\n}\nint64_t Validator::BootTimeMicros() { return ElapsedTimeMicros(); }\nint64_t Validator::WallTimeMicros() {\n struct timespec ts;\n#ifndef _WIN32\n int err = clock_gettime(CLOCK_REALTIME, &ts);\n#else \n int err = 1;\n#endif \n if (err) {\n return -1;\n }\n return ts.tv_sec * kMicrosInSecond + ts.tv_nsec / kNanosInMicro;\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"flatbuffers/flatbuffers.h\" \n#if FLATBUFFERS_LITTLEENDIAN == 0\n#include \"tensorflow/lite/core/model_builder.h\"\n#endif\n#include \"tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h\"\n#include \"tensorflow/lite/acceleration/configuration/configuration.pb.h\"\n#include \"tensorflow/lite/acceleration/configuration/configuration_generated.h\"\n#include \"tensorflow/lite/acceleration/configuration/proto_to_flatbuffer.h\"\n#include \"tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_model.h\"\n#include \"tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_validation_model.h\"\n#include \"tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h\"\n#include \"tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h\"\n#include \"tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h\"\n#include \"tensorflow/lite/tools/model_loader.h\"\nnamespace tflite {\nnamespace acceleration {\nnamespace {\nusing flatbuffers::FlatBufferBuilder;\nconstexpr int kOutputTensorSize = 1001;\nclass ValidatorTest : public ::testing::Test {\n protected:\n void SetUp() override {\n std::string validation_model_path = MiniBenchmarkTestHelper::DumpToTempFile(\n \"mobilenet_quant_with_validation.tflite\",\n g_tflite_acceleration_embedded_mobilenet_validation_model,\n g_tflite_acceleration_embedded_mobilenet_validation_model_len);\n ASSERT_TRUE(!validation_model_path.empty());\n validation_model_loader_ =\n std::make_unique(validation_model_path);\n std::string plain_model_path = MiniBenchmarkTestHelper::DumpToTempFile(\n \"mobilenet_quant.tflite\",\n g_tflite_acceleration_embedded_mobilenet_model,\n g_tflite_acceleration_embedded_mobilenet_model_len);\n ASSERT_TRUE(!plain_model_path.empty());\n plain_model_loader_ =\n std::make_unique(plain_model_path);\n compute_settings_fbb_.Finish(CreateComputeSettings(compute_settings_fbb_));\n default_compute_settings_ = flatbuffers::GetRoot(\n compute_settings_fbb_.GetBufferPointer());\n }\n std::unique_ptr validation_model_loader_;\n std::unique_ptr plain_model_loader_;\n FlatBufferBuilder compute_settings_fbb_;\n const ComputeSettings* default_compute_settings_;\n};\nTEST_F(ValidatorTest, HappyPathOnCpuWithEmbeddedValidation) {\n ASSERT_TRUE(validation_model_loader_->Init());\n Validator 
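Before the unit tests, a minimal sketch of the calling convention validator.cc implements, using only types and names visible in the code above and in the tests below (the constructor signature is taken from the tests; error handling elided):

  // Sketch only: how a caller drives Validator.
  auto model_loader = std::make_unique<tools::PathModelLoader>(model_path);
  Validator validator(std::move(model_loader), compute_settings);
  Validator::Results results;
  Validator::Status status = validator.RunValidation(&results);
  if (status.status == kMinibenchmarkSuccess) {
    // results.ok and results.metrics are only populated when the model embeds
    // a "VALIDATION:metrics" subgraph; otherwise actual_inference_output
    // holds the raw model outputs for external comparison.
  } else {
    // status.stage says whether it failed during INITIALIZATION or INFERENCE.
  }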
Unit Test - (Ground Truth):

#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
#include
#include
#include
#include
#include
#include
#include
#include "flatbuffers/flatbuffers.h"
#if FLATBUFFERS_LITTLEENDIAN == 0
#include "tensorflow/lite/core/model_builder.h"
#endif
#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h"
#include "tensorflow/lite/acceleration/configuration/configuration.pb.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/acceleration/configuration/proto_to_flatbuffer.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_validation_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/tools/model_loader.h"

namespace tflite {
namespace acceleration {
namespace {

using flatbuffers::FlatBufferBuilder;

constexpr int kOutputTensorSize = 1001;

class ValidatorTest : public ::testing::Test {
 protected:
  void SetUp() override {
    std::string validation_model_path = MiniBenchmarkTestHelper::DumpToTempFile(
        "mobilenet_quant_with_validation.tflite",
        g_tflite_acceleration_embedded_mobilenet_validation_model,
        g_tflite_acceleration_embedded_mobilenet_validation_model_len);
    ASSERT_TRUE(!validation_model_path.empty());
    validation_model_loader_ =
        std::make_unique(validation_model_path);
    std::string plain_model_path = MiniBenchmarkTestHelper::DumpToTempFile(
        "mobilenet_quant.tflite",
        g_tflite_acceleration_embedded_mobilenet_model,
        g_tflite_acceleration_embedded_mobilenet_model_len);
    ASSERT_TRUE(!plain_model_path.empty());
    plain_model_loader_ =
        std::make_unique(plain_model_path);
    compute_settings_fbb_.Finish(CreateComputeSettings(compute_settings_fbb_));
    default_compute_settings_ = flatbuffers::GetRoot(
        compute_settings_fbb_.GetBufferPointer());
  }

  std::unique_ptr validation_model_loader_;
  std::unique_ptr plain_model_loader_;
  FlatBufferBuilder compute_settings_fbb_;
  const ComputeSettings* default_compute_settings_;
};

TEST_F(ValidatorTest, HappyPathOnCpuWithEmbeddedValidation) {
  ASSERT_TRUE(validation_model_loader_->Init());
  Validator validator(std::move(validation_model_loader_),
                      default_compute_settings_);
  Validator::Results results;
  Validator::Status validation_run = validator.RunValidation(&results);
  EXPECT_EQ(validation_run.status, kMinibenchmarkSuccess);
  EXPECT_EQ(validation_run.stage, BenchmarkStage_UNKNOWN);
  EXPECT_TRUE(results.ok);
  EXPECT_GE(results.metrics.size(), 0);
  EXPECT_EQ(results.delegate_error, 0);
  EXPECT_TRUE(results.actual_inference_output.empty());
}

TEST_F(ValidatorTest, HappyPathOnCpuWithCustomValidation) {
  ASSERT_TRUE(plain_model_loader_->Init());
  ASSERT_TRUE(validation_model_loader_->Init());
  const SubGraph* main_model =
      plain_model_loader_->GetModel()->GetModel()->subgraphs()->Get(0);
  const int model_output_size = main_model->outputs()->size();
  int model_input_byte_size = 1;
  for (int shape_i :
       *main_model->tensors()->Get(main_model->inputs()->Get(0))->shape()) {
    model_input_byte_size *= shape_i;
  }
  int batch_size = 5;
  FlatBufferBuilder model_with_input;
  CustomValidationEmbedder embedder(
      batch_size,
      {std::vector(batch_size * model_input_byte_size, 1)});
  EXPECT_EQ(embedder.BuildModel(*plain_model_loader_->GetModel()->GetModel(),
                                model_with_input),
            kMinibenchmarkSuccess);
  std::string serialized_str(
      reinterpret_cast(model_with_input.GetBufferPointer()),
      model_with_input.GetSize());
#if FLATBUFFERS_LITTLEENDIAN == 0
  tflite::FlatBufferModel::ByteSwapSerializedModel(&serialized_str, true);
#endif
  std::string model_path = MiniBenchmarkTestHelper::DumpToTempFile(
      "mobilenet_quant_with_input.tflite",
      reinterpret_cast(serialized_str.c_str()),
      serialized_str.size());
  ASSERT_TRUE(!model_path.empty());
  auto model_loader = std::make_unique(model_path);
  Validator validator(std::move(model_loader), default_compute_settings_);
  Validator::Results results;
  Validator::Status validation_run = validator.RunValidation(&results);
  EXPECT_EQ(validation_run.status, kMinibenchmarkSuccess);
  EXPECT_EQ(validation_run.stage, BenchmarkStage_UNKNOWN);
  EXPECT_FALSE(results.ok);
  EXPECT_EQ(results.metrics.size(), 0);
  EXPECT_EQ(results.delegate_error, 0);
  EXPECT_EQ(results.actual_inference_output.size(), model_output_size);
  EXPECT_EQ(results.actual_inference_output[0].size(),
            batch_size * kOutputTensorSize);
}

TEST_F(ValidatorTest, DelegateNotSupported) {
  proto::ComputeSettings settings_proto;
  settings_proto.mutable_tflite_settings()->set_delegate(proto::CORE_ML);
  flatbuffers::FlatBufferBuilder fbb;
  const ComputeSettings* settings = ConvertFromProto(settings_proto, &fbb);
  Validator validator(std::move(validation_model_loader_), settings);
  Validator::Results results;
  Validator::Status validation_run = validator.RunValidation(&results);
  EXPECT_EQ(validation_run.status, kMinibenchmarkDelegateNotSupported);
  EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION);
}

TEST_F(ValidatorTest, NoValidationSubgraph) {
  Validator validator(std::move(plain_model_loader_),
                      default_compute_settings_);
  Validator::Results results;
  Validator::Status validation_run = validator.RunValidation(&results);
  EXPECT_EQ(validation_run.status, kMinibenchmarkValidationSubgraphNotFound);
  EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION);
}

TEST_F(ValidatorTest, NoValidationInputData) {
  ASSERT_TRUE(plain_model_loader_->Init());
  FlatBufferBuilder model_with_input;
  CustomValidationEmbedder embedder(1, {{}});
  EXPECT_EQ(embedder.BuildModel(*plain_model_loader_->GetModel()->GetModel(),
                                model_with_input),
            kMinibenchmarkSuccess);
  std::string model_path = MiniBenchmarkTestHelper::DumpToTempFile(
      "mobilenet_quant_with_input.tflite", model_with_input.GetBufferPointer(),
      model_with_input.GetSize());
  ASSERT_TRUE(!model_path.empty());
  auto model_loader = std::make_unique(model_path);
  Validator validator(std::move(model_loader), default_compute_settings_);
  Validator::Results results;
  Validator::Status validation_run = validator.RunValidation(&results);
  EXPECT_EQ(validation_run.status, kMinibenchmarkValidationInputMissing);
  EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION);
}

TEST_F(ValidatorTest, InvalidModel) {
  const std::string dump_path = MiniBenchmarkTestHelper::DumpToTempFile(
      "foo.tflite", g_tflite_acceleration_embedded_mobilenet_validation_model,
      g_tflite_acceleration_embedded_mobilenet_validation_model_len - 12000);
  ASSERT_TRUE(!dump_path.empty());
  Validator validator(std::make_unique(dump_path),
                      default_compute_settings_);
  Validator::Results results;
  Validator::Status validation_run = validator.RunValidation(&results);
  EXPECT_EQ(validation_run.status, kMinibenchmarkModelInitFailed);
  EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION);
}

TEST_F(ValidatorTest, EmptyModelLoader) {
  Validator validator(nullptr, default_compute_settings_);
  Validator::Results results;
  Validator::Status validation_run = validator.RunValidation(&results);
  EXPECT_EQ(validation_run.status, kMinibenchmarkModelReadFailed);
  EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION);
}

}  // namespace
}  // namespace acceleration
}  // namespace tflite

Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/validator.cc
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/validator_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
\"tensorflow/core/framework/tensor_shape.h\"\n#include \"tensorflow/core/lib/core/errors.h\"\n#include \"tensorflow/core/lib/random/random.h\"\n#include \"tensorflow/core/platform/status.h\"\n#include \"tensorflow/core/platform/stringprintf.h\"\n#include \"tensorflow/core/profiler/lib/traceme.h\"\n#include \"tensorflow/core/profiler/lib/traceme_encode.h\"\n#include \"tensorflow/core/protobuf/error_codes.pb.h\"\n#include \"tsl/platform/logging.h\"\nnamespace tensorflow {\nnamespace data {\n constexpr const char* const ParallelMapDatasetOp::kDatasetType;\n constexpr const char* const ParallelMapDatasetOp::kInputDataset;\n constexpr const char* const ParallelMapDatasetOp::kOtherArguments;\n constexpr const char* const\n ParallelMapDatasetOp::kNumParallelCalls;\n constexpr const char* const ParallelMapDatasetOp::kFunc;\n constexpr const char* const ParallelMapDatasetOp::kTarguments;\n constexpr const char* const ParallelMapDatasetOp::kOutputTypes;\n constexpr const char* const ParallelMapDatasetOp::kOutputShapes;\n constexpr const char* const\n ParallelMapDatasetOp::kUseInterOpParallelism;\n constexpr const char* const ParallelMapDatasetOp::kDeterministic;\n constexpr const char* const ParallelMapDatasetOp::kSloppy;\n constexpr const char* const\n ParallelMapDatasetOp::kPreserveCardinality;\nnamespace {\nconstexpr char kParallelMapDatasetV1[] = \"ParallelMapDataset\";\nconstexpr char kParallelMapDatasetV2[] = \"ParallelMapDatasetV2\";\nconstexpr char kInvocationResults[] = \"invocation_results\";\nconstexpr char kSize[] = \"size\";\nconstexpr char kEndOfInput[] = \"end_of_input\";\nconstexpr char kErrorCode[] = \"code\";\nconstexpr char kErrorMessage[] = \"error_message\";\nconstexpr int kStatsReportingPeriodMillis = 1000;\nconstexpr int kUnboundedThreadpoolAutotuningFactor = 10;\n} \nclass ParallelMapDatasetOp::Dataset : public DatasetBase {\n public:\n Dataset(OpKernelContext* ctx, const DatasetBase* input,\n int64_t num_parallel_calls, const DataTypeVector& output_types,\n const std::vector& output_shapes,\n DeterminismPolicy deterministic,\n std::unique_ptr captured_func,\n bool preserve_cardinality, bool use_unbounded_threadpool,\n int op_version)\n : Dataset(DatasetContext(ctx), input, num_parallel_calls, output_types,\n output_shapes, deterministic, std::move(captured_func),\n preserve_cardinality, use_unbounded_threadpool, op_version) {}\n Dataset(DatasetContext dataset_context, const DatasetBase* input,\n int64_t num_parallel_calls, const DataTypeVector& output_types,\n const std::vector& output_shapes,\n DeterminismPolicy deterministic,\n std::unique_ptr captured_func,\n bool preserve_cardinality, bool use_unbounded_threadpool,\n int op_version)\n : DatasetBase(std::move(dataset_context)),\n input_(input),\n num_parallel_calls_(num_parallel_calls),\n output_types_(output_types),\n output_shapes_(output_shapes),\n deterministic_(deterministic),\n preserve_cardinality_(preserve_cardinality),\n use_unbounded_threadpool_(use_unbounded_threadpool),\n captured_func_(std::move(captured_func)),\n op_version_(op_version) {\n input_->Ref();\n random_indexing_compatible_ = absl::OkStatus();\n if (input_ != nullptr) {\n random_indexing_compatible_ = input_->RandomIndexingCompatible();\n }\n }\n ~Dataset() override { input_->Unref(); }\n std::unique_ptr MakeIteratorInternal(\n const string& prefix) const override {\n name_utils::IteratorPrefixParams params;\n params.op_version = op_version_;\n return std::make_unique(Iterator::Params{\n this, name_utils::IteratorPrefix(kDatasetType, prefix, 
params)});\n }\n const DataTypeVector& output_dtypes() const override { return output_types_; }\n const std::vector& output_shapes() const override {\n return output_shapes_;\n }\n string DebugString() const override {\n name_utils::DatasetDebugStringParams params;\n params.op_version = op_version_;\n return name_utils::DatasetDebugString(ParallelMapDatasetOp::kDatasetType,\n params);\n }\n int64_t CardinalityInternal(CardinalityOptions options) const override {\n if (preserve_cardinality_) {\n return input_->Cardinality(options);\n } else {\n return kUnknownCardinality;\n }\n }\n Status Get(OpKernelContext* ctx, int64 index,\n std::vector* out_tensors) const override {\n TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));\n absl::call_once(instantiated_captured_func_once_, [this, ctx] {\n instantiated_captured_func_status_ = captured_func_->Instantiate(\n InstantiateCapturedFunctionParams(ctx), &instantiated_captured_func_);\n });\n TF_RETURN_IF_ERROR(instantiated_captured_func_status_);\n std::vector args;\n TF_RETURN_IF_ERROR(input_->Get(ctx, index, &args));\n return instantiated_captured_func_->RunInstantiated(args, out_tensors);\n }\n Status InputDatasets(std::vector* inputs) const override {\n inputs->push_back(input_);\n return absl::OkStatus();\n }\n Status CheckExternalState() const override {\n TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());\n return input_->CheckExternalState();\n }\n absl::Status RandomIndexingCompatible() const override {\n return random_indexing_compatible_;\n }\n protected:\n Status AsGraphDefInternal(SerializationContext* ctx,\n DatasetGraphDefBuilder* b,\n Node** output) const override {\n Node* input_graph_node = nullptr;\n TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));\n std::vector other_arguments;\n DataTypeVector other_arguments_types;\n TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,\n &other_arguments_types));\n Node* num_parallel_calls = nullptr;\n if (op_version_ == 1) {\n TF_RETURN_IF_ERROR(b->AddScalar(static_cast(num_parallel_calls_),\n &num_parallel_calls));\n } else {\n TF_RETURN_IF_ERROR(\n b->AddScalar(num_parallel_calls_, &num_parallel_calls));\n }\n std::vector> attrs;\n AttrValue f_attr;\n b->BuildAttrValue(captured_func_->func(), &f_attr);\n attrs.emplace_back(kFunc, f_attr);\n AttrValue other_arguments_types_attr;\n b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);\n attrs.emplace_back(kTarguments, other_arguments_types_attr);\n AttrValue use_inter_op_parallelism_attr;\n b->BuildAttrValue(captured_func_->use_inter_op_parallelism(),\n &use_inter_op_parallelism_attr);\n attrs.emplace_back(kUseInterOpParallelism, use_inter_op_parallelism_attr);\n if (op_version_ == 1) {\n AttrValue sloppy_attr;\n b->BuildAttrValue(deterministic_.IsNondeterministic(), &sloppy_attr);\n attrs.emplace_back(kSloppy, sloppy_attr);\n }\n if (op_version_ == 2) {\n AttrValue deterministic_attr;\n b->BuildAttrValue(deterministic_.String(), &deterministic_attr);\n attrs.emplace_back(kDeterministic, deterministic_attr);\n }\n AttrValue preserve_cardinality_attr;\n b->BuildAttrValue(preserve_cardinality_, &preserve_cardinality_attr);\n attrs.emplace_back(kPreserveCardinality, preserve_cardinality_attr);\n AttrValue use_unbounded_threadpool_attr;\n b->BuildAttrValue(use_unbounded_threadpool_,\n &use_unbounded_threadpool_attr);\n attrs.emplace_back(kUseUnboundedThreadpool, use_unbounded_threadpool_attr);\n TF_RETURN_IF_ERROR(b->AddDataset(\n this,\n {std::make_pair(0, 
input_graph_node),\n std::make_pair(2, num_parallel_calls)}, \n {std::make_pair(1, other_arguments)}, \n attrs, output));\n return absl::OkStatus();\n }\n private:\n class Iterator : public DatasetIterator {\n public:\n explicit Iterator(const Params& params)\n : DatasetIterator(params),\n mu_(std::make_shared()),\n cond_var_(std::make_shared()),\n num_parallel_calls_(std::make_shared(\n params.dataset->num_parallel_calls_, mu_, cond_var_)),\n deterministic_(params.dataset->deterministic_.IsDeterministic() ||\n params.dataset->deterministic_.IsDefault()),\n preserve_cardinality_(params.dataset->preserve_cardinality_),\n use_unbounded_threadpool_(params.dataset->use_unbounded_threadpool_),\n autotune_(params.dataset->num_parallel_calls_ == model::kAutotune) {}\n ~Iterator() override {\n CancelThreads(true);\n input_impl_.reset();\n if (deregister_fn_) deregister_fn_();\n }\n bool SymbolicCheckpointCompatible() const override {\n return deterministic_;\n }\n Status Initialize(IteratorContext* ctx) override {\n mutex_lock l(*mu_);\n interleave_depth_ = ctx->interleave_depth();\n if (use_unbounded_threadpool_) {\n unbounded_thread_pool_ = std::make_unique(\n ctx->env(), \"tf_data_map_unbounded_thread_pool\");\n }\n if (num_parallel_calls_->value == model::kAutotune) {\n num_parallel_calls_->value = GetAutotuneDefaultParallelism(ctx);\n }\n cancellation_manager_ = std::make_unique();\n TF_RETURN_IF_ERROR(RegisterCancellationCallback(\n ctx->cancellation_manager(),\n [this]() { CancelThreads(false); }, &deregister_fn_));\n auto params = std::make_unique(ctx);\n params->cancellation_manager = cancellation_manager_.get();\n auto iter_ctx = std::make_unique(*params);\n TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(\n iter_ctx.get(), this, prefix(), &input_impl_));\n ctx->MergeCheckpoint(iter_ctx->checkpoint());\n TF_RETURN_IF_ERROR(dataset()->captured_func_->Instantiate(\n ctx, &instantiated_captured_func_));\n if (ctx->warm_start() && !ctx->is_restoring()) {\n EnsureThreadsStarted(ctx);\n }\n return absl::OkStatus();\n }\n Status GetNextInternal(IteratorContext* ctx,\n std::vector* out_tensors,\n bool* end_of_sequence) override {\n std::shared_ptr result;\n {\n mutex_lock l(*mu_);\n EnsureThreadsStarted(ctx);\n while (ShouldWait(&result)) {\n RecordStop(ctx);\n cond_var_->wait(l);\n RecordStart(ctx);\n }\n if (cancelled_) {\n return errors::Cancelled(\"Iterator was cancelled\");\n }\n }\n RecordStop(ctx);\n result->notification.WaitForNotification();\n RecordStart(ctx);\n tsl::profiler::TraceMe traceme([&] {\n return tsl::profiler::TraceMeEncode(\"ParallelMapConsume\",\n {{\"element_id\", result->uid}});\n });\n return ProcessResult(ctx, result, out_tensors, end_of_sequence);\n }\n protected:\n std::shared_ptr CreateNode(\n IteratorContext* ctx, model::Node::Args args) const override {\n std::shared_ptr parameter;\n double max_parallelism_value = ctx->runner_threadpool_size();\n if (use_unbounded_threadpool_) {\n max_parallelism_value *= kUnboundedThreadpoolAutotuningFactor;\n }\n if (num_parallel_calls_ &&\n dataset()->num_parallel_calls_ == model::kAutotune) {\n parameter = model::MakeParameter(\n \"parallelism\", num_parallel_calls_, 1,\n max_parallelism_value,\n GetAutotuneDefaultParallelism(ctx));\n } else {\n parameter =\n model::MakeParameter(\"parallelism\", num_parallel_calls_, 1,\n max_parallelism_value);\n }\n std::optional estimated_element_size =\n dataset()->GetEstimatedElementSize();\n if (!estimated_element_size) {\n VLOG(2) << absl::StrFormat(\n \"Cannot estimate the size of 
the output tensor because the \"\n \"output shape of node %s(id:%d) is only partially known.\",\n args.name, args.id);\n }\n return model::MakeAsyncKnownRatioNode(\n std::move(args),\n 1, {std::move(parameter)},\n false, estimated_element_size);\n }\n Status SaveInternal(SerializationContext* ctx,\n IteratorStateWriter* writer) override {\n TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(\n dataset()->captured_func_->CheckExternalState()));\n if (ctx->symbolic_checkpoint()) {\n return absl::OkStatus();\n }\n mutex_lock l(*mu_);\n while (num_calls_ > 0) {\n cond_var_->wait(l);\n }\n if (num_calls_ != 0) {\n return errors::FailedPrecondition(\n \"Unexpected outstanding calls encountered.\");\n }\n TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));\n TF_RETURN_IF_ERROR(writer->WriteScalar(\n prefix(), absl::StrCat(kInvocationResults, \"_\", kSize),\n invocation_results_.size()));\n for (size_t i = 0; i < invocation_results_.size(); i++) {\n const auto& result = *(invocation_results_[i]);\n std::string element_prefix =\n absl::StrCat(prefix(), \"_\", kInvocationResults, \"[\", i, \"]\");\n TF_RETURN_IF_ERROR(\n WriteStatusLocked(writer, element_prefix, result.status));\n TF_RETURN_IF_ERROR(writer->WriteScalar(element_prefix, kSize,\n result.return_values.size()));\n for (size_t j = 0; j < result.return_values.size(); j++) {\n TF_RETURN_IF_ERROR(writer->WriteTensor(element_prefix,\n absl::StrCat(\"[\", j, \"]\"),\n result.return_values[j]));\n }\n TF_RETURN_IF_ERROR(\n writer->WriteScalar(element_prefix, kEndOfInput,\n static_cast(result.end_of_input)));\n }\n return absl::OkStatus();\n }\n Status RestoreInternal(IteratorContext* ctx,\n IteratorStateReader* reader) override {\n mutex_lock l(*mu_);\n TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));\n DCHECK(invocation_results_.empty());\n if (ctx->symbolic_checkpoint()) {\n return absl::OkStatus();\n }\n int64_t invocation_results_size;\n TF_RETURN_IF_ERROR(reader->ReadScalar(\n prefix(), absl::StrCat(kInvocationResults, \"_\", kSize),\n &invocation_results_size));\n for (size_t i = 0; i < invocation_results_size; i++) {\n invocation_results_.push_back(std::make_shared(ctx));\n auto& result = *invocation_results_.back();\n std::string element_prefix =\n absl::StrCat(prefix(), \"_\", kInvocationResults, \"[\", i, \"]\");\n TF_RETURN_IF_ERROR(\n ReadStatusLocked(reader, element_prefix, &result.status));\n size_t num_return_values;\n {\n int64_t size;\n TF_RETURN_IF_ERROR(reader->ReadScalar(element_prefix, kSize, &size));\n num_return_values = static_cast(size);\n if (num_return_values != size) {\n return errors::InvalidArgument(\n element_prefix, \",\", kSize, \": \", size,\n \" is not a valid value of type size_t.\");\n }\n }\n result.return_values.reserve(num_return_values);\n for (size_t j = 0; j < num_return_values; j++) {\n result.return_values.emplace_back();\n TF_RETURN_IF_ERROR(reader->ReadTensor(ctx->flr(), element_prefix,\n absl::StrCat(\"[\", j, \"]\"),\n &result.return_values.back()));\n }\n int64_t end_of_input;\n TF_RETURN_IF_ERROR(\n reader->ReadScalar(element_prefix, kEndOfInput, &end_of_input));\n result.end_of_input = static_cast(end_of_input);\n RecordBufferEnqueue(ctx, result.return_values);\n result.notification.Notify();\n }\n return absl::OkStatus();\n }\n TraceMeMetadata GetTraceMeMetadata() const override {\n int64_t parallelism = -1;\n if (mu_->try_lock()) {\n parallelism = num_parallel_calls_->value;\n mu_->unlock();\n }\n data::TraceMeMetadata result;\n result.push_back(\n 
std::make_pair(\"autotune\", autotune_ ? \"true\" : \"false\"));\n result.push_back(\n std::make_pair(\"deterministic\", deterministic_ ? \"true\" : \"false\"));\n result.push_back(\n std::make_pair(\"use_unbounded_threadpool\",\n use_unbounded_threadpool_ ? \"true\" : \"false\"));\n result.push_back(std::make_pair(\n \"parallelism\",\n parallelism == -1\n ? kTraceInfoUnavailable\n : strings::Printf(\"%lld\", static_cast(parallelism))));\n result.push_back(std::make_pair(\n \"interleave_depth\",\n strings::Printf(\"%lld\", static_cast(interleave_depth_))));\n return result;\n }\n private:\n struct InvocationResult {\n explicit InvocationResult(IteratorContext* ctx)\n : uid(tensorflow::EnvTime::NowNanos()),\n checkpoint(MemoryCheckpoint{ctx->id_registry()}) {}\n Notification notification;\n Status status;\n std::vector return_values;\n bool end_of_input = false;\n const int64_t uid;\n MemoryCheckpoint checkpoint;\n };\n void CancelThreads(bool wait) TF_LOCKS_EXCLUDED(mu_) {\n cancellation_manager_->StartCancel();\n mutex_lock l(*mu_);\n cancelled_ = true;\n cond_var_->notify_all();\n while (wait && num_calls_ > 0) {\n cond_var_->wait(l);\n }\n }\n void EnsureThreadsStarted(IteratorContext* ctx)\n TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {\n if (!runner_thread_) {\n auto ctx_copy = std::make_shared(*ctx);\n runner_thread_ = ctx->StartThread(\n \"tf_data_parallel_map\",\n std::bind(&Iterator::RunnerThread, this, ctx_copy));\n if (ctx->stats_aggregator()) {\n stats_thread_ = ctx->StartThread(\n \"tf_data_parallel_map_stats\",\n std::bind(&Iterator::StatsThread, this, ctx_copy));\n }\n }\n }\n void CallCompleted(const std::shared_ptr& ctx,\n const std::shared_ptr& result)\n TF_LOCKS_EXCLUDED(*mu_) {\n mutex_lock l(*mu_);\n num_calls_--;\n result->notification.Notify();\n cond_var_->notify_all();\n }\n void CallFunction(const std::shared_ptr& ctx,\n const std::shared_ptr& result)\n TF_LOCKS_EXCLUDED(*mu_) {\n tsl::profiler::TraceMe traceme([&] {\n return tsl::profiler::TraceMeEncode(\"ParallelMapProduce\",\n {{\"element_id\", result->uid}});\n });\n std::vector input_element;\n result->status = input_impl_->GetNext(ctx.get(), &input_element,\n &result->end_of_input);\n result->checkpoint.Merge(ctx->checkpoint());\n if (result->end_of_input || !result->status.ok()) {\n CallCompleted(ctx, result);\n return;\n }\n auto done = [this, ctx, result](Status status) {\n if (!status.ok()) {\n result->status = AddErrorContext(status);\n }\n RecordBufferEnqueue(ctx.get(), result->return_values);\n CallCompleted(ctx, result);\n };\n if (use_unbounded_threadpool_) {\n auto runner_fn = [this](std::function fn) {\n this->unbounded_thread_pool_->Schedule(fn);\n };\n instantiated_captured_func_->RunAsync(\n runner_fn, ctx->cancellation_manager(), ctx->collective_executor(),\n std::move(input_element), &result->return_values, done,\n model_node());\n } else if (dataset()->captured_func_->use_inter_op_parallelism()) {\n instantiated_captured_func_->RunAsync(\n ctx.get(), std::move(input_element), &result->return_values,\n std::move(done), model_node());\n } else {\n auto fn = std::bind(\n [this, ctx, result](std::vector input_element) {\n return instantiated_captured_func_->Run(\n ctx.get(), std::move(input_element), &result->return_values,\n model_node());\n },\n std::move(input_element));\n (*ctx->runner())(\n [this, ctx, fn = std::move(fn), done = std::move(done)]() {\n Status s;\n if (IsRecording(ctx.get())) {\n s = fn();\n } else {\n RecordStart(ctx.get());\n s = fn();\n RecordStop(ctx.get());\n }\n done(s);\n });\n 
}\n }\n Status ProcessResult(IteratorContext* ctx,\n const std::shared_ptr& result,\n std::vector* out_tensors,\n bool* end_of_sequence) TF_LOCKS_EXCLUDED(*mu_) {\n ctx->MergeCheckpoint(&result->checkpoint);\n if (!result->end_of_input && result->status.ok()) {\n *out_tensors = std::move(result->return_values);\n RecordBufferDequeue(ctx, *out_tensors);\n *end_of_sequence = false;\n return absl::OkStatus();\n }\n if (errors::IsOutOfRange(result->status)) {\n if (preserve_cardinality_) {\n return errors::InvalidArgument(\n \"Function invocation produced OutOfRangeError: \",\n result->status.message());\n } else {\n *end_of_sequence = true;\n return absl::OkStatus();\n }\n }\n *end_of_sequence = result->end_of_input;\n return result->status;\n }\n void RunnerThread(const std::shared_ptr& ctx)\n TF_LOCKS_EXCLUDED(*mu_) {\n RecordStart(ctx.get());\n auto cleanup = gtl::MakeCleanup([this, ctx] { RecordStop(ctx.get()); });\n std::vector> new_calls;\n {\n tf_shared_lock l(*mu_); \n new_calls.reserve(num_parallel_calls_->value);\n }\n auto busy = [this]() TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) -> bool {\n int64_t num_parallel_calls = num_parallel_calls_->value;\n return num_calls_ >= num_parallel_calls ||\n invocation_results_.size() >= num_parallel_calls;\n };\n while (true) {\n {\n mutex_lock l(*mu_);\n while (!cancelled_ && busy()) {\n RecordStop(ctx.get());\n cond_var_->wait(l);\n RecordStart(ctx.get());\n }\n if (cancelled_) {\n return;\n }\n while (!busy()) {\n invocation_results_.push_back(\n std::make_shared(ctx.get()));\n new_calls.push_back(invocation_results_.back());\n num_calls_++;\n }\n cond_var_->notify_all();\n }\n for (const auto& call : new_calls) {\n CallFunction(ctx, call);\n }\n new_calls.clear();\n }\n }\n bool ShouldWait(std::shared_ptr* result)\n TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {\n if (cancelled_) {\n return false;\n }\n if (!deterministic_) {\n for (auto it = invocation_results_.begin();\n it != invocation_results_.end(); ++it) {\n if ((*it)->notification.HasBeenNotified() &&\n (it == invocation_results_.begin() || !(*it)->end_of_input)) {\n std::swap(*result, *it);\n invocation_results_.erase(it);\n cond_var_->notify_all();\n return false;\n }\n }\n } else if (!invocation_results_.empty()) {\n std::swap(*result, invocation_results_.front());\n invocation_results_.pop_front();\n cond_var_->notify_all();\n return false;\n }\n return true;\n }\n void StatsThread(const std::shared_ptr& ctx)\n TF_LOCKS_EXCLUDED(*mu_) {\n for (int64_t step = 0;; ++step) {\n int num_calls;\n int num_parallel_calls;\n {\n mutex_lock l(*mu_);\n if (step != 0 && !cancelled_) {\n cond_var_->wait_for(\n l, std::chrono::milliseconds(kStatsReportingPeriodMillis));\n }\n if (cancelled_) {\n return;\n }\n num_calls = num_calls_;\n num_parallel_calls = num_parallel_calls_->value;\n }\n if (num_parallel_calls == 0) {\n num_parallel_calls = 1;\n }\n ctx->stats_aggregator()->AddScalar(\n stats_utils::ThreadUtilizationScalarName(dataset()->node_name()),\n static_cast(num_calls) /\n static_cast(num_parallel_calls),\n step);\n }\n }\n Status WriteStatusLocked(IteratorStateWriter* writer,\n const std::string& prefix, const Status& status)\n TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {\n TF_RETURN_IF_ERROR(\n writer->WriteScalar(prefix, absl::StrCat(\"_\", kErrorCode),\n static_cast(status.code())));\n if (!status.ok()) {\n TF_RETURN_IF_ERROR(writer->WriteScalar(prefix,\n absl::StrCat(\"_\", kErrorMessage),\n std::string(status.message())));\n }\n return absl::OkStatus();\n }\n Status ReadStatusLocked(IteratorStateReader* 
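The scheduling invariant above is that the runner thread admits new calls only while `busy()` is false, i.e. while both the number of outstanding calls and the number of buffered results stay below `num_parallel_calls`, and consumers block in `ShouldWait` until a result is ready. A minimal standalone sketch of that bounded producer/consumer pattern (hypothetical names, plain std::thread in place of tf.data's runner machinery, and bounding only the result buffer for brevity):

#include <condition_variable>
#include <cstdio>
#include <deque>
#include <mutex>
#include <thread>

// Sketch of the ParallelMap scheduling invariant: at most `max_in_flight`
// mapped results may be buffered at once.
class BoundedRunner {
 public:
  explicit BoundedRunner(int max_in_flight) : max_in_flight_(max_in_flight) {}

  void Produce(int value) {
    std::unique_lock<std::mutex> l(mu_);
    // Analogue of the busy() check in RunnerThread.
    cv_.wait(l, [this] { return results_.size() < max_in_flight_; });
    results_.push_back(value * 2);  // stands in for the mapped user function
    cv_.notify_all();
  }

  int Consume() {
    std::unique_lock<std::mutex> l(mu_);
    // Analogue of the ShouldWait() loop in GetNextInternal.
    cv_.wait(l, [this] { return !results_.empty(); });
    int v = results_.front();
    results_.pop_front();
    cv_.notify_all();
    return v;
  }

 private:
  const std::size_t max_in_flight_;
  std::mutex mu_;
  std::condition_variable cv_;
  std::deque<int> results_;
};

int main() {
  BoundedRunner runner(2);
  std::thread producer([&] {
    for (int i = 0; i < 8; ++i) runner.Produce(i);
  });
  for (int i = 0; i < 8; ++i) std::printf("%d\n", runner.Consume());
  producer.join();
}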
Unit Test - (Ground Truth):

#include "tensorflow/core/kernels/data/parallel_map_dataset_op.h"
#include
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/tensor_shape.h"

namespace tensorflow {
namespace data {
namespace {

constexpr char kNodeName[] = "parallel_map_dataset";
constexpr int kOpVersion = 2;

class ParallelMapDatasetParams : public DatasetParams {
 public:
  template <typename T>
  ParallelMapDatasetParams(
      T input_dataset_params, std::vector<Tensor> other_arguments,
      int num_parallel_calls, FunctionDefHelper::AttrValueWrapper func,
      std::vector<FunctionDef> func_lib, DataTypeVector type_arguments,
      const DataTypeVector& output_dtypes,
      const std::vector<PartialTensorShape>& output_shapes,
      bool use_inter_op_parallelism, const std::string& deterministic,
      bool preserve_cardinality, string node_name)
      : DatasetParams(std::move(output_dtypes), std::move(output_shapes),
                      std::move(node_name)),
        other_arguments_(std::move(other_arguments)),
        num_parallel_calls_(num_parallel_calls),
        func_(std::move(func)),
        func_lib_(std::move(func_lib)),
        type_arguments_(std::move(type_arguments)),
        use_inter_op_parallelism_(use_inter_op_parallelism),
        deterministic_(deterministic),
        preserve_cardinality_(preserve_cardinality) {
    input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
    op_version_ = kOpVersion;
    name_utils::IteratorPrefixParams params;
    params.op_version = op_version_;
    iterator_prefix_ = name_utils::IteratorPrefix(
        input_dataset_params.dataset_type(),
        input_dataset_params.iterator_prefix(), params);
  }

  std::vector<Tensor> GetInputTensors() const override {
    auto input_tensors = other_arguments_;
    input_tensors.emplace_back(
        CreateTensor<int64_t>(TensorShape({}), {num_parallel_calls_}));
    return input_tensors;
  }

  Status GetInputNames(std::vector<string>* input_names) const override {
    input_names->emplace_back(ParallelMapDatasetOp::kInputDataset);
    for (int i = 0; i < other_arguments_.size(); ++i) {
      input_names->emplace_back(
          absl::StrCat(ParallelMapDatasetOp::kOtherArguments, "_", i));
    }
    input_names->emplace_back(ParallelMapDatasetOp::kNumParallelCalls);
    return absl::OkStatus();
  }

  Status GetAttributes(AttributeVector* attr_vector) const override {
    *attr_vector = {{"f", func_},
                    {"Targuments", type_arguments_},
                    {"output_shapes", output_shapes_},
                    {"output_types", output_dtypes_},
                    {"use_inter_op_parallelism", use_inter_op_parallelism_},
                    {"deterministic", deterministic_},
                    {"preserve_cardinality", preserve_cardinality_},
                    {"metadata", ""}};
    return absl::OkStatus();
  }

  string dataset_type() const override {
    return ParallelMapDatasetOp::kDatasetType;
  }

  std::vector<FunctionDef> func_lib() const override { return func_lib_; }

 private:
  std::vector<Tensor> other_arguments_;
  int num_parallel_calls_;
  FunctionDefHelper::AttrValueWrapper func_;
  std::vector<FunctionDef> func_lib_;
  DataTypeVector type_arguments_;
  bool use_inter_op_parallelism_;
  std::string deterministic_;
  bool preserve_cardinality_;
};

class ParallelMapDatasetOpTest : public DatasetOpsTestBase {};

FunctionDefHelper::AttrValueWrapper MapFunc(const string& func_name,
                                            const DataType& dtype) {
  return FunctionDefHelper::FunctionRef(func_name, {{"T", dtype}});
}

ParallelMapDatasetParams ParallelMapDatasetParams1() {
  return ParallelMapDatasetParams(
      RangeDatasetParams(0, 10, 3), {}, 1,
      MapFunc("XTimesTwo", DT_INT64),
      {test::function::XTimesTwo()}, {}, {DT_INT64},
      {PartialTensorShape({})}, false,
      DeterminismPolicy::kDeterministic, false, kNodeName);
}

ParallelMapDatasetParams ParallelMapDatasetParams2() {
  return ParallelMapDatasetParams(
      RangeDatasetParams(0, 10, 3), {}, 2,
      MapFunc("XTimesTwo", DT_INT64),
      {test::function::XTimesTwo()}, {}, {DT_INT64},
      {PartialTensorShape({})}, true,
      DeterminismPolicy::kNondeterministic, true, kNodeName);
}

ParallelMapDatasetParams ParallelMapDatasetParams3() {
  return ParallelMapDatasetParams(
      RangeDatasetParams(0, 10, 3), {}, 3,
      MapFunc("XTimesFour", DT_INT64),
      {test::function::XTimesTwo(), test::function::XTimesFour()}, {},
      {DT_INT64}, {PartialTensorShape({})}, true,
      DeterminismPolicy::kDeterministic, false, kNodeName);
}

ParallelMapDatasetParams ParallelMapDatasetParams4() {
  return ParallelMapDatasetParams(
      RangeDatasetParams(0, 10, 3), {}, 4,
      MapFunc("XTimesTwo", DT_INT64),
      {test::function::XTimesTwo()}, {}, {DT_INT64},
      {PartialTensorShape({})}, false,
      DeterminismPolicy::kDeterministic, false, kNodeName);
}

ParallelMapDatasetParams ParallelMapDatasetParams5() {
  return ParallelMapDatasetParams(
      RangeDatasetParams(0, 10, 3), {}, model::kAutotune,
      MapFunc("XTimesFour", DT_INT64),
      {test::function::XTimesTwo(), test::function::XTimesFour()}, {},
      {DT_INT64}, {PartialTensorShape({})}, true,
      DeterminismPolicy::kNondeterministic, true, kNodeName);
}

ParallelMapDatasetParams ParallelMapDatasetParams6() {
  return ParallelMapDatasetParams(
      RangeDatasetParams(0, 10, 3), {}, 4,
      MapFunc("XTimesFour", DT_INT64),
      {test::function::XTimesTwo(), test::function::XTimesFour()}, {},
      {DT_INT64}, {PartialTensorShape({})}, true,
      DeterminismPolicy::kDeterministic, false, kNodeName);
}

ParallelMapDatasetParams ParallelMapDatasetParams7() {
  return ParallelMapDatasetParams(
      RangeDatasetParams(0, 10, 3), {}, 2,
      MapFunc("XTimesFour", DT_INT64),
      {test::function::XTimesTwo(), test::function::XTimesFour()}, {},
      {DT_INT64}, {PartialTensorShape({})}, false,
      DeterminismPolicy::kDeterministic, false, kNodeName);
}

ParallelMapDatasetParams ParallelMapDatasetParams8() {
  return ParallelMapDatasetParams(
      RangeDatasetParams(0, 10, 3), {}, model::kAutotune,
      MapFunc("XTimesFour", DT_INT64),
      {test::function::XTimesTwo(), test::function::XTimesFour()}, {},
      {DT_INT64}, {PartialTensorShape({})}, false,
      DeterminismPolicy::kNondeterministic, true, kNodeName);
}

ParallelMapDatasetParams ParallelMapDatasetParams9() {
  return ParallelMapDatasetParams(
      BatchDatasetParams(RangeDatasetParams(0, 4, 1), 3, false, false,
                         {DT_INT64}, {PartialTensorShape({-1})},
                         "batch_dataset"),
      {}, 1, MapFunc("XTimesTwo", DT_INT64),
      {test::function::XTimesTwo()}, {}, {DT_INT64},
      {PartialTensorShape({-1})}, false,
      DeterminismPolicy::kDeterministic, false, kNodeName);
}

ParallelMapDatasetParams ParallelMapDatasetParamsWithInvalidNumParallelCalls() {
  return ParallelMapDatasetParams(
      RangeDatasetParams(0, 10, 3), {}, -4,
      MapFunc("XTimesTwo", DT_INT64),
      {test::function::XTimesTwo()}, {}, {DT_INT64},
      {PartialTensorShape({})}, true,
      DeterminismPolicy::kNondeterministic, true, kNodeName);
}

std::vector<GetNextTestCase<ParallelMapDatasetParams>> GetNextTestCases() {
  return {{ParallelMapDatasetParams1(),
           CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
           true},
          {ParallelMapDatasetParams2(),
           CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
           false},
          {ParallelMapDatasetParams3(),
           CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
           true},
          {ParallelMapDatasetParams4(),
           CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
           true},
          {ParallelMapDatasetParams5(),
           CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
           false},
          {ParallelMapDatasetParams6(),
           CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
           true},
          {ParallelMapDatasetParams9(),
           {CreateTensor<int64_t>(TensorShape{3}, {0, 2, 4}),
            CreateTensor<int64_t>(TensorShape{1}, {6})},
           true}};
}

ITERATOR_GET_NEXT_TEST_P(ParallelMapDatasetOpTest, ParallelMapDatasetParams,
                         GetNextTestCases())

TEST_F(ParallelMapDatasetOpTest, DatasetNodeName) {
  auto dataset_params = ParallelMapDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}

TEST_F(ParallelMapDatasetOpTest, DatasetTypeString) {
  auto dataset_params = ParallelMapDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  name_utils::OpNameParams params;
  params.op_version = dataset_params.op_version();
  TF_ASSERT_OK(CheckDatasetTypeString(
      name_utils::OpName(ParallelMapDatasetOp::kDatasetType, params)));
}

TEST_F(ParallelMapDatasetOpTest, DatasetOutputDtypes) {
  auto dataset_params = ParallelMapDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}

TEST_F(ParallelMapDatasetOpTest, DatasetOutputShapes) {
  auto dataset_params = ParallelMapDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}

TEST_F(ParallelMapDatasetOpTest, DatasetElementSizeHasValue) {
  auto dataset_params = ParallelMapDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  auto element_size = dataset_->GetEstimatedElementSize();
  ASSERT_TRUE(element_size.has_value());
  EXPECT_GT(element_size.value(), 0);
}

TEST_F(ParallelMapDatasetOpTest, DatasetElementSizeNoValue) {
  auto dataset_params = ParallelMapDatasetParams9();
  TF_ASSERT_OK(Initialize(dataset_params));
  EXPECT_FALSE(dataset_->GetEstimatedElementSize().has_value());
}

std::vector<CardinalityTestCase<ParallelMapDatasetParams>>
CardinalityTestCases() {
  return {{ParallelMapDatasetParams1(), kUnknownCardinality},
          {ParallelMapDatasetParams2(), 4},
          {ParallelMapDatasetParams3(), kUnknownCardinality},
          {ParallelMapDatasetParams4(), kUnknownCardinality},
          {ParallelMapDatasetParams5(), 4},
          {ParallelMapDatasetParams6(), kUnknownCardinality}};
}

DATASET_CARDINALITY_TEST_P(ParallelMapDatasetOpTest, ParallelMapDatasetParams,
                           CardinalityTestCases())

TEST_F(ParallelMapDatasetOpTest, IteratorOutputDtypes) {
  auto dataset_params = ParallelMapDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}

TEST_F(ParallelMapDatasetOpTest, IteratorOutputShapes) {
  auto dataset_params = ParallelMapDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}

TEST_F(ParallelMapDatasetOpTest, IteratorPrefix) {
  auto dataset_params = ParallelMapDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  name_utils::IteratorPrefixParams params;
  params.op_version = dataset_params.op_version();
  TF_ASSERT_OK(CheckIteratorPrefix(
      name_utils::IteratorPrefix(ParallelMapDatasetOp::kDatasetType,
                                 dataset_params.iterator_prefix(), params)));
}

std::vector<IteratorSaveAndRestoreTestCase<ParallelMapDatasetParams>>
IteratorSaveAndRestoreTestCases() {
  return {{ParallelMapDatasetParams1(), {0, 1, 5},
           CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
           true},
          {ParallelMapDatasetParams2(), {0, 1, 5},
           CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
           false},
          {ParallelMapDatasetParams3(), {0, 1, 5},
           CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
           true},
          {ParallelMapDatasetParams4(), {0, 1, 5},
           CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
           true},
          {ParallelMapDatasetParams5(), {0, 1, 5},
           CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
           false},
          {ParallelMapDatasetParams6(), {0, 1, 5},
           CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
           true}};
}

ITERATOR_SAVE_AND_RESTORE_TEST_P(ParallelMapDatasetOpTest,
                                 ParallelMapDatasetParams,
                                 IteratorSaveAndRestoreTestCases())

TEST_F(ParallelMapDatasetOpTest, InvalidNumParallelCalls) {
  auto dataset_params = ParallelMapDatasetParamsWithInvalidNumParallelCalls();
  EXPECT_EQ(Initialize(dataset_params).code(),
            absl::StatusCode::kInvalidArgument);
}

}  // namespace
}  // namespace data
}  // namespace tensorflow

Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/parallel_map_dataset_op.cc
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/parallel_map_dataset_op_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
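The expected outputs in these test cases follow directly from the inputs: RangeDataset(0, 10, 3) yields {0, 3, 6, 9}, so XTimesTwo produces {0, 6, 12, 18} and XTimesFour produces {0, 12, 24, 36}; Params9 batches {0, 1, 2, 3} into {0, 1, 2} and {3} before doubling. A quick standalone check of that arithmetic:

#include <cstdio>
#include <vector>

int main() {
  std::vector<long long> range;
  for (long long v = 0; v < 10; v += 3) range.push_back(v);  // RangeDataset(0, 10, 3)
  for (long long v : range) {
    std::printf("x2: %lld  x4: %lld\n", 2 * v, 4 * v);
  }
  // Prints (0, 0), (6, 12), (12, 24), (18, 36) - the values expected by
  // GetNextTestCases for XTimesTwo and XTimesFour respectively.
}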
========================================================================
Row 239
ID: 1da53164-e7c9-403c-8625-eaec6fdf5874
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: wav_to_spectrogram
File Path in Repository: tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram.cc
File Path for Unit Test: tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram_test.cc

Code:

#include "tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram.h"
#include
#include "tensorflow/cc/ops/audio_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/default_device.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"

using tensorflow::DT_FLOAT;
using tensorflow::DT_UINT8;
using tensorflow::Output;
using tensorflow::TensorShape;

tensorflow::Status WavToSpectrogram(const tensorflow::string& input_wav,
                                    int32_t window_size, int32_t stride,
                                    float brightness,
                                    const tensorflow::string& output_image) {
  auto root = tensorflow::Scope::NewRootScope();
  using namespace tensorflow::ops;  // NOLINT(build/namespaces)
  // Read the WAV file, decode it, and compute the spectrogram.
  Output file_reader =
      tensorflow::ops::ReadFile(root.WithOpName("input_wav"), input_wav);
  DecodeWav wav_decoder =
      DecodeWav(root.WithOpName("wav_decoder"), file_reader);
  Output spectrogram = AudioSpectrogram(root.WithOpName("spectrogram"),
                                        wav_decoder.audio, window_size, stride);
  // Scale by a brightness factor, clamp to [0, 255], and cast to uint8.
  Output brightness_placeholder =
      Placeholder(root.WithOpName("brightness_placeholder"), DT_FLOAT,
                  Placeholder::Attrs().Shape(TensorShape({})));
  Output mul = Mul(root.WithOpName("mul"), spectrogram, brightness_placeholder);
  Output min_const = Const(root.WithOpName("min_const"), 255.0f);
  Output min = Minimum(root.WithOpName("min"), mul, min_const);
  Output cast = Cast(root.WithOpName("cast"), min, DT_UINT8);
  // Reshape to an image and encode it as a PNG file.
  Output expand_dims_const = Const(root.WithOpName("expand_dims_const"), -1);
  Output expand_dims =
      ExpandDims(root.WithOpName("expand_dims"), cast, expand_dims_const);
  Output squeeze = Squeeze(root.WithOpName("squeeze"), expand_dims,
                           Squeeze::Attrs().Axis({0}));
  Output png_encoder = EncodePng(root.WithOpName("png_encoder"), squeeze);
  tensorflow::ops::WriteFile file_writer = tensorflow::ops::WriteFile(
      root.WithOpName("output_image"), output_image, png_encoder);
  tensorflow::GraphDef graph;
  TF_RETURN_IF_ERROR(root.ToGraphDef(&graph));
  // Build and run the graph in a session, feeding the brightness value.
  std::unique_ptr<tensorflow::Session> session(
      tensorflow::NewSession(tensorflow::SessionOptions()));
  TF_RETURN_IF_ERROR(session->Create(graph));
  tensorflow::Tensor brightness_tensor(DT_FLOAT, TensorShape({}));
  brightness_tensor.scalar<float>()() = brightness;
  TF_RETURN_IF_ERROR(
      session->Run({{"brightness_placeholder", brightness_tensor}}, {},
                   {"output_image"}, nullptr));
  return absl::OkStatus();
}
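WavToSpectrogram is the library entry point; a minimal driver might look like the following (the file names and parameter values here are hypothetical, chosen to mirror the unit test below):

#include <cstdio>
#include "tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram.h"

// Hypothetical driver: renders input.wav into spectrogram.png using a
// 256-sample window, 128-sample stride, and a brightness gain of 64.
int main() {
  tensorflow::Status status =
      WavToSpectrogram("input.wav", 256, 128, 64.0f, "spectrogram.png");
  if (!status.ok()) {
    std::fprintf(stderr, "WavToSpectrogram failed: %s\n",
                 status.ToString().c_str());
    return 1;
  }
  return 0;
}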
Unit Test - (Ground Truth):

#include "tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/wav/wav_io.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"

TEST(WavToSpectrogramTest, WavToSpectrogramTest) {
  const tensorflow::string input_wav =
      tensorflow::io::JoinPath(tensorflow::testing::TmpDir(), "input_wav.wav");
  const tensorflow::string output_image = tensorflow::io::JoinPath(
      tensorflow::testing::TmpDir(), "output_image.png");
  float audio[8] = {-1.0f, 0.0f, 1.0f, 0.0f, -1.0f, 0.0f, 1.0f, 0.0f};
  tensorflow::string wav_string;
  TF_ASSERT_OK(
      tensorflow::wav::EncodeAudioAsS16LEWav(audio, 44100, 1, 8, &wav_string));
  TF_ASSERT_OK(tensorflow::WriteStringToFile(tensorflow::Env::Default(),
                                             input_wav, wav_string));
  TF_ASSERT_OK(WavToSpectrogram(input_wav, 4, 4, 64.0f, output_image));
  TF_EXPECT_OK(tensorflow::Env::Default()->FileExists(output_image));
}

Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram.cc
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea

========================================================================
Row 240
ID: 52916fe4-bde7-4bfd-a579-9931b007d93f
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: math
File Path in Repository: third_party/xla/xla/hlo/builder/lib/math.cc
File Path for Unit Test: third_party/xla/xla/hlo/builder/lib/math_test.cc

Code:

#include "xla/hlo/builder/lib/math.h"
#include
#include
#include
#include
#include
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/loops.h"
#include "xla/hlo/builder/lib/math_impl.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace {

// Evaluates a polynomial given `x` and coefficients in decreasing order.
template <typename FP>
XlaOp EvaluatePolynomial(XlaOp x, absl::Span<const FP> coefficients) {
  static_assert(std::is_floating_point<FP>::value,
                "Template-argument 'FP' must be a floating-point type");
  if (coefficients.empty()) {
    return ScalarLike(x, FP(0.0));
  }
  XlaOp poly = ScalarLike(x, coefficients[0]);
  for (int i = 1; i < coefficients.size(); ++i) {
    FP c = coefficients[i];
    poly = poly * x + ScalarLike(x, c);
  }
  return poly;
}

// Evaluates a chebyshev polynomial given `x` and its coefficients.
template <typename FP>
XlaOp EvaluateChebyshevPolynomial(XlaOp x, absl::Span<const FP> coefficients) {
  static_assert(std::is_floating_point<FP>::value,
                "Template-argument 'FP' must be a floating-point type");
  XlaOp b0 = ScalarLike(x, 0.0);
  XlaOp b1 = ScalarLike(x, 0.0);
  XlaOp b2 = ScalarLike(x, 0.0);
  for (FP c : coefficients) {
    b2 = b1;
    b1 = b0;
    b0 = x * b1 - b2 + ScalarLike(x, c);
  }
  return ScalarLike(x, 0.5) * (b0 - b2);
}

}  // namespace
static XlaOp DoWithUpcastToF32(XlaOp operand,
                               absl::Span<const PrimitiveType> upcast_types,
                               const std::function<XlaOp(XlaOp)>& operation) {
  auto& b = *operand.builder();
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand));
    PrimitiveType elem_ty = shape.element_type();
    bool needs_upcast =
        upcast_types.empty()
            ? primitive_util::BitWidth(shape.element_type()) <= 16
            : absl::c_linear_search(upcast_types, elem_ty);
    if (needs_upcast) {
      operand = ConvertElementType(operand, F32);
    }
    XlaOp result = operation(operand);
    if (needs_upcast) {
      result = ConvertElementType(result, elem_ty);
    }
    return result;
  });
}

static absl::Status EnsureOperandIsRealFp(absl::string_view op_name,
                                          XlaOp operand) {
  auto& b = *operand.builder();
  TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand));
  auto elem_ty = shape.element_type();
  if (!primitive_util::IsFloatingPointType(elem_ty)) {
    return InvalidArgument(
        "Operands to %s must be real-valued floating-point, but got %s",
        op_name, PrimitiveType_Name(elem_ty));
  }
  return absl::OkStatus();
}

XlaOp IsPosInf(XlaOp operand) {
  auto& b = *operand.builder();
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IsPosInf", operand));
    TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand));
    return Eq(operand, MaxValue(&b, shape.element_type()));
  });
}

XlaOp IsNegInf(XlaOp operand) {
  auto& b = *operand.builder();
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IsNegInf", operand));
    TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand));
    return Eq(operand, MinValue(&b, shape.element_type()));
  });
}

XlaOp IsInf(XlaOp operand) {
  auto& b = *operand.builder();
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IsInf", operand));
    return IsPosInf(Abs(operand));
  });
}

XlaOp IsNan(XlaOp operand) {
  auto& b = *operand.builder();
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IsNan", operand));
    return Ne(operand, operand);
  });
}

XlaOp IsNegZero(XlaOp operand) {
  auto& b = *operand.builder();
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IsNegZero", operand));
    TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand));
    switch (shape.element_type()) {
      case F64:
        return Eq(BitcastConvertType(operand, U64),
                  ConstantR0WithType(&b, U64, uint64_t{1} << 63));
      case F32:
        return Eq(BitcastConvertType(operand, U32),
                  ConstantR0WithType(&b, U32, uint32_t{1} << 31));
      case F8E3M4:
      case F8E4M3:
      case F8E5M2:
      case F8E4M3FN:
      case F8E4M3B11FNUZ:
      case F8E5M2FNUZ:
      case F8E4M3FNUZ:
      case F16:
      case BF16:
        // Narrow types are compared against the F32 bit pattern after
        // upconversion.
        return Eq(BitcastConvertType(ConvertElementType(operand, F32), U32),
                  ConstantR0WithType(&b, U32, uint32_t{1} << 31));
      default:
        LOG(FATAL) << "Expected real fp type.";
    }
  });
}

XlaOp Square(XlaOp operand) { return operand * operand; }

XlaOp Reciprocal(XlaOp operand) { return ScalarLike(operand, 1.0) / operand; }

// Computes an approximation of the complementary error function for |x| > 1.
static XlaOp ErfcImpl32(XlaOp x) {
  const double kMaxlog = 88.72283905206835;
  static const std::array<float, 9> kErfcPCoefficient{
      +2.326819970068386E-2, -1.387039388740657E-1, +3.687424674597105E-1,
      -5.824733027278666E-1, +6.210004621745983E-1, -4.944515323274145E-1,
      +3.404879937665872E-1, -2.741127028184656E-1, +5.638259427386472E-1,
  };
  static const std::array<float, 8> kErfcRCoefficient{
      -1.047766399936249E+1, +1.297719955372516E+1, -7.495518717768503E+0,
      +2.921019019210786E+0, -1.015265279202700E+0, +4.218463358204948E-1,
      -2.820767439740514E-1, +5.641895067754075E-1,
  };
  XlaOp abs_x = Abs(x);
  XlaOp z = Exp(-x * x);
  XlaOp q = ScalarLike(x, 1) / abs_x;
  XlaOp y = q * q;
  XlaOp p = Select(Lt(abs_x, ScalarLike(x, 2.0)),
                   EvaluatePolynomial<float>(y, kErfcPCoefficient),
                   EvaluatePolynomial<float>(y, kErfcRCoefficient));
  y = z * q * p;
  XlaOp y_clamp = Select(Lt(z, ScalarLike(x, -kMaxlog)), ScalarLike(x, 0), y);
  return Select(Lt(x, ScalarLike(x, 0)), ScalarLike(x, 2.0) - y_clamp, y_clamp);
}

// Computes an approximation of the error function for |x| <= 1.
static XlaOp ErfImpl32Cephes(XlaOp x) {
  static const std::array<float, 7> kErfTCoefficient{
      +7.853861353153693E-5, -8.010193625184903E-4, +5.188327685732524E-3,
      -2.685381193529856E-2, +1.128358514861418E-1, -3.761262582423300E-1,
      +1.128379165726710E+0,
  };
  return x * EvaluatePolynomial<float>(x * x, kErfTCoefficient);
}

static XlaOp ErfcImpl64(XlaOp x) {
  const double kMaxlog = 7.09782712893383996843E2;
  static const std::array<double, 9> kErfcPCoefficient{
      2.46196981473530512524E-10, 5.64189564831068821977E-1,
      7.46321056442269912687E0,   4.86371970985681366614E1,
      1.96520832956077098242E2,   5.26445194995477358631E2,
      9.34528527171957607540E2,   1.02755188689515710272E3,
      5.57535335369399327526E2};
  static const std::array<double, 9> kErfcQCoefficient{
      1.00000000000000000000E0, 1.32281951154744992508E1,
      8.67072140885989742329E1, 3.54937778887819891062E2,
      9.75708501743205489753E2, 1.82390916687909736289E3,
      2.24633760818710981792E3, 1.65666309194161350182E3,
      5.57535340817727675546E2};
  static const std::array<double, 6> kErfcRCoefficient{
      5.64189583547755073984E-1, 1.27536670759978104416E0,
      5.01905042251180477414E0,  6.16021097993053585195E0,
      7.40974269950448939160E0,  2.97886665372100240670E0};
  static const std::array<double, 7> kErfcSCoefficient{
      1.00000000000000000000E0, 2.26052863220117276590E0,
      9.39603524938001434673E0, 1.20489539808096656605E1,
      1.70814450747565897222E1, 9.60896809063285878198E0,
      3.36907645100081516050E0};
  XlaOp z = -x * x;
  XlaOp abs_x = Abs(x);
  XlaOp y =
      Select(Lt(abs_x, ScalarLike(x, 8.0)),
             Exp(z) * EvaluatePolynomial<double>(abs_x, kErfcPCoefficient) /
                 EvaluatePolynomial<double>(abs_x, kErfcQCoefficient),
             Exp(z) * EvaluatePolynomial<double>(abs_x, kErfcRCoefficient) /
                 EvaluatePolynomial<double>(abs_x, kErfcSCoefficient));
  XlaOp y_clamp = Select(Lt(z, ScalarLike(x, -kMaxlog)), ScalarLike(x, 0), y);
  return Select(Lt(x, ScalarLike(x, 0)), ScalarLike(x, 2.0) - y_clamp, y_clamp);
}

static XlaOp ErfImpl64(XlaOp x) {
  static std::array<double, 5> kErfTCoefficient{
      9.60497373987051638749E0, 9.00260197203842689217E1,
      2.23200534594684319226E3, 7.00332514112805075473E3,
      5.55923013010394962768E4};
  static std::array<double, 6> kErfUCoefficient{
      1.00000000000000000000E0, 3.35617141647503099647E1,
      5.21357949780152679795E2, 4.59432382970980127987E3,
      2.26290000613890934246E4, 4.92673942608635921086E4};
  XlaOp z = x * x;
  return x * EvaluatePolynomial<double>(z, kErfTCoefficient) /
         EvaluatePolynomial<double>(z, kErfUCoefficient);
}

XlaOp Erfc(XlaOp x) {
  auto& b = *x.builder();
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Erfc", x));
    TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(x));
    // erfc(x) = 1 - erf(x) loses precision for |x| <= 1, and erf's
    // polynomial loses precision for |x| > 1, so switch between the two.
    if (shape.element_type() == F64) {
      return Select(Gt(Abs(x), ScalarLike(x, 1)), ErfcImpl64(x),
                    ScalarLike(x, 1) - ErfImpl64(x));
    }
    return DoWithUpcastToF32(x, {}, [](XlaOp x) {
      return Select(Gt(Abs(x), ScalarLike(x, 1)), ErfcImpl32(x),
                    ScalarLike(x, 1) - ErfImpl32Cephes(x));
    });
  });
}

static XlaOp ErfImpl32(XlaOp x) {
  static const std::array<float, 5> kAlpha{
      0.00022905065861350646f, 0.0034082910107109506f, 0.050955695062380861f,
      0.18520832239976145f, 1.128379143519084f};
  static const std::array<float, 7> kBeta{-1.1791602954361697e-7,
                                          0.000023547966471313185f,
                                          0.0010179625278914885f,
                                          0.014070470171167667f,
                                          0.11098505178285362f,
                                          0.49746925110067538f,
                                          1.0f};
  // Clamp so that erf(x) is exactly +/-1 at the end of the range.
  constexpr float kErfInvOneMinusHalfULP = 3.7439211627767994f;
  x = Clamp(ScalarLike(x, -kErfInvOneMinusHalfULP), x,
            ScalarLike(x, kErfInvOneMinusHalfULP));
  auto x2 = x * x;
  return (x * EvaluatePolynomial<float>(x2, kAlpha)) /
         EvaluatePolynomial<float>(x2, kBeta);
}
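The Erfc routine above switches between the asymptotic erfc expansion (|x| > 1) and the erf polynomial (|x| <= 1) precisely so that the subtraction 1 - erf(x) never cancels catastrophically. The complement identity it relies on can be checked against the C standard library in a few lines:

#include <cmath>
#include <cstdio>

int main() {
  // erfc(x) = 1 - erf(x); for large x, std::erfc keeps precision where
  // 1.0 - std::erf(x) collapses to 0 through cancellation.
  for (double x : {0.5, 1.0, 4.0, 8.0}) {
    std::printf("x=%g  erfc=%.17g  1-erf=%.17g\n", x, std::erfc(x),
                1.0 - std::erf(x));
  }
}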
namespace {

// Approximation for the inverse error function from
// Giles, M., "Approximating the erfinv function".
XlaOp ErfInv32(XlaOp x) {
  constexpr int kDegree = 9;
  constexpr std::array<float, 9> w_less_than_5_constants = {
      2.81022636e-08f,  3.43273939e-07f, -3.5233877e-06f,
      -4.39150654e-06f, 0.00021858087f,  -0.00125372503f,
      -0.00417768164f,  0.246640727f,    1.50140941f};
  constexpr std::array<float, 9> w_greater_than_5_constants = {
      -0.000200214257f, 0.000100950558f, 0.00134934322f,
      -0.00367342844f,  0.00573950773f,  -0.0076224613f,
      0.00943887047f,   1.00167406f,     2.83297682f};
  auto w = -Log1p(-x * x);
  auto lt = Lt(w, ScalarLike(x, 5.0));
  auto coefficient = [&](int i) {
    return Select(lt, FullLike(x, w_less_than_5_constants[i]),
                  FullLike(x, w_greater_than_5_constants[i]));
  };
  w = Select(lt, w - ScalarLike(x, 2.5), Sqrt(w) - ScalarLike(x, 3.0));
  auto p = coefficient(0);
  for (int i = 1; i < kDegree; ++i) {
    p = coefficient(i) + p * w;
  }
  XlaOp result = p * x;
  auto& b = *x.builder();
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(Shape shape, b.GetShape(x));
    return Select(Eq(Abs(x), ScalarLike(x, 1)),
                  x * MaxValue(&b, shape.element_type()), result);
  });
}

XlaOp ErfInv64(XlaOp x) {
  constexpr std::array<double, 23> w_less_than_6_25_constants = {
      -3.6444120640178196996e-21, -1.685059138182016589e-19,
      1.2858480715256400167e-18,  1.115787767802518096e-17,
      -1.333171662854620906e-16,  2.0972767875968561637e-17,
      6.6376381343583238325e-15,  -4.0545662729752068639e-14,
      -8.1519341976054721522e-14, 2.6335093153082322977e-12,
      -1.2975133253453532498e-11, -5.4154120542946279317e-11,
      1.051212273321532285e-09,   -4.1126339803469836976e-09,
      -2.9070369957882005086e-08, 4.2347877827932403518e-07,
      -1.3654692000834678645e-06, -1.3882523362786468719e-05,
      0.0001867342080340571352,   -0.00074070253416626697512,
      -0.0060336708714301490533,  0.24015818242558961693,
      1.6536545626831027356};
  constexpr std::array<double, 19> w_less_than_16_constants = {
      2.2137376921775787049e-09,  9.0756561938885390979e-08,
      -2.7517406297064545428e-07, 1.8239629214389227755e-08,
      1.5027403968909827627e-06,  -4.013867526981545969e-06,
      2.9234449089955446044e-06,  1.2475304481671778723e-05,
      -4.7318229009055733981e-05, 6.8284851459573175448e-05,
      2.4031110387097893999e-05,  -0.0003550375203628474796,
      0.00095328937973738049703,  -0.0016882755560235047313,
      0.0024914420961078508066,   -0.0037512085075692412107,
      0.005370914553590063617,    1.0052589676941592334,
      3.0838856104922207635,
  };
  constexpr std::array<double, 17> w_greater_than_16_constants = {
      -2.7109920616438573243e-11, -2.5556418169965252055e-10,
      1.5076572693500548083e-09,  -3.7894654401267369937e-09,
      7.6157012080783393804e-09,  -1.4960026627149240478e-08,
      2.9147953450901080826e-08,  -6.7711997758452339498e-08,
      2.2900482228026654717e-07,  -9.9298272942317002539e-07,
      4.5260625972231537039e-06,  -1.9681778105531670567e-05,
      7.5995277030017761139e-05,  -0.00021503011930044477347,
      -0.00013871931833623122026, 1.0103004648645343977,
      4.8499064014085844221,
  };
  auto w = -Log1p(-x * x);
  auto lt_6_25 = Lt(w, ScalarLike(x, 6.25));
  auto lt_16 = Lt(w, ScalarLike(x, 16));
  auto coefficient = [&](int i) {
    auto c = FullLike(x, w_less_than_6_25_constants[i]);
    if (i < 19) {
      c = Select(lt_6_25, c, FullLike(x, w_less_than_16_constants[i]));
    }
    if (i < 17) {
      c = Select(lt_16, c, FullLike(x, w_greater_than_16_constants[i]));
    }
    return c;
  };
  auto sqrt_w = Sqrt(w);
  w = Select(lt_6_25, w - ScalarLike(x, 3.125),
             sqrt_w - Select(lt_16, ScalarLike(x, 3.25), ScalarLike(x, 5.0)));
  auto p = coefficient(0);
  for (int i = 1; i < 17; ++i) {
    p = coefficient(i) + p * w;
  }
  for (int i = 17; i < 19; ++i) {
    p = Select(lt_16, coefficient(i) + p * w, p);
  }
  for (int i = 19; i < 23; ++i) {
    p = Select(lt_6_25, coefficient(i) + p * w, p);
  }
  XlaOp result = p * x;
  auto& b = *x.builder();
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(Shape shape, b.GetShape(x));
    return Select(Eq(Abs(x), ScalarLike(x, 1)),
                  x * MaxValue(&b, shape.element_type()), result);
  });
}

}  // namespace

XlaOp ErfInv(XlaOp x) {
  auto& b = *x.builder();
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("ErfInv", x));
    TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(x));
    if (shape.element_type() == F64) {
      return ErfInv64(x);
    }
    return DoWithUpcastToF32(x, {}, [](XlaOp x) { return ErfInv32(x); });
  });
}
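Both ErfInv32 and ErfInv64 are piecewise polynomial fits in w = -log(1 - x^2). Such a fit can always be sharpened with a Newton step on erf, since d/dy erf(y) = (2/sqrt(pi)) exp(-y^2); a standalone sketch of that refinement:

#include <cmath>
#include <cstdio>

// One Newton iteration for y = erfinv(x): solve erf(y) - x = 0.
double NewtonRefineErfInv(double y, double x) {
  const double kTwoOverSqrtPi = 1.1283791670955126;  // 2 / sqrt(pi)
  double residual = std::erf(y) - x;
  double derivative = kTwoOverSqrtPi * std::exp(-y * y);
  return y - residual / derivative;
}

int main() {
  double x = 0.75;
  double y = 0.8;  // crude initial guess; erfinv(0.75) is about 0.8134
  for (int i = 0; i < 3; ++i) y = NewtonRefineErfInv(y, x);
  std::printf("erfinv(%g) ~ %.15g, check erf(y) = %.15g\n", x, y, std::erf(y));
}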
namespace {

// Coefficients for the Lanczos approximation of the gamma function.
static constexpr double kLanczosGamma = 7;
static constexpr double kBaseLanczosCoeff = 0.99999999999980993227684700473478;
static constexpr std::array<double, 8> kLanczosCoefficients = {
    676.520368121885098567009190444019, -1259.13921672240287047156078755283,
    771.3234287776530788486528258894,   -176.61502916214059906584551354,
    12.507343278686904814458936853,     -0.13857109526572011689554707,
    9.984369578019570859563e-6,         1.50563273514931155834e-7};

}  // namespace

XlaOp Lgamma(XlaOp input) {
  auto do_it = [](XlaOp input) {
    XlaOp one_half = ScalarLike(input, 0.5);
    XlaOp one = ScalarLike(input, 1);
    XlaOp pi = ScalarLike(input, M_PI);
    XlaOp log_pi = ScalarLike(input, std::log(M_PI));
    XlaOp log_sqrt_two_pi =
        ScalarLike(input, (std::log(2) + std::log(M_PI)) / 2);
    XlaOp lanczos_gamma_plus_one_half = ScalarLike(input, kLanczosGamma + 0.5);
    XlaOp log_lanczos_gamma_plus_one_half =
        ScalarLike(input, std::log(kLanczosGamma + 0.5));
    XlaOp base_lanczos_coeff = ScalarLike(input, kBaseLanczosCoeff);
    // For input < 0.5, use the reflection formula on z = -input; otherwise
    // evaluate the Lanczos series at z = input - 1.
    XlaOp need_to_reflect = Lt(input, one_half);
    XlaOp z = Select(need_to_reflect, -input, input - one);
    XlaOp x = base_lanczos_coeff;
    for (int i = 0, end = kLanczosCoefficients.size(); i < end; ++i) {
      XlaOp lanczos_coefficient = ScalarLike(input, kLanczosCoefficients[i]);
      XlaOp index = ScalarLike(input, i);
      x = x + lanczos_coefficient / (z + index + one);
    }
    XlaOp t = lanczos_gamma_plus_one_half + z;
    XlaOp log_t = log_lanczos_gamma_plus_one_half +
                  Log1p(z / lanczos_gamma_plus_one_half);
    XlaOp log_y = log_sqrt_two_pi + (z + one_half - t / log_t) * log_t + Log(x);
    XlaOp abs_input = Abs(input);
    XlaOp abs_frac_input = abs_input - Floor(abs_input);
    XlaOp reduced_frac_input =
        Select(Gt(abs_frac_input, ScalarLike(abs_frac_input, 0.5)),
               ScalarLike(abs_frac_input, 1) - abs_frac_input, abs_frac_input);
    XlaOp reflection_denom = Log(Sin(pi * reduced_frac_input));
    XlaOp reflection =
        Select(IsFinite(reflection_denom), log_pi - reflection_denom - log_y,
               -reflection_denom);
    XlaOp result = Select(need_to_reflect, reflection, log_y);
    XlaOp inf_bcast = FullLike(input, std::numeric_limits<float>::infinity());
    return Select(IsInf(input), inf_bcast, result);
  };
  auto& b = *input.builder();
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Lgamma", input));
    return DoWithUpcastToF32(input, {}, do_it);
  });
}

static XlaOp Lbeta(XlaOp a, XlaOp b) {
  return Lgamma(a) + Lgamma(b) - Lgamma(a + b);
}

XlaOp Digamma(XlaOp input) {
  auto do_it = [](XlaOp input) {
    XlaOp zero = ScalarLike(input, 0);
    XlaOp one_half = ScalarLike(input, 0.5);
    XlaOp one = ScalarLike(input, 1);
    XlaOp pi = ScalarLike(input, M_PI);
    XlaOp lanczos_gamma = ScalarLike(input, kLanczosGamma);
    XlaOp lanczos_gamma_plus_one_half = ScalarLike(input, kLanczosGamma + 0.5);
    XlaOp log_lanczos_gamma_plus_one_half =
        ScalarLike(input, std::log(kLanczosGamma + 0.5));
    XlaOp base_lanczos_coeff = ScalarLike(input, kBaseLanczosCoeff);
    XlaOp need_to_reflect = Lt(input, one_half);
    XlaOp z = Select(need_to_reflect, -input, input - one);
    XlaOp num = zero;
    XlaOp denom = base_lanczos_coeff;
    for (int i = 0, end = kLanczosCoefficients.size(); i < end; ++i) {
      XlaOp lanczos_coefficient = ScalarLike(input, kLanczosCoefficients[i]);
      XlaOp index = ScalarLike(input, i);
      num = num - lanczos_coefficient / ((z + index + one) * (z + index + one));
      denom = denom + lanczos_coefficient / (z + index + one);
    }
    XlaOp t = lanczos_gamma_plus_one_half + z;
    XlaOp log_t = log_lanczos_gamma_plus_one_half +
                  Log1p(z / lanczos_gamma_plus_one_half);
    XlaOp y = log_t + num / denom - lanczos_gamma / t;
    XlaOp reduced_input = input + Abs(Floor(input + ScalarLike(input, 0.5)));
    XlaOp reflection =
        y - pi * Cos(pi * reduced_input) / Sin(pi * reduced_input);
    XlaOp real_result = Select(need_to_reflect, reflection, y);
    // Digamma has poles at the non-positive integers; return NaN there.
    return Select(And(Le(input, zero), Eq(input, Floor(input))),
                  FullLike(input, std::numeric_limits<double>::quiet_NaN()),
                  real_result);
  };
  auto& b = *input.builder();
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Digamma", input));
    return DoWithUpcastToF32(input, {}, do_it);
  });
}
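Lgamma uses the Lanczos approximation with g = 7 and the eight coefficients above: log Gamma(z + 1) = log(sqrt(2 pi)) + (z + 1/2) log(t) - t + log(A_g(z)) with t = z + g + 1/2 (the code's (z + 1/2 - t/log t) * log t is the same expression rearranged for stability). The same coefficients work directly in double precision; a sketch comparing against std::lgamma:

#include <array>
#include <cmath>
#include <cstdio>

// Lanczos approximation (g = 7) with the coefficient set used above.
// Valid for x >= 0.5; the reflection formula handles the rest (not shown).
double LanczosLgamma(double x) {
  constexpr double kGamma = 7.0;
  constexpr double kBase = 0.99999999999980993227684700473478;
  constexpr std::array<double, 8> kCoeffs = {
      676.520368121885098567009190444019, -1259.13921672240287047156078755283,
      771.3234287776530788486528258894,   -176.61502916214059906584551354,
      12.507343278686904814458936853,     -0.13857109526572011689554707,
      9.984369578019570859563e-6,         1.50563273514931155834e-7};
  double z = x - 1.0;
  double sum = kBase;
  for (int i = 0; i < 8; ++i) sum += kCoeffs[i] / (z + i + 1.0);
  double t = z + kGamma + 0.5;
  return 0.5 * std::log(2.0 * M_PI) + (z + 0.5) * std::log(t) - t +
         std::log(sum);
}

int main() {
  for (double x : {0.5, 1.0, 3.5, 10.0}) {
    std::printf("x=%g  lanczos=%.15g  std=%.15g\n", x, LanczosLgamma(x),
                std::lgamma(x));
  }
}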
namespace {

enum kIgammaMode { VALUE, DERIVATIVE, SAMPLE_DERIVATIVE };

template <kIgammaMode mode>
XlaOp IgammaSeries(XlaOp ax, XlaOp x, XlaOp a, XlaOp enabled,
                   xla::PrimitiveType type) {
  auto cond = [&](absl::Span<const XlaOp> vals,
                  XlaBuilder* builder) -> absl::StatusOr<XlaOp> {
    XlaOp enabled = vals[0];
    return Any(enabled);
  };
  auto body = [&](absl::Span<const XlaOp> vals,
                  XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> {
    XlaOp enabled = vals[0];
    XlaOp r = vals[1];
    XlaOp c = vals[2];
    XlaOp ans = vals[3];
    XlaOp x = vals[4];
    XlaOp dc_da = vals[5];
    XlaOp dans_da = vals[6];
    r = r + ScalarLike(r, 1);
    dc_da = dc_da * (x / r) + (ScalarLike(r, -1) * c * x) / (r * r);
    dans_da = dans_da + dc_da;
    c = c * (x / r);
    ans = ans + c;
    XlaOp conditional;
    if (mode == VALUE) {
      conditional = And(enabled, Gt(c / ans, Epsilon(builder, type)));
    } else {
      conditional =
          And(enabled, Gt(Abs(dc_da / dans_da), Epsilon(builder, type)));
    }
    return std::vector<XlaOp>{
        conditional,
        Select(enabled, r, vals[1]),
        Select(enabled, c, vals[2]),
        Select(enabled, ans, vals[3]),
        Select(enabled, x, vals[4]),
        Select(enabled, dc_da, vals[5]),
        Select(enabled, dans_da, vals[6]),
    };
  };
  auto& b = *ax.builder();
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    std::vector<XlaOp> vals = {
        enabled, a, FullLike(a, 1), FullLike(a, 1), x, FullLike(a, 0),
        FullLike(a, 0),
    };
    TF_ASSIGN_OR_RETURN(vals, WhileLoopHelper(cond, body, vals, "igamma", &b));
    XlaOp ans = vals[3];
    XlaOp dans_da = vals[6];
    if (mode == VALUE) {
      return (ans * ax) / a;
    }
    XlaOp dlogax_da = Log(x) - Digamma(a + ScalarLike(a, 1));
    switch (mode) {
      case DERIVATIVE:
        return ax * (ans * dlogax_da + dans_da) / a;
      case SAMPLE_DERIVATIVE:
      default:
        return -(dans_da + ans * dlogax_da) * x / a;
    }
  });
}

template <kIgammaMode mode>
XlaOp IgammacContinuedFraction(XlaOp ax, XlaOp x, XlaOp a, XlaOp enabled,
                               xla::PrimitiveType type) {
  auto cond = [&](absl::Span<const XlaOp> vals,
                  XlaBuilder* builder) -> absl::StatusOr<XlaOp> {
    XlaOp enabled = vals[0];
    XlaOp c = vals[5];
    return And(Lt(c, ScalarLike(c, 2000)), Any(enabled));
  };
  auto body = [&](absl::Span<const XlaOp> vals,
                  XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> {
    XlaOp enabled = vals[0];
    XlaOp ans = vals[1];
    XlaOp t = vals[2];
    XlaOp y = vals[3];
    XlaOp z = vals[4];
    XlaOp c = vals[5];
    XlaOp pkm1 = vals[6];
    XlaOp qkm1 = vals[7];
    XlaOp pkm2 = vals[8];
    XlaOp qkm2 = vals[9];
    XlaOp dpkm2_da = vals[10];
    XlaOp dqkm2_da = vals[11];
    XlaOp dpkm1_da = vals[12];
    XlaOp dqkm1_da = vals[13];
    XlaOp dans_da = vals[14];
    c = c + ScalarLike(c, 1);
    y = y + ScalarLike(y, 1);
    z = z + ScalarLike(z, 2);
    XlaOp yc = y * c;
    XlaOp pk = pkm1 * z - pkm2 * yc;
    XlaOp qk = qkm1 * z - qkm2 * yc;
    XlaOp qk_is_nonzero = Ne(qk, ScalarLike(qk, 0));
    XlaOp r = pk / qk;
    t = Select(qk_is_nonzero, Abs((ans - r) / r), FullLike(t, 1));
    ans = Select(qk_is_nonzero, r, ans);
    XlaOp dpk_da = dpkm1_da * z - pkm1 - dpkm2_da * yc + pkm2 * c;
    XlaOp dqk_da = dqkm1_da * z - qkm1 - dqkm2_da * yc + qkm2 * c;
    XlaOp dans_da_new =
        Select(qk_is_nonzero, (dpk_da - ans * dqk_da) / qk, dans_da);
    XlaOp grad_conditional =
        Select(qk_is_nonzero, Abs(dans_da_new - dans_da), FullLike(dans_da, 1));
    pkm2 = pkm1;
    pkm1 = pk;
    qkm2 = qkm1;
    qkm1 = qk;
    dpkm2_da = dpkm1_da;
    dqkm2_da = dqkm1_da;
    dpkm1_da = dpk_da;
    dqkm1_da = dqk_da;
    XlaOp rescale = Gt(Abs(pk), Reciprocal(Epsilon(builder, type)));
    pkm2 = Select(rescale, pkm2 * Epsilon(builder, type), pkm2);
    pkm1 = Select(rescale, pkm1 * Epsilon(builder, type), pkm1);
    qkm2 = Select(rescale, qkm2 * Epsilon(builder, type), qkm2);
    qkm1 = Select(rescale, qkm1 * Epsilon(builder, type), qkm1);
    dpkm2_da = Select(rescale, dpkm2_da * Epsilon(builder, type), dpkm2_da);
    dqkm2_da = Select(rescale, dqkm2_da * Epsilon(builder, type), dqkm2_da);
    dpkm1_da = Select(rescale, dpkm1_da * Epsilon(builder, type), dpkm1_da);
    dqkm1_da = Select(rescale, dqkm1_da * Epsilon(builder, type), dqkm1_da);
    XlaOp conditional;
    if (mode == VALUE) {
      conditional = And(enabled, Gt(t, Epsilon(builder, type)));
    } else {
      conditional = And(enabled, Gt(grad_conditional, Epsilon(builder, type)));
    }
    return std::vector<XlaOp>{conditional,
                              Select(enabled, ans, vals[1]),
                              Select(enabled, t, vals[2]),
                              Select(enabled, y, vals[3]),
                              Select(enabled, z, vals[4]),
                              c,
                              Select(enabled, pkm1, vals[6]),
                              Select(enabled, qkm1, vals[7]),
                              Select(enabled, pkm2, vals[8]),
                              Select(enabled, qkm2, vals[9]),
                              Select(enabled, dpkm2_da, vals[10]),
                              Select(enabled, dqkm2_da, vals[11]),
                              Select(enabled, dpkm1_da, vals[12]),
                              Select(enabled, dqkm1_da, vals[13]),
                              Select(enabled, dans_da_new, vals[14])};
  };
  auto& b = *ax.builder();
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    XlaOp y = ScalarLike(a, 1) - a;
    XlaOp z = x + y + ScalarLike(x, 1);
    XlaOp c = ScalarLike(x, 0);
    XlaOp pkm2 = FullLike(x, 1);
    XlaOp qkm2 = x;
    XlaOp pkm1 = x + ScalarLike(x, 1);
    XlaOp qkm1 = z * x;
    XlaOp ans = pkm1 / qkm1;
    XlaOp t = FullLike(x, 1);
    XlaOp dpkm2_da = FullLike(x, 0);
    XlaOp dqkm2_da = FullLike(x, 0);
    XlaOp dpkm1_da = FullLike(x, 0);
    XlaOp dqkm1_da = -x;
    XlaOp dans_da = (dpkm1_da - ans * dqkm1_da) / qkm1;
    std::vector<XlaOp> vals = {enabled,  ans,      t,        y,        z,
                               c,        pkm1,     qkm1,     pkm2,     qkm2,
                               dpkm2_da, dqkm2_da, dpkm1_da, dqkm1_da, dans_da};
    TF_ASSIGN_OR_RETURN(vals, WhileLoopHelper(cond, body, vals, "igammac", &b));
    ans = vals[1];
    if (mode == VALUE) {
      return ans * ax;
    }
    dans_da = vals[14];
    XlaOp dlogax_da = Log(x) - Digamma(a);
    switch (mode) {
      case DERIVATIVE:
        return ax * (ans * dlogax_da + dans_da);
      case SAMPLE_DERIVATIVE:
      default:
        return -(dans_da + ans * dlogax_da) * x;
    }
  });
}

}  // namespace
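// Method note: both helpers above evaluate the regularized incomplete gamma
// functions starting from the shared prefactor
// ax = exp(a*log(x) - x - lgamma(a)). IgammaSeries sums the power series for
// P(a, x), which converges quickly when x is small relative to a;
// IgammacContinuedFraction evaluates the continued fraction for Q(a, x),
// periodically rescaling the partial numerators/denominators by machine
// epsilon so they never overflow. The kIgammaMode template parameter selects
// the value itself, its derivative with respect to a, or (for
// SAMPLE_DERIVATIVE) the quantity -(dP/da)/(dP/dx) used to reparameterize
// Gamma samples.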
XlaOp Igamma(XlaOp a, XlaOp x) {
  auto& b = *a.builder();
  auto doit = [&b](XlaOp a, XlaOp x, PrimitiveType type) -> XlaOp {
    XlaOp is_nan = Or(IsNan(a), IsNan(x));
    XlaOp x_is_zero = Eq(x, ScalarLike(x, 0));
    XlaOp x_is_infinity =
        Eq(x, ScalarLike(x, std::numeric_limits<float>::infinity()));
    XlaOp domain_error = Or(Lt(x, ScalarLike(x, 0)), Le(a, ScalarLike(a, 0)));
    XlaOp use_igammac = And(Gt(x, ScalarLike(x, 1)), Gt(x, a));
    XlaOp ax = a * Log(x) - x - Lgamma(a);
    XlaOp underflow = Lt(ax, -Log(MaxFiniteValue(&b, type)));
    ax = Exp(ax);
    XlaOp enabled = Not(Or(Or(Or(x_is_zero, domain_error), underflow), is_nan));
    const double nan = std::numeric_limits<double>::quiet_NaN();
    XlaOp output = Select(
        use_igammac,
        ScalarLike(a, 1) - IgammacContinuedFraction<VALUE>(
                               ax, x, a, And(enabled, use_igammac), type),
        IgammaSeries<VALUE>(ax, x, a, And(enabled, Not(use_igammac)), type));
    output = Select(x_is_zero, ZerosLike(output), output);
    output = Select(x_is_infinity, FullLike(output, 1), output);
    output = Select(Or(domain_error, is_nan), FullLike(a, nan), output);
    return output;
  };
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(auto a_shape, b.GetShape(a));
    TF_ASSIGN_OR_RETURN(auto x_shape, b.GetShape(x));
    if (a_shape != x_shape) {
      return InvalidArgument(
          "Arguments to Igamma must have equal shapes and types; got %s and %s",
          a_shape.ToString(), x_shape.ToString());
    }
    TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Igamma", a));
    PrimitiveType a_x_type = a_shape.element_type();
    bool needs_upcast = false;
    for (PrimitiveType type : {BF16, F16, F8E3M4, F8E4M3, F8E5M2, F8E4M3FN,
                               F8E4M3B11FNUZ, F8E5M2FNUZ, F8E4M3FNUZ}) {
      if (a_shape.element_type() == type) {
        needs_upcast = true;
        break;
      }
    }
    if (needs_upcast) {
      a = ConvertElementType(a, F32);
      x = ConvertElementType(x, F32);
      a_x_type = F32;
    }
    XlaOp result = doit(a, x, a_x_type);
    if (needs_upcast) {
      result = ConvertElementType(result, a_shape.element_type());
    }
    return result;
  });
}

XlaOp IgammaGradA(XlaOp a, XlaOp x) {
  auto& b = *a.builder();
  auto doit = [&b](XlaOp a, XlaOp x, PrimitiveType type) -> XlaOp {
    XlaOp is_nan = Or(IsNan(a), IsNan(x));
    XlaOp x_is_zero = Eq(x, ScalarLike(x, 0));
    XlaOp domain_error = Or(Lt(x, ScalarLike(x, 0)), Le(a, ScalarLike(a, 0)));
    XlaOp use_igammac = And(Gt(x, ScalarLike(x, 1)), Gt(x, a));
    XlaOp ax = a * Log(x) - x - Lgamma(a);
    XlaOp underflow = Lt(ax, -Log(MaxFiniteValue(&b, type)));
    ax = Exp(ax);
    XlaOp enabled = Not(Or(Or(Or(x_is_zero, domain_error), underflow), is_nan));
    const double nan = std::numeric_limits<double>::quiet_NaN();
    XlaOp output = Select(use_igammac,
                          -IgammacContinuedFraction<DERIVATIVE>(
                              ax, x, a, And(enabled, use_igammac), type),
                          IgammaSeries<DERIVATIVE>(
                              ax, x, a, And(enabled, Not(use_igammac)), type));
    output = Select(x_is_zero, ZerosLike(output), output);
    output = Select(Or(domain_error, is_nan), FullLike(a, nan), output);
    return output;
  };
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(auto a_shape, b.GetShape(a));
    TF_ASSIGN_OR_RETURN(auto x_shape, b.GetShape(x));
    if (a_shape != x_shape) {
      return InvalidArgument(
          "Arguments to IgammaGradA must have equal shapes and types; got %s "
          "and %s",
          a_shape.ToString(), x_shape.ToString());
    }
    TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IgammaGradA", a));
    bool needs_upcast = false;
    for (PrimitiveType type : {BF16, F16, F8E3M4, F8E4M3, F8E5M2, F8E4M3FN,
                               F8E4M3B11FNUZ, F8E5M2FNUZ, F8E4M3FNUZ}) {
      if (a_shape.element_type() == type) {
        needs_upcast = true;
        break;
      }
    }
    if (needs_upcast) {
      a = ConvertElementType(a, F32);
      x = ConvertElementType(x, F32);
    }
    XlaOp result = doit(a, x, a_shape.element_type());
    if (needs_upcast) {
      result = ConvertElementType(result, a_shape.element_type());
    }
    return result;
  });
}

XlaOp RandomGammaGrad(XlaOp a, XlaOp x) {
  auto& b = *a.builder();
  auto doit = [&b](XlaOp a, XlaOp x, PrimitiveType type) -> XlaOp {
    XlaOp is_nan = Or(IsNan(a), IsNan(x));
    XlaOp x_is_zero = Eq(x, ScalarLike(x, 0));
    XlaOp domain_error = Or(Lt(x, ScalarLike(x, 0)), Le(a, ScalarLike(a, 0)));
    XlaOp use_igammac = And(Gt(x, ScalarLike(x, 1)), Gt(x, a));
    XlaOp ax = a * Log(x) - x - Lgamma(a);
    XlaOp underflow = Lt(ax, -Log(MaxFiniteValue(&b, type)));
    ax = Exp(ax);
    XlaOp enabled = Not(Or(Or(Or(x_is_zero, domain_error), underflow), is_nan));
    const double nan = std::numeric_limits<double>::quiet_NaN();
    XlaOp output = Select(use_igammac,
                          -IgammacContinuedFraction<SAMPLE_DERIVATIVE>(
                              ax, x, a, And(enabled, use_igammac), type),
                          IgammaSeries<SAMPLE_DERIVATIVE>(
                              ax, x, a, And(enabled, Not(use_igammac)), type));
    output = Select(x_is_zero, ZerosLike(output), output);
    output = Select(Or(domain_error, is_nan), FullLike(a, nan), output);
    return output;
  };
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(auto a_shape, b.GetShape(a));
    TF_ASSIGN_OR_RETURN(auto x_shape, b.GetShape(x));
    if (a_shape != x_shape) {
      return InvalidArgument(
          "Arguments to RandomGammaGrad must have equal shapes and types; got "
          "%s and %s",
          a_shape.ToString(), x_shape.ToString());
    }
    TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("RandomGammaGrad", a));
    bool needs_upcast =
        a_shape.element_type() == F16 || a_shape.element_type() == BF16;
    if (needs_upcast) {
      a = ConvertElementType(a, F32);
      x = ConvertElementType(x, F32);
    }
    XlaOp result = doit(a, x, a_shape.element_type());
    if (needs_upcast) {
      result = ConvertElementType(result, a_shape.element_type());
    }
    return result;
  });
}

XlaOp Igammac(XlaOp a, XlaOp x) {
  auto& b = *a.builder();
  auto doit = [&b](XlaOp a, XlaOp x, PrimitiveType type) -> XlaOp {
    XlaOp out_of_range = Or(Le(x, ScalarLike(x, 0)), Le(a, ScalarLike(a, 0)));
    XlaOp use_igamma = Or(Lt(x, ScalarLike(x, 1)), Lt(x, a));
    XlaOp ax = a * Log(x) - x - Lgamma(a);
    XlaOp underflow = Lt(ax, -Log(MaxFiniteValue(&b, type)));
    XlaOp enabled = Not(Or(out_of_range, underflow));
    ax = Exp(ax);
    XlaOp result =
        Select(use_igamma,
               ScalarLike(a, 1) - IgammaSeries<VALUE>(
                                      ax, x, a, And(enabled, use_igamma), type),
               IgammacContinuedFraction<VALUE>(
                   ax, x, a, And(enabled, Not(use_igamma)), type));
    XlaOp x_is_infinity =
        Eq(x, ScalarLike(x, std::numeric_limits<float>::infinity()));
    result = Select(x_is_infinity, ZerosLike(result), result);
    return Select(out_of_range, FullLike(a, 1), result);
  };
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(auto a_shape, b.GetShape(a));
    TF_ASSIGN_OR_RETURN(auto x_shape, b.GetShape(x));
    if (a_shape != x_shape) {
      return InvalidArgument(
          "Arguments to Igammac must have equal shapes and types; "
          "got %s and %s",
          a_shape.ToString(), x_shape.ToString());
    }
    TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Igammac", a));
    PrimitiveType a_x_type = a_shape.element_type();
    bool needs_upcast =
        a_shape.element_type() == F16 || a_shape.element_type() == BF16;
    if (needs_upcast) {
      a = ConvertElementType(a, F32);
      x = ConvertElementType(x, F32);
      a_x_type = F32;
    }
    XlaOp result = doit(a, x, a_x_type);
    if (needs_upcast) {
      result = ConvertElementType(result, a_shape.element_type());
    }
    return result;
  });
}
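// Method note: Igamma and Igammac each pick whichever expansion converges
// fastest for the given (a, x) and, when needed, return one minus the other
// series, so Q(a, x) = 1 - P(a, x) holds without the catastrophic
// cancellation that computing 1 - Igamma(a, x) directly would incur. The
// Select chains afterwards patch up the edge cases (x == 0, x == inf,
// domain errors, NaNs) for which the iterative kernels were masked off via
// `enabled`.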
XlaOp RoundToEven(XlaOp x) {
  auto& b = *x.builder();
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("RoundToEven", x));
    return RoundNearestEven(x);
  });
}

XlaOp Acos(XlaOp x) {
  XlaBuilder* b = x.builder();
  return b->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x));
    if (primitive_util::IsComplexType(shape.element_type())) {
      auto one = ScalarLike(x, 1);
      auto imag_one = Complex(
          Zero(b, primitive_util::ComplexComponentType(shape.element_type())),
          One(b, primitive_util::ComplexComponentType(shape.element_type())));
      auto result =
          Neg(imag_one * Log(x + imag_one * Sqrt((one + x) * (one - x))));
      return result;
    }
    return Select(Ne(x, FullLike(x, -1)),
                  ScalarLike(x, 2.0) * Atan2(Sqrt(ScalarLike(x, 1.0) - x * x),
                                             ScalarLike(x, 1.0) + x),
                  FullLike(x, M_PI));
  });
}

XlaOp Asin(XlaOp x) {
  XlaBuilder* b = x.builder();
  auto do_it = [&](XlaOp z) -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(z));
    auto elem_ty = shape.element_type();
    switch (elem_ty) {
      case C128:
        return math_impl::AsinComplex<double>(z);
      case C64:
        return math_impl::AsinComplex<float>(z);
      case F64:
        return math_impl::AsinReal<double>(z);
      case F32:
        return math_impl::AsinReal<float>(z);
      default:
        return InvalidArgument("Asin got unsupported element type %s",
                               PrimitiveType_Name(elem_ty));
    }
  };
  return DoWithUpcastToF32(
      x, {}, [&](XlaOp x) { return b->ReportErrorOrReturn(do_it(x)); });
}

XlaOp Atan(XlaOp x) { return Atan2(x, ScalarLike(x, 1.0)); }

XlaOp Acosh(XlaOp x) {
  XlaBuilder* b = x.builder();
  return b->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x));
    auto one = ScalarLike(x, 1);
    auto neg_one = ScalarLike(x, -1);
    auto nan = FullLike(x, std::numeric_limits<float>::quiet_NaN());
    auto naive_result = Log(x + Sqrt((x + one) * (x - one)));
    if (primitive_util::IsComplexType(shape.element_type())) {
      return naive_result;
    }
    auto overflow_result = Log(x) + Log(ScalarLike(x, 2));
    auto sqrt_max_value = Sqrt(MaxFiniteValue(b, shape.element_type()));
    return Select(Lt(x, neg_one), nan,
                  Select(Ge(x, sqrt_max_value), overflow_result, naive_result));
  });
}

XlaOp Asinh(XlaOp x) {
  XlaBuilder* b = x.builder();
  auto do_it = [&](XlaOp x) -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x));
    auto one = ScalarLike(x, 1);
    if (primitive_util::IsComplexType(shape.element_type())) {
      auto x_re = Real(x);
      auto x_im = Imag(x);
      auto z = Asin(Complex(x_im, -x_re));
      auto z_im = Imag(z);
      auto on_branch_cut = And(Eq(x_re, ScalarLike(x_re, 0)),
                               Gt(Abs(x_im), ScalarLike(x_im, 1)));
      return Complex(Select(on_branch_cut, z_im, -z_im), Real(z));
    }
    auto a = Abs(x);
    auto small_result = Log1p(a + a * a / (one + Sqrt(a * a + one)));
    auto naive_result = Log(a + Sqrt(a * a + one));
    auto overflow_result = Log(Abs(a)) + Log(ScalarLike(a, 2));
    auto sqrt_max_value = Sqrt(MaxFiniteValue(b, shape.element_type()));
    return Sign(x) * Select(Ge(a, sqrt_max_value), overflow_result,
                            Select(Le(a, one), small_result, naive_result));
  };
  return DoWithUpcastToF32(x, {BF16, F16}, [&](XlaOp x) {
    return b->ReportErrorOrReturn(do_it(x));
  });
}

XlaOp Atanh(XlaOp x) {
  XlaBuilder* b = x.builder();
  auto do_it = [&](XlaOp x) -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x));
    auto naive_result = (Log1p(x) - Log1p(-x)) * ScalarLike(x, 0.5);
    if (primitive_util::IsComplexType(shape.element_type())) {
      return naive_result;
    }
    auto nan = FullLike(x, std::numeric_limits<float>::quiet_NaN());
    return Select(Gt(Abs(x), ScalarLike(x, 1)), nan, naive_result);
  };
  return DoWithUpcastToF32(x, {BF16}, [&](XlaOp x) {
    return b->ReportErrorOrReturn(do_it(x));
  });
}

XlaOp Cosh(XlaOp x) {
  XlaBuilder* b = x.builder();
  auto do_it = [&](XlaOp x) -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x));
    auto log_one_half = Log(ScalarLike(x, 0.5));
    auto result = Exp(x + log_one_half) + Exp(-x + log_one_half);
    if (primitive_util::IsComplexType(shape.element_type())) {
      return result;
    }
    return Max(result, ScalarLike(result, 1.0));
  };
  return DoWithUpcastToF32(x, {BF16, F16}, [&](XlaOp x) {
    return b->ReportErrorOrReturn(do_it(x));
  });
}

XlaOp Sinh(XlaOp x) {
  XlaBuilder* b = x.builder();
  auto do_it = [&](XlaOp x) -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x));
    auto one_half = ScalarLike(x, 0.5);
    auto log_one_half = Log(ScalarLike(x, 0.5));
    auto large_sinh_result = Exp(x + log_one_half) - Exp(-x + log_one_half);
    if (primitive_util::IsComplexType(shape.element_type())) {
      return large_sinh_result;
    }
    auto expm1 = Expm1(x);
    auto one = ScalarLike(x, 1.);
    auto small_sinh_result = one_half * (expm1 + expm1 / (expm1 + one));
    return Select(Lt(Abs(x), one), small_sinh_result, large_sinh_result);
  };
  return DoWithUpcastToF32(x, {BF16, F16}, [&](XlaOp x) {
    return b->ReportErrorOrReturn(do_it(x));
  });
}
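// Method note: Cosh and Sinh fold the factor 1/2 into the exponent,
//   cosh(x) = exp(x + log(1/2)) + exp(-x + log(1/2)),
// which widens the range of x for which the intermediate exponentials stay
// finite. Sinh additionally switches to
// 0.5 * (expm1(x) + expm1(x) / (expm1(x) + 1)) for |x| < 1, where
// exp(x) - exp(-x) would lose precision to cancellation; the Max with 1.0 in
// Cosh clamps real results that rounding pushed below cosh's true minimum.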
XlaOp MaybeConjugate(XlaOp x, bool conjugate) {
  XlaBuilder* builder = x.builder();
  return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
    auto perform_conj =
        primitive_util::IsComplexType(shape.element_type()) && conjugate;
    return perform_conj ? Conj(x) : x;
  });
}

XlaOp NextAfter(XlaOp from, XlaOp to) {
  auto builder = from.builder();
  return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(auto shape, builder->GetShape(from));
    int bitwidth = primitive_util::BitWidth(shape.element_type());
    auto int_type = primitive_util::UnsignedIntegralTypeForBitWidth(bitwidth);
    auto from_as_int = BitcastConvertType(from, int_type);
    auto to_as_int = BitcastConvertType(to, int_type);
    auto from_is_nan = Ne(from, from);
    auto to_is_nan = Ne(to, to);
    auto nan_input = Or(from_is_nan, to_is_nan);
    auto result_for_nan =
        Broadcast(ScalarLike(from, std::numeric_limits<float>::quiet_NaN()),
                  shape.dimensions());
    result_for_nan = BitcastConvertType(result_for_nan, int_type);
    const int64_t sign_mask = int64_t{1} << (bitwidth - 1);
    auto from_abs = And(from_as_int, ScalarLike(from_as_int, ~sign_mask));
    auto to_abs = And(to_as_int, ScalarLike(to_as_int, ~sign_mask));
    auto from_and_to_are_equal = Eq(from_as_int, to_as_int);
    auto result_for_equal = to_as_int;
    auto from_is_zero = Eq(from_abs, ZerosLike(from_abs));
    auto to_is_zero = Eq(to_abs, ZerosLike(to_abs));
    auto result_for_both_zero = to_as_int;
    auto from_sign = And(from_as_int, ScalarLike(from_as_int, sign_mask));
    auto to_sign = And(to_as_int, ScalarLike(to_as_int, sign_mask));
    auto result_for_from_zero_to_non_zero =
        Or(to_sign, ScalarLike(from_as_int, 1));
    auto signs_disagree = Ne(from_sign, to_sign);
    auto from_magnitude_larger_than_to = Gt(from_abs, to_abs);
    auto result_has_smaller_magnitude =
        Or(from_magnitude_larger_than_to, signs_disagree);
    auto magnitude_adjustment =
        Select(result_has_smaller_magnitude,
               Broadcast(ScalarLike(from_as_int, -1), shape.dimensions()),
               Broadcast(ScalarLike(from_as_int, 1), shape.dimensions()));
    auto result = Add(from_as_int, magnitude_adjustment);
    result = Select(from_is_zero,
                    Select(to_is_zero, result_for_both_zero,
                           result_for_from_zero_to_non_zero),
                    result);
    result = Select(from_and_to_are_equal, result_for_equal, result);
    result = Select(nan_input, result_for_nan, result);
    return BitcastConvertType(result, shape.element_type());
  });
}
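// Method note: NextAfter is implemented on the integer representation of the
// floats. Reinterpreting an IEEE value as an unsigned integer of the same
// width and adding or subtracting 1 steps to the adjacent representable
// value within a sign, so the function reduces to case analysis (NaNs, equal
// inputs, signed zeros, crossing zero) around a bitcast, a +/-1, and a
// bitcast back.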
static XlaOp I0eImpl32(XlaOp x) {
  static const std::array<float, 18> kI0eCoeffsA{
      -1.30002500998624804212E-8f, 6.04699502254191894932E-8f,
      -2.67079385394061173391E-7f, 1.11738753912010371815E-6f,
      -4.41673835845875056359E-6f, 1.64484480707288970893E-5f,
      -5.75419501008210370398E-5f, 1.88502885095841655729E-4f,
      -5.76375574538582365885E-4f, 1.63947561694133579842E-3f,
      -4.32430999505057594430E-3f, 1.05464603945949983183E-2f,
      -2.37374148058994688156E-2f, 4.93052842396707084878E-2f,
      -9.49010970480476444210E-2f, 1.71620901522208775349E-1f,
      -3.04682672343198398683E-1f, 6.76795274409476084995E-1f};
  static const std::array<float, 7> kI0eCoeffsB{
      3.39623202570838634515E-9f, 2.26666899049817806459E-8f,
      2.04891858946906374183E-7f, 2.89137052083475648297E-6f,
      6.88975834691682398426E-5f, 3.36911647825569408990E-3f,
      8.04490411014108831608E-1f};
  x = Abs(x);
  auto half = xla::ScalarLike(x, 0.5);
  auto two = xla::ScalarLike(x, 2.0);
  auto thirty_two = xla::ScalarLike(x, 32.0);
  auto result_le_8 =
      EvaluateChebyshevPolynomial<float>(half * x - two, kI0eCoeffsA);
  auto result_gt_8 =
      EvaluateChebyshevPolynomial<float>(thirty_two / x - two, kI0eCoeffsB) /
      Sqrt(x);
  return Select(Le(x, xla::ScalarLike(x, 8.0)), result_le_8, result_gt_8);
}

static XlaOp I0eImpl64(XlaOp x) {
  static const std::array<double, 30> kI0eCoeffsA{
      -4.41534164647933937950E-18, 3.33079451882223809783E-17,
      -2.43127984654795469359E-16, 1.71539128555513303061E-15,
      -1.16853328779934516808E-14, 7.67618549860493561688E-14,
      -4.85644678311192946090E-13, 2.95505266312963983461E-12,
      -1.72682629144155570723E-11, 9.67580903537323691224E-11,
      -5.18979560163526290666E-10, 2.65982372468238665035E-9,
      -1.30002500998624804212E-8,  6.04699502254191894932E-8,
      -2.67079385394061173391E-7,  1.11738753912010371815E-6,
      -4.41673835845875056359E-6,  1.64484480707288970893E-5,
      -5.75419501008210370398E-5,  1.88502885095841655729E-4,
      -5.76375574538582365885E-4,  1.63947561694133579842E-3,
      -4.32430999505057594430E-3,  1.05464603945949983183E-2,
      -2.37374148058994688156E-2,  4.93052842396707084878E-2,
      -9.49010970480476444210E-2,  1.71620901522208775349E-1,
      -3.04682672343198398683E-1,  6.76795274409476084995E-1};
  static const std::array<double, 25> kI0eCoeffsB{
      -7.23318048787475395456E-18, -4.83050448594418207126E-18,
      4.46562142029675999901E-17,  3.46122286769746109310E-17,
      -2.82762398051658348494E-16, -3.42548561967721913462E-16,
      1.77256013305652638360E-15,  3.81168066935262242075E-15,
      -9.55484669882830764870E-15, -4.15056934728722208663E-14,
      1.54008621752140982691E-14,  3.85277838274214270114E-13,
      7.18012445138366623367E-13,  -1.79417853150680611778E-12,
      -1.32158118404477131188E-11, -3.14991652796324136454E-11,
      1.18891471078464383424E-11,  4.94060238822496958910E-10,
      3.39623202570838634515E-9,   2.26666899049817806459E-8,
      2.04891858946906374183E-7,   2.89137052083475648297E-6,
      6.88975834691682398426E-5,   3.36911647825569408990E-3,
      8.04490411014108831608E-1};
  x = Abs(x);
  auto half = xla::ScalarLike(x, 0.5);
  auto two = xla::ScalarLike(x, 2.0);
  auto thirty_two = xla::ScalarLike(x, 32.0);
  auto result_le_8 =
      EvaluateChebyshevPolynomial<double>(half * x - two, kI0eCoeffsA);
  auto result_gt_8 =
      EvaluateChebyshevPolynomial<double>(thirty_two / x - two, kI0eCoeffsB) /
      Sqrt(x);
  return Select(Le(x, xla::ScalarLike(x, 8.0)), result_le_8, result_gt_8);
}

XlaOp BesselI0e(XlaOp x) {
  auto& b = *x.builder();
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("BesselI0e", x));
    TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(x));
    if (shape.element_type() == F64) {
      return I0eImpl64(x);
    }
    return DoWithUpcastToF32(x, {BF16, F16},
                             [](XlaOp x) { return I0eImpl32(x); });
  });
}

static XlaOp I1eImpl32(XlaOp x) {
  static const std::array<float, 17> kI1eCoeffsA{
      9.38153738649577178388E-9f,  -4.44505912879632808065E-8f,
      2.00329475355213526229E-7f,  -8.56872026469545474066E-7f,
      3.47025130813767847674E-6f,  -1.32731636560394358279E-5f,
      4.78156510755005422638E-5f,  -1.61760815825896745588E-4f,
      5.12285956168575772895E-4f,  -1.51357245063125314899E-3f,
      4.15642294431288815669E-3f,  -1.05640848946261981558E-2f,
      2.47264490306265168283E-2f,  -5.29459812080949914269E-2f,
      1.02643658689847095384E-1f,  -1.76416518357834055153E-1f,
      2.52587186443633654823E-1f};
  static const std::array<float, 7> kI1eCoeffsB{
      -3.83538038596423702205E-9f, -2.63146884688951950684E-8f,
      -2.51223623787020892529E-7f, -3.88256480887769039346E-6f,
      -1.10588938762623716291E-4f, -9.76109749136146840777E-3f,
      7.78576235018280120474E-1f};
  XlaOp z = Abs(x);
  auto half = xla::ScalarLike(x, 0.5);
  auto two = xla::ScalarLike(x, 2.0);
  auto thirty_two = xla::ScalarLike(x, 32.0);
  auto result_le_8 =
      z * EvaluateChebyshevPolynomial<float>(half * z - two, kI1eCoeffsA);
  auto result_gt_8 =
      EvaluateChebyshevPolynomial<float>(thirty_two / z - two, kI1eCoeffsB) /
      Sqrt(z);
  return Sign(x) *
         Select(Le(z, xla::ScalarLike(x, 8.0)), result_le_8, result_gt_8);
}

static XlaOp I1eImpl64(XlaOp x) {
  static const std::array<double, 29> kI1eCoeffsA{
      2.77791411276104639959E-18, -2.11142121435816608115E-17,
      1.55363195773620046921E-16, -1.10559694773538630805E-15,
      7.60068429473540693410E-15, -5.04218550472791168711E-14,
      3.22379336594557470981E-13, -1.98397439776494371520E-12,
      1.17361862988909016308E-11, -6.66348972350202774223E-11,
      3.62559028155211703701E-10, -1.88724975172282928790E-9,
      9.38153738649577178388E-9,  -4.44505912879632808065E-8,
      2.00329475355213526229E-7,  -8.56872026469545474066E-7,
      3.47025130813767847674E-6,  -1.32731636560394358279E-5,
      4.78156510755005422638E-5,  -1.61760815825896745588E-4,
      5.12285956168575772895E-4,  -1.51357245063125314899E-3,
      4.15642294431288815669E-3,  -1.05640848946261981558E-2,
      2.47264490306265168283E-2,  -5.29459812080949914269E-2,
      1.02643658689847095384E-1,  -1.76416518357834055153E-1,
      2.52587186443633654823E-1};
  static const std::array<double, 25> kI1eCoeffsB{
      7.51729631084210481353E-18,  4.41434832307170791151E-18,
      -4.65030536848935832153E-17, -3.20952592199342395980E-17,
      2.96262899764595013876E-16,  3.30820231092092828324E-16,
      -1.88035477551078244854E-15, -3.81440307243700780478E-15,
      1.04202769841288027642E-14,  4.27244001671195135429E-14,
      -2.10154184277266431302E-14, -4.08355111109219731823E-13,
      -7.19855177624590851209E-13, 2.03562854414708950722E-12,
      1.41258074366137813316E-11,  3.25260358301548823856E-11,
      -1.89749581235054123450E-11, -5.58974346219658380687E-10,
      -3.83538038596423702205E-9,  -2.63146884688951950684E-8,
      -2.51223623787020892529E-7,  -3.88256480887769039346E-6,
      -1.10588938762623716291E-4,  -9.76109749136146840777E-3,
      7.78576235018280120474E-1};
  XlaOp z = Abs(x);
  auto half = xla::ScalarLike(x, 0.5);
  auto two = xla::ScalarLike(x, 2.0);
  auto thirty_two = xla::ScalarLike(x, 32.0);
  auto result_le_8 =
      z * EvaluateChebyshevPolynomial<double>(half * z - two, kI1eCoeffsA);
  auto result_gt_8 =
      EvaluateChebyshevPolynomial<double>(thirty_two / z - two, kI1eCoeffsB) /
      Sqrt(z);
  return Sign(x) *
         Select(Le(z, xla::ScalarLike(x, 8.0)), result_le_8, result_gt_8);
}

XlaOp BesselI1e(XlaOp x) {
  auto& b = *x.builder();
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("BesselI1e", x));
    TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(x));
    if (shape.element_type() == F64) {
      return I1eImpl64(x);
    }
    return DoWithUpcastToF32(x, {BF16, F16},
                             [](XlaOp x) { return I1eImpl32(x); });
  });
}
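// Method note: the four Bessel kernels above follow the classic Cephes
// layout: for |x| <= 8 a Chebyshev expansion is evaluated at (|x|/2 - 2), and
// for |x| > 8 an expansion at (32/|x| - 2), divided by sqrt(|x|). Both
// arguments map their half of the domain onto [-2, 2], where the Chebyshev
// fits were made. The trailing "e" in I0e/I1e means the results are scaled
// by exp(-|x|), which is why they stay bounded for large |x| (compare the
// BesselI0e/BesselI1e test expectations further down).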
static XlaOp LentzThompsonBarnettAlgorithm(
    int64_t num_iterations, double small, double threshold,
    const ForEachIndexBodyFunction& nth_partial_numerator,
    const ForEachIndexBodyFunction& nth_partial_denominator,
    absl::Span<const XlaOp> inputs, absl::string_view name) {
  auto& b = *inputs.front().builder();
  return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_RET_CHECK(num_iterations < INT32_MAX);
    enum {
      kIterationIdx,
      kValuesUnconvergedIdx,
      kCIdx,
      kDIdx,
      kHIdx,
      kFirstInputIdx,
    };
    auto while_cond_fn =
        [num_iterations](absl::Span<const XlaOp> values,
                         XlaBuilder* cond_builder) -> absl::StatusOr<XlaOp> {
      auto iteration = values[kIterationIdx];
      auto iterations_remain_cond =
          Lt(iteration, ScalarLike(iteration, num_iterations));
      auto values_unconverged_cond = values[kValuesUnconvergedIdx];
      return And(iterations_remain_cond, values_unconverged_cond);
    };
    auto while_body_fn =
        [small, threshold, &nth_partial_numerator, &nth_partial_denominator](
            absl::Span<const XlaOp> values,
            XlaBuilder* body_builder) -> absl::StatusOr<std::vector<XlaOp>> {
      XlaOp iteration = values[kIterationIdx];
      TF_ASSIGN_OR_RETURN(
          std::vector<XlaOp> partial_numerator,
          nth_partial_numerator(iteration, values.subspan(kFirstInputIdx),
                                body_builder));
      TF_RET_CHECK(partial_numerator.size() == 1);
      TF_ASSIGN_OR_RETURN(
          std::vector<XlaOp> partial_denominator,
          nth_partial_denominator(iteration, values.subspan(kFirstInputIdx),
                                  body_builder));
      TF_RET_CHECK(partial_denominator.size() == 1);
      auto c = partial_denominator[0] + partial_numerator[0] / values[kCIdx];
      auto small_constant = FullLike(c, small);
      c = Select(Lt(Abs(c), small_constant), small_constant, c);
      auto d = partial_denominator[0] + partial_numerator[0] * values[kDIdx];
      d = Select(Lt(Abs(d), small_constant), small_constant, d);
      d = Reciprocal(d);
      auto delta = c * d;
      auto h = values[kHIdx] * delta;
      std::vector<XlaOp> updated_values(values.size());
      updated_values[kIterationIdx] = Add(iteration, ScalarLike(iteration, 1));
      updated_values[kCIdx] = c;
      updated_values[kDIdx] = d;
      updated_values[kHIdx] = h;
      std::copy(values.begin() + kFirstInputIdx, values.end(),
                updated_values.begin() + kFirstInputIdx);
      auto tolerance_comparison =
          Ge(Abs(Sub(delta, FullLike(delta, 1.0))), FullLike(delta, threshold));
      updated_values[kValuesUnconvergedIdx] =
          ReduceAll(tolerance_comparison, ConstantR0<bool>(body_builder, false),
                    CreateScalarOrComputation(PRED, body_builder));
      return updated_values;
    };
    TF_ASSIGN_OR_RETURN(std::vector<XlaOp> partial_denominator,
                        nth_partial_denominator(Zero(&b, U32), inputs, &b));
    TF_RET_CHECK(partial_denominator.size() == 1);
    auto h = partial_denominator[0];
    auto small_constant = FullLike(h, small);
    h = Select(Lt(Abs(h), small_constant), small_constant, h);
    std::vector<XlaOp> values(kFirstInputIdx + inputs.size());
    values[kIterationIdx] = One(&b, U32);
    values[kValuesUnconvergedIdx] = ConstantR0<bool>(&b, true);
    values[kCIdx] = h;
    values[kDIdx] = FullLike(h, 0.0);
    values[kHIdx] = h;
    std::copy(inputs.begin(), inputs.end(), values.begin() + kFirstInputIdx);
    TF_ASSIGN_OR_RETURN(values, WhileLoopHelper(while_cond_fn, while_body_fn,
                                                values, name, &b));
    return values[kHIdx];
  });
}
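// Method note: this is the modified Lentz algorithm for evaluating a
// continued fraction b0 + a1/(b1 + a2/(b2 + ...)) in the forward direction.
// C and D track the ratios h_n / h_{n-1} and h_{n-1} / h_n of successive
// convergents, `small` replaces near-zero denominators so the recurrence
// never divides by zero, and the loop exits once every element's per-step
// correction delta = C * D is within `threshold` of 1.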
XlaOp RegularizedIncompleteBeta(XlaOp a, XlaOp b, XlaOp x) {
  auto& builder = *x.builder();
  return builder.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(Shape shape, builder.GetShape(a));
    TF_ASSIGN_OR_RETURN(Shape b_shape, builder.GetShape(b));
    TF_ASSIGN_OR_RETURN(Shape x_shape, builder.GetShape(x));
    if (b_shape.element_type() != shape.element_type() ||
        x_shape.element_type() != shape.element_type()) {
      return InvalidArgument(
          "Operands to RegularizedIncompleteBeta must have identical types, "
          "got shapes %s, %s, and %s",
          shape.ToString(), b_shape.ToString(), x_shape.ToString());
    }
    if (!primitive_util::IsFloatingPointType(shape.element_type())) {
      return InvalidArgument(
          "Operands to RegularizedIncompleteBeta must be real-valued "
          "floating-point, but got %s",
          PrimitiveType_Name(shape.element_type()));
    }
    PrimitiveType element_type = shape.element_type();
    if (element_type == F16 || element_type == BF16) {
      element_type = F32;
      a = ConvertElementType(a, F32);
      b = ConvertElementType(b, F32);
      x = ConvertElementType(x, F32);
    }
    auto NthPartialBetaincNumerator =
        [&](XlaOp iteration, absl::Span<const XlaOp> inputs,
            XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> {
      auto a = inputs[0];
      auto b = inputs[1];
      auto x = inputs[2];
      auto iteration_bcast = Broadcast(iteration, shape.dimensions());
      auto iteration_is_even =
          Eq(iteration_bcast % FullLike(iteration_bcast, 2),
             FullLike(iteration_bcast, 0));
      auto iteration_is_one = Eq(iteration_bcast, FullLike(iteration_bcast, 1));
      auto iteration_minus_one = iteration_bcast - FullLike(iteration_bcast, 1);
      auto m = iteration_minus_one / FullLike(iteration_minus_one, 2);
      m = ConvertElementType(m, element_type);
      auto one = FullLike(a, 1.0);
      auto two = FullLike(a, 2.0);
      auto even_numerator =
          -(a + m) * (a + b + m) * x / ((a + two * m) * (a + two * m + one));
      auto odd_numerator =
          m * (b - m) * x / ((a + two * m - one) * (a + two * m));
      auto one_numerator = ScalarLike(x, 1.0);
      auto numerator = Select(iteration_is_even, even_numerator, odd_numerator);
      return std::vector<XlaOp>{
          Select(iteration_is_one, one_numerator, numerator)};
    };
    auto NthPartialBetaincDenominator =
        [&shape](XlaOp iteration, absl::Span<const XlaOp> inputs,
                 XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> {
      auto x = inputs[2];
      auto iteration_bcast = Broadcast(iteration, shape.dimensions());
      return std::vector<XlaOp>{
          Select(Eq(iteration_bcast, ScalarLike(iteration_bcast, 0)),
                 ScalarLike(x, 0.0), ScalarLike(x, 1.0))};
    };
    auto result_is_nan =
        Or(Or(Or(Le(a, ScalarLike(a, 0.0)), Le(b, ScalarLike(b, 0.0))),
              Lt(x, ScalarLike(x, 0.0))),
           Gt(x, ScalarLike(x, 1.0)));
    auto converges_rapidly =
        Lt(x, (a + FullLike(a, 1.0)) / (a + b + FullLike(b, 2.0)));
    auto a_orig = a;
    a = Select(converges_rapidly, a, b);
    b = Select(converges_rapidly, b, a_orig);
    x = Select(converges_rapidly, x, Sub(FullLike(x, 1.0), x));
    XlaOp continued_fraction;
    if (element_type == F32) {
      continued_fraction = LentzThompsonBarnettAlgorithm(
          200, std::numeric_limits<float>::epsilon() / 2.0f,
          std::numeric_limits<float>::epsilon() / 2.0f,
          NthPartialBetaincNumerator, NthPartialBetaincDenominator, {a, b, x},
          "Betainc");
    } else {
      TF_RET_CHECK(element_type == F64);
      continued_fraction = LentzThompsonBarnettAlgorithm(
          600, std::numeric_limits<double>::epsilon() / 2.0f,
          std::numeric_limits<double>::epsilon() / 2.0f,
          NthPartialBetaincNumerator, NthPartialBetaincDenominator, {a, b, x},
          "Betainc");
    }
    auto lbeta = Lbeta(a, b);
    auto result =
        continued_fraction * Exp(Log(x) * a + Log1p(-x) * b - lbeta) / a;
    result = Select(result_is_nan, NanValue(&builder, element_type), result);
    auto out =
        Select(converges_rapidly, result, Sub(FullLike(result, 1.0), result));
    return shape.element_type() == element_type
               ? out
               : ConvertElementType(out, shape.element_type());
  });
}
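// Method note: RegularizedIncompleteBeta feeds the standard continued
// fraction for I_x(a, b) into the Lentz routine above. When x is beyond the
// cut-over point (a + 1) / (a + b + 2), it instead evaluates I_{1-x}(b, a)
// and returns one minus the result, so the fraction always runs in its
// rapidly converging regime; the prefactor x^a * (1 - x)^b / (a * B(a, b))
// is assembled in log space via Lbeta to avoid overflow.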
XlaOp Polygamma(XlaOp n, XlaOp x) {
  auto& builder = *x.builder();
  auto doit = [](XlaOp n, XlaOp x, PrimitiveType type) -> XlaOp {
    XlaOp n_plus_one = n + ScalarLike(n, 1.);
    XlaOp sign =
        (ScalarLike(n, 2.) * Rem(n, ScalarLike(n, 2.)) - ScalarLike(n, 1.));
    const double nan = std::numeric_limits<double>::quiet_NaN();
    XlaOp output = Select(Eq(n, ScalarLike(n, 0.)), Digamma(x),
                          sign * Exp(Lgamma(n_plus_one)) * Zeta(n_plus_one, x));
    output = Select(Or(Ne(n, Floor(n)), Lt(n, ScalarLike(n, 0.))),
                    ScalarLike(n, nan), output);
    return output;
  };
  return builder.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(auto n_shape, builder.GetShape(n));
    TF_ASSIGN_OR_RETURN(auto x_shape, builder.GetShape(x));
    if (n_shape != x_shape) {
      return InvalidArgument(
          "Arguments to Polygamma must have equal shapes and types; "
          "got %s and %s",
          n_shape.ToString(), x_shape.ToString());
    }
    TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Zeta", x));
    bool needs_upcast =
        n_shape.element_type() == F16 || x_shape.element_type() == BF16;
    if (needs_upcast) {
      n = ConvertElementType(n, F32);
      x = ConvertElementType(x, F32);
    }
    XlaOp result = doit(n, x, n_shape.element_type());
    if (needs_upcast) {
      result = ConvertElementType(result, n_shape.element_type());
    }
    return result;
  });
}

XlaOp Zeta(XlaOp x, XlaOp q) {
  auto& builder = *x.builder();
  auto doit = [&builder](XlaOp x, XlaOp q, PrimitiveType type) -> XlaOp {
    static constexpr int M = 12, N = 9;
    static const std::array<double, M> kZetaCoeffs{
        -7.1661652561756670113e18,
        1.8152105401943546773e17,
        -4.5979787224074726105e15,
        1.1646782814350067249e14,
        -2.950130727918164224e12,
        7.47242496e10,
        -1.8924375803183791606e9,
        47900160.0,
        -1209600.0,
        30240.0,
        -720.0,
        12.0,
    };
    XlaOp acc = q, neg_power = ScalarLike(q, 0.);
    XlaOp S = Pow(q, Neg(x));
    for (int i = 0; i < N; ++i) {
      acc = acc + ScalarLike(acc, 1.);
      neg_power = Pow(acc, Neg(x));
      S = S + neg_power;
    }
    acc = acc + ScalarLike(acc, 1.);
    neg_power = Pow(acc, Neg(x));
    XlaOp I = neg_power * acc / (x - ScalarLike(acc, 1.));
    XlaOp a_inverse_square = Reciprocal(Square(acc));
    XlaOp horner_sum = ScalarLike(acc, 0.);
    XlaOp factor = ScalarLike(acc, 1.);
    static constexpr int kTwoKMinusOne = 2 * M - 1;
    for (int i = 0; i < M - 1; ++i) {
      factor = (x + ScalarLike(x, kTwoKMinusOne - 1 - 2 * i)) *
               (x + ScalarLike(x, kTwoKMinusOne - 2 - 2 * i));
      horner_sum = factor * a_inverse_square *
                   (horner_sum + ScalarLike(acc, 1. / kZetaCoeffs[i]));
    }
    XlaOp T =
        neg_power *
        (ScalarLike(neg_power, 0.5) +
         x / acc * (ScalarLike(acc, 1. / kZetaCoeffs[M - 1]) + horner_sum));
    XlaOp accurate_result = S + I + T;
    const double nan = std::numeric_limits<double>::quiet_NaN();
    const double inf = std::numeric_limits<double>::infinity();
    XlaOp output = Select(Lt(Abs(neg_power), Abs(S) * Epsilon(&builder, type)),
                          S, accurate_result);
    output = Select(Eq(x, ScalarLike(x, 1.)), ScalarLike(x, inf), output);
    output = Select(Lt(x, ScalarLike(x, 1.)), ScalarLike(x, nan), output);
    XlaOp x_domain_error = And(Le(q, ScalarLike(x, 0.)), Ne(x, Floor(x)));
    output = Select(x_domain_error, ScalarLike(x, nan), output);
    XlaOp at_pole = And(Le(q, ScalarLike(x, 0.)), Eq(q, Floor(q)));
    XlaOp x_is_even_int =
        And(Eq(Rem(x, ScalarLike(x, 2.)), ScalarLike(x, 0.)), Eq(x, Floor(x)));
    output = Select(
        at_pole, Select(x_is_even_int, ScalarLike(x, inf), ScalarLike(x, nan)),
        output);
    return output;
  };
  return builder.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(auto x_shape, builder.GetShape(x));
    TF_ASSIGN_OR_RETURN(auto q_shape, builder.GetShape(q));
    if (x_shape != q_shape) {
      return InvalidArgument(
          "Arguments to Zeta must have equal shapes and types; got %s and %s",
          x_shape.ToString(), q_shape.ToString());
    }
    TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Zeta", x));
    bool needs_upcast =
        x_shape.element_type() == F16 || x_shape.element_type() == BF16;
    if (needs_upcast) {
      x = ConvertElementType(x, F32);
      q = ConvertElementType(q, F32);
    }
    XlaOp result = doit(x, q, x_shape.element_type());
    if (needs_upcast) {
      result = ConvertElementType(result, x_shape.element_type());
    }
    return result;
  });
}
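// Method note: Zeta(x, q) above is the Hurwitz zeta function
// sum_{n >= 0} (q + n)^-x, evaluated by Euler-Maclaurin summation: S is the
// direct sum of the first N + 1 terms, I = (q + N + 1)^(1 - x) / (x - 1)
// approximates the tail integral, and T adds correction terms whose
// coefficients (stored reciprocally in kZetaCoeffs) derive from the
// Bernoulli numbers. The first Select short-circuits to S alone once the
// last computed term is already below rounding relative to S.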
}  // namespace xla

Unit Test - (Ground Truth):

#include "xla/hlo/builder/lib/math.h"

#include <cmath>
#include <complex>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "xla/array3d.h"
#include "xla/error_spec.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/service.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"

namespace xla {
namespace {

class MathTest : public ClientLibraryTestBase {
 public:
  ErrorSpec error_spec_{0.0001};
};

template <typename T>
class MathTypedTest : public MathTest {
 public:
  void TestLogEdgeCases() {
    SetFastMathDisabled(true);
    XlaBuilder b(TestName());
    Log(AddParam(LiteralUtil::CreateR1<T>({T{0.0}, T{-0.0}}), &b));
    ComputeAndCompareR1<T>(&b,
                           {-std::numeric_limits<T>::infinity(),
                            -std::numeric_limits<T>::infinity()},
                           {}, error_spec_);
  }
  void TestLog1pEdgeCases() {
    SetFastMathDisabled(true);
    XlaBuilder b(TestName());
    Log1p(AddParam(LiteralUtil::CreateR1<T>({T{0.0}, T{-0.0}, T{-1.0}}), &b));
    ComputeAndCompareR1<T>(
        &b, {T{0.0}, T{-0.0}, -std::numeric_limits<T>::infinity()}, {},
        error_spec_);
  }
  void TestIsInfOrNan() {
    SetFastMathDisabled(true);
    XlaBuilder b(TestName());
    auto x =
        ConstantR1<T>(&b, {
                              T{0},
                              T{100},
                              T{-1000},
                              T{std::numeric_limits<T>::max()},
                              T{std::numeric_limits<T>::lowest()},
                              T{std::numeric_limits<T>::infinity()},
                              T{-std::numeric_limits<T>::infinity()},
                              T{std::numeric_limits<T>::quiet_NaN()},
                              T{std::numeric_limits<T>::signaling_NaN()},
                          });
    Tuple(&b, {IsFinite(x), IsInf(x), IsPosInf(x), IsNegInf(x), IsNan(x)});
    auto expected = LiteralUtil::MakeTupleOwned(
        LiteralUtil::CreateR1<bool>(
            {true, true, true, true, true, false, false, false, false}),
        LiteralUtil::CreateR1<bool>(
            {false, false, false, false, false, true, true, false, false}),
        LiteralUtil::CreateR1<bool>(
            {false, false, false, false, false, true, false, false, false}),
        LiteralUtil::CreateR1<bool>(
            {false, false, false, false, false, false, true, false, false}),
        LiteralUtil::CreateR1<bool>(
            {false, false, false, false, false, false, false, true, true}));
    ComputeAndCompareLiteral(&b, expected, {});
  }
  void TestIsNegZero() {
    SetFastMathDisabled(true);
    XlaBuilder b(TestName());
    T inf(std::numeric_limits<T>::infinity());
    T nan(std::numeric_limits<T>::quiet_NaN());
    IsNegZero(AddParam(
        LiteralUtil::CreateR1<T>({T{-0.0}, T{0}, T{1}, T{-1}, inf, -inf, nan}),
        &b));
    ComputeAndCompareLiteral(
        &b,
        LiteralUtil::CreateR1<bool>(
            {true, false, false, false, false, false, false}),
        {}, error_spec_);
  }
  void TestSqrtPowInequivalence() {
    SetFastMathDisabled(true);
    mutable_debug_options()->clear_xla_disable_hlo_passes();
    const T inf(std::numeric_limits<T>::infinity());
    const T nan(std::numeric_limits<T>::quiet_NaN());
    XlaBuilder b(TestName());
    auto x = AddParam(LiteralUtil::CreateR1<T>({-inf}), &b);
    ConcatInDim(
        &b, {Sqrt(x), Pow(x, ScalarLike(x, 0.5)), Pow(x, ScalarLike(x, 0.3))},
        0);
    std::vector<T> expected = {nan, inf, inf};
    ComputeAndCompareR1<T>(&b, expected, {}, error_spec_);
  }
  void TestErfInvEdgeCases() {
    SetFastMathDisabled(true);
    XlaBuilder b(TestName());
    auto x = AddParam(LiteralUtil::CreateR1<T>({T{-1}, T{1}, T{0}}), &b);
    ErfInv(x);
    const T inf(std::numeric_limits<T>::infinity());
    std::vector<T> expected = {-inf, inf, T{0}};
    ComputeAndCompareR1<T>(&b, expected, {}, error_spec_);
  }
  void TestErfEdgeCases() {
    SetFastMathDisabled(true);
    const T kErfInvOneMinusHalfULP = T(3.832506856900711);
    const T inf(std::numeric_limits<T>::infinity());
    XlaBuilder b(TestName());
    auto x = AddParam(LiteralUtil::CreateR1<T>({T{-inf}, T{inf}, T{-0}, T{0},
                                                T{-kErfInvOneMinusHalfULP},
                                                T{kErfInvOneMinusHalfULP}}),
                      &b);
    Erf(x);
    std::vector<T> expected = {T(-1), T(1), T(-0), T(0), T(-1), T(1)};
    ComputeAndCompareR1<T>(&b, expected, {}, error_spec_);
  }
};

using TestTypes = ::testing::Types<float
#ifndef XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT16
                                   ,
                                   Eigen::half
#endif
#ifndef XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT64
                                   ,
                                   double
#endif
                                   >;
TYPED_TEST_CASE(MathTypedTest, TestTypes);

XLA_TYPED_TEST(MathTypedTest, LogEdgeCases) { this->TestLogEdgeCases(); }
XLA_TYPED_TEST(MathTypedTest, Log1pEdgeCases) { this->TestLog1pEdgeCases(); }
XLA_TYPED_TEST(MathTypedTest, IsInfOrNan) { this->TestIsInfOrNan(); }
XLA_TYPED_TEST(MathTypedTest, IsNegZero) { this->TestIsNegZero(); }
XLA_TYPED_TEST(MathTypedTest, DISABLED_ON_TPU(SqrtPowInequivalence)) {
  this->TestSqrtPowInequivalence();
}
XLA_TYPED_TEST(MathTypedTest, ErfInvEdgeCases) { this->TestErfInvEdgeCases(); }
XLA_TYPED_TEST(MathTypedTest, ErfEdgeCases) { this->TestErfEdgeCases(); }

XLA_TEST_F(MathTest, RealFpOnlyOps) {
  for (int64_t i = PrimitiveType_MIN; i <= PrimitiveType_MAX; ++i) {
    auto ty = static_cast<PrimitiveType>(i);
    SCOPED_TRACE(PrimitiveType_Name(ty));
    Shape shape;
    if (ty == U4 || ty == S4) {
      continue;
    }
    if (primitive_util::IsArrayType(ty)) {
      shape = ShapeUtil::MakeShape(ty, {42});
    } else if (ty == PrimitiveType::TUPLE) {
      shape = ShapeUtil::MakeTupleShape({});
    } else if (ty == PrimitiveType::OPAQUE_TYPE) {
      shape = ShapeUtil::MakeOpaqueShape();
    } else if (ty == PrimitiveType::TOKEN) {
      shape = ShapeUtil::MakeTokenShape();
    } else {
      continue;
    }
    for (const auto& test :
         std::vector<std::pair<std::function<XlaOp(XlaOp)>, std::string>>({
             {IsFinite, "is_finite"},
             {IsInf, "is_inf"},
             {IsPosInf, "is_pos_inf"},
             {IsNegInf, "is_neg_inf"},
             {IsNan, "is_nan"},
             {Erf, "erf"},
             {Erfc, "erfc"},
             {Lgamma, "lgamma"},
             {Digamma, "digamma"},
             {RoundToEven, "round_to_even"},
         })) {
      SCOPED_TRACE(test.second);
      XlaBuilder b(TestName());
      XlaOp p = Parameter(&b, 0, shape, "p0");
      test.first(p);
      if (primitive_util::IsFloatingPointType(ty)) {
        TF_EXPECT_OK(b.first_error());
      } else {
        EXPECT_FALSE(b.first_error().ok());
      }
    }
  }
}

XLA_TEST_F(MathTest, SqrtF32) {
  XlaBuilder builder(TestName());
  Literal zero_literal = LiteralUtil::Zero(PrimitiveType::F32);
  std::unique_ptr<GlobalData> zero_data =
      client_->TransferToServer(zero_literal).value();
  XlaOp zero = Parameter(&builder, 0, zero_literal.shape(), "zero");
  Sqrt(zero);
  ComputeAndCompareR0<float>(&builder, 0.0f, {zero_data.get()}, error_spec_);
}

XLA_TEST_F(MathTest, SqrtF64) {
  XlaBuilder builder(TestName());
  Literal zero_literal = LiteralUtil::Zero(PrimitiveType::F64);
  std::unique_ptr<GlobalData> zero_data =
      client_->TransferToServer(zero_literal).value();
  XlaOp zero = Parameter(&builder, 0, zero_literal.shape(), "zero");
  Sqrt(zero);
  ComputeAndCompareR0<double>(&builder, 0.0f, {zero_data.get()}, error_spec_);
}

#ifndef XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT64
XLA_TEST_F(MathTest, ErfInvF64) {
  XlaBuilder builder(TestName());
  auto x = ConstantR1<double>(
      &builder, {-0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0.0, 0.1,
                 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9});
  ErfInv(x);
  std::vector<double> expected = {-1.163087153676674,   -0.9061938024368231,
                                  -0.732869077959217,   -0.5951160814499948,
                                  -0.4769362762044698,  -0.37080715859355795,
                                  -0.27246271472675443, -0.1791434546212916,
                                  -0.08885599049425767, 0.,
                                  0.08885599049425777,  0.1791434546212916,
                                  0.27246271472675443,  0.37080715859355784,
                                  0.4769362762044698,   0.5951160814499948,
                                  0.732869077959217,    0.9061938024368231,
                                  1.1630871536766736};
  ComputeAndCompareR1<double>(&builder, expected, {}, ErrorSpec{1e-15});
}
#endif

XLA_TEST_F(MathTest, SquareTenValues) {
  XlaBuilder builder(TestName());
  auto x = ConstantR1<float>(
      &builder, {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
  Square(x);
  std::vector<float> expected = {4.41, 6.76, 6.76, 16.,  4.41,
                                 5.29, 25.,  0.81, 5.76, 2.56};
  ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}

XLA_TEST_F(MathTest, ReciprocalTenValues) {
  XlaBuilder builder(TestName());
  auto x = ConstantR1<float>(
      &builder, {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
  Reciprocal(x);
  std::vector<float> expected = {
      0.47619048, -0.38461538, 0.38461538,  -0.25,       0.47619048,
      0.43478261, -0.2,        -1.11111111, -0.41666667, 0.625};
  ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}

XLA_TEST_F(MathTest, SqrtZeroes) {
  XlaBuilder builder(TestName());
  auto x = ConstantR1<float>(&builder, {0.0, -0.0});
  Sqrt(x);
  ComputeAndCompareR1<float>(&builder, {0, 0}, {}, error_spec_);
}

XLA_TEST_F(MathTest, SqrtSixValues) {
  XlaBuilder builder(TestName());
  auto x = ConstantR1<float>(&builder, {16.0, 1.0, 1024.0, 0.16, 0.2, 12345});
  Sqrt(x);
  std::vector<float> expected = {4, 1, 32, 0.4, 0.4472, 111.1080};
  ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}

XLA_TEST_F(MathTest, CbrtSixF32Values) {
  XlaBuilder builder(TestName());
  auto x = ConstantR1<float>(&builder, {8.0, 1.0, 4096.0, -64.0, 1.728, 1331});
  Cbrt(x);
  std::vector<float> expected = {2, 1, 16, -4, 1.2, 11};
  ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.001));
}
XLA_TEST_F(MathTest, CbrtSixF64Values) {
  XlaBuilder builder(TestName());
  auto x = ConstantR1<double>(&builder, {8.0, 1.0, 4096.0, -64.0, 1.728, 1331});
  Cbrt(x);
  std::vector<double> expected = {2, 1, 16, -4, 1.2, 11};
  ComputeAndCompareR1<double>(&builder, expected, {}, ErrorSpec(0.001));
}

XLA_TEST_F(MathTest, SinhSmallValues) {
  XlaBuilder builder(TestName());
  auto x = ConstantR1<float>(&builder, {1e-3, 1e-5, 1e-7, 1e-9, 1e-11});
  Sinh(x);
  std::vector<float> expected = {1e-3, 1e-5, 1e-7, 1e-9, 1e-11};
  ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}

XLA_TEST_F(MathTest, AsinhSmallValues) {
  XlaBuilder builder(TestName());
  auto x = ConstantR1<float>(&builder, {1e-3, 1e-5, 1e-7, 1e-9, 1e-11});
  Asinh(x);
  std::vector<float> expected = {1e-3, 1e-5, 1e-7, 1e-9, 1e-11};
  ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}

XLA_TEST_F(MathTest, AtanhSmallValues) {
  XlaBuilder builder(TestName());
  auto x = ConstantR1<float>(&builder, {1e-8, 1e-9, 1e-10, 1e-11});
  Atanh(x);
  std::vector<float> expected = {1e-8, 1e-9, 1e-10, 1e-11};
  ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}

XLA_TEST_F(MathTest, Lgamma) {
  XlaBuilder builder(TestName());
  auto x = ConstantR1<float>(&builder, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.5, 1.5,
                                        2.5, -1.5, -3.5, -5.5});
  Lgamma(x);
  std::vector<float> expected = {
      0,
      0,
      static_cast<float>(std::log(2)),
      static_cast<float>(std::log(6)),
      static_cast<float>(std::log(24)),
      static_cast<float>(std::log(120)),
      static_cast<float>(std::log(M_PI) / 2),
      static_cast<float>(std::log(M_PI) / 2 - std::log(2)),
      static_cast<float>(std::log(M_PI) / 2 - std::log(4) + std::log(3)),
      static_cast<float>(std::log(M_PI) / 2 - std::log(3) + std::log(4)),
      static_cast<float>(std::log(M_PI) / 2 - std::log(105) + std::log(16)),
      static_cast<float>(std::log(M_PI) / 2 - std::log(10395) + std::log(64))};
  error_spec_ = ErrorSpec{0.001};
  ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}

#if !defined(XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT16)
XLA_TEST_F(MathTest, LgammaF16) {
  SetFastMathDisabled(true);
  XlaBuilder b(TestName());
  auto x = ConstantR1<half>(&b, {
                                    half(-7360.0),
                                    half(-4066.0),
                                    half(-5.9605e-08),
                                });
  Lgamma(x);
  std::vector<half> expected = {
      std::numeric_limits<half>::infinity(),
      std::numeric_limits<half>::infinity(),
      half(16.64),
  };
  ComputeAndCompareR1<half>(&b, expected, {}, ErrorSpec{0.1});
}
#endif

XLA_TEST_F(MathTest, Digamma) {
  XlaBuilder builder(TestName());
  auto x = ConstantR1<float>(&builder, {1.0, 0.5, 1 / 3.0, 0.25, 1 / 6.0, 0.125,
                                        2.0, 3.0, 4.0, 6.0, 8.0, 9.0});
  Digamma(x);
  constexpr double euler_mascheroni =
      0.57721566490153286060651209008240243104215933593992;
  std::vector<float> expected = {
      static_cast<float>(-euler_mascheroni),
      static_cast<float>(-2 * std::log(2) - euler_mascheroni),
      static_cast<float>(-M_PI / 2 / std::sqrt(3) - 3 * std::log(3) / 2 -
                         euler_mascheroni),
      static_cast<float>(-M_PI / 2 - 3 * std::log(2) - euler_mascheroni),
      static_cast<float>(-M_PI * std::sqrt(3) / 2 - 2 * std::log(2) -
                         3 * std::log(3) / 2 - euler_mascheroni),
      static_cast<float>(
          -M_PI / 2 - 4 * std::log(2) -
          (M_PI + std::log(2 + std::sqrt(2)) - std::log(2 - std::sqrt(2))) /
              std::sqrt(2) -
          euler_mascheroni),
      static_cast<float>(1 - euler_mascheroni),
      static_cast<float>(1.5 - euler_mascheroni),
      static_cast<float>(11 / 6.0 - euler_mascheroni),
      static_cast<float>(137 / 60.0 - euler_mascheroni),
      static_cast<float>(363 / 140.0 - euler_mascheroni),
      static_cast<float>(761 / 280.0 - euler_mascheroni)};
  ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}

XLA_TEST_F(MathTest, Igamma) {
  XlaBuilder builder(TestName());
  auto a = ConstantR3FromArray3D<float>(
      &builder,
      {{{0.3760359, 1.62685306, 0.53327996, 1.5111382, 0.3521143},
        {1.79378175, 1.05317882, 0.85049253, 1.399534, 0.22073882},
        {1.17725309, 0.90727209, 1.32418503, 1.53238533, 0.51984756}}});
  auto x = ConstantR3FromArray3D<float>(
      &builder,
      {{{0.56420934, 8.97671773, 2.81068609, 4.50655124, 2.88178617},
        {1.01795164, 8.86298411, 0.29232942, 8.17661015, 5.67652269},
        {1.59959565, 0.54463897, 0.6585252, 9.83192283, 3.93372669}}});
  Igamma(a, x);
  Array3D<float> expected = {
      {{0.78746926, 0.99940502, 0.98028261, 0.97033807, 0.99054696},
       {0.33265522, 0.99983558, 0.32599159, 0.99923275, 0.99980893},
       {0.74343963, 0.46703197, 0.33923541, 0.99978511, 0.99460685}}};
  ComputeAndCompareR3<float>(&builder, expected, {}, error_spec_);
}

XLA_TEST_F(MathTest, IgammaSpecialValues) {
  SetFastMathDisabled(true);
  XlaBuilder builder(TestName());
  const float nan = std::numeric_limits<float>::quiet_NaN();
  auto a =
      ConstantR1<float>(&builder, {nan, nan, 0.53327996, -6.00773744602e+37,
                                   -1.3937809742e+31, -23.351348877});
  auto x = ConstantR1<float>(
      &builder, {nan, 8.97671773, nan, nan, 0.0, 6.02455484352e-39});
  Igamma(a, x);
  std::vector<float> expected = {nan, nan, nan, nan, nan, nan};
  ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}

#if !defined(XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT16)
XLA_TEST_F(MathTest, IgammaF16) {
  SetFastMathDisabled(true);
  XlaBuilder builder(TestName());
  auto a = ConstantR3FromArray3D<half>(
      &builder,
      {{{half(0.37603), half(1.6268), half(0.53327), half(1.5111)},
        {half(1.79378), half(1.05317), half(0.85049), half(1.3995)},
        {half(1.17725), half(0.90727), half(1.32418), half(1.5323)}}});
  Igamma(a, a);
  Array3D<half> expected = {
      {{half(0.7068214), half(0.6041154), half(0.67748886), half(0.60799426)},
       {half(0.599202), half(0.6288743), half(0.64280254), half(0.6121421)},
       {half(0.6220287), half(0.6384635), half(0.6152258), half(0.6072449)}}};
  ComputeAndCompareR3<half>(&builder, expected, {}, ErrorSpec{1e-3});
}
#endif

XLA_TEST_F(MathTest, Igammac) {
  XlaBuilder builder(TestName());
  auto a = ConstantR3FromArray3D<float>(
      &builder,
      {{{0.3760359, 1.62685306, 0.53327996, 1.5111382, 0.3521143},
        {1.79378175, 1.05317882, 0.85049253, 1.399534, 0.22073882},
        {1.17725309, 0.90727209, 1.32418503, 1.53238533, 0.51984756}}});
  auto x = ConstantR3FromArray3D<float>(
      &builder,
      {{{0.56420934, 8.97671773, 2.81068609, 4.50655124, 2.88178617},
        {1.01795164, 8.86298411, 0.29232942, 8.17661015, 5.67652269},
        {1.59959565, 0.54463897, 0.6585252, 9.83192283, 3.93372669}}});
  Igammac(a, x);
  Array3D<float> expected = {{{2.12530741e-01, 5.94977775e-04, 1.97173867e-02,
                               2.96619296e-02, 9.45303689e-03},
                              {6.67344782e-01, 1.64421996e-04, 6.74008406e-01,
                               7.67252602e-04, 1.91071108e-04},
                              {2.56560373e-01, 5.32968026e-01, 6.60764593e-01,
                               2.14889688e-04, 5.39314824e-03}}};
  ComputeAndCompareR3<float>(&builder, expected, {}, error_spec_);
}

#if !defined(XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT16)
XLA_TEST_F(MathTest, IgammacF16) {
  SetFastMathDisabled(true);
  XlaBuilder builder(TestName());
  auto a = ConstantR3FromArray3D<half>(
      &builder,
      {{{half(0.37603), half(1.6268), half(0.53327), half(1.5111)},
        {half(1.79378), half(1.05317), half(0.85049), half(1.3995)},
        {half(1.17725), half(0.90727), half(1.32418), half(1.5323)}}});
  Igammac(a, a);
  Array3D<half> expected = {
      {{half(0.29317862), half(0.39588454), half(0.32251117), half(0.39200574)},
       {half(0.40079802), half(0.37112573), half(0.35719746), half(0.3878579)},
       {half(0.3779713), half(0.36153653), half(0.38477424),
        half(0.39275512)}}};
  ComputeAndCompareR3<half>(&builder, expected, {}, ErrorSpec{1e-4});
}
#endif

XLA_TEST_F(MathTest, RoundToEven) {
  XlaBuilder builder(TestName());
  auto x = ConstantR1<float>(
      &builder, {-1.4, -1.5, -2.5, -0.5, 0, 0.5, 1.5, 2.5, 3.5, 4.5});
  RoundToEven(x);
  std::vector<float> expected = {-1.0, -2.0, -2.0, -0.0, 0,
                                 0.0,  2.0,  2.0,  4.0,  4.0};
  ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}

XLA_TEST_F(MathTest, ErfRejectsComplexInputs) {
  XlaBuilder b(TestName());
  auto x = ConstantR1<std::complex<float>>(&b, {{0, 0}});
  Erf(x);
  EXPECT_FALSE(b.Build().status().ok());
}

XLA_TEST_F(MathTest, ErfcRejectsComplexInputs) {
  XlaBuilder b(TestName());
  auto x = ConstantR1<std::complex<float>>(&b, {{0, 0}});
  Erfc(x);
  EXPECT_FALSE(b.Build().status().ok());
}

XLA_TEST_F(MathTest, LgammaRejectsComplexInputs) {
  XlaBuilder b(TestName());
  auto x = ConstantR1<std::complex<float>>(&b, {{0, 0}});
  Lgamma(x);
  EXPECT_FALSE(b.Build().status().ok());
}

XLA_TEST_F(MathTest, DigammaRejectsComplexInputs) {
  XlaBuilder b(TestName());
  auto x = ConstantR1<std::complex<float>>(&b, {{0, 0}});
  Digamma(x);
  EXPECT_FALSE(b.Build().status().ok());
}

XLA_TEST_F(MathTest, RoundToEvenRejectsComplexInputs) {
  XlaBuilder b(TestName());
  auto x = ConstantR1<std::complex<float>>(&b, {{0, 0}});
  RoundToEven(x);
  EXPECT_FALSE(b.Build().status().ok());
}

XLA_TEST_F(MathTest, BesselI0eFloat) {
  XlaBuilder builder(TestName());
  auto x = ConstantR1<float>(
      &builder,
      {-20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0,
       2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0});
  BesselI0e(x);
  std::vector<float> expected = {0.0897803118848,
                                 0.0947062952128,
                                 0.100544127361,
                                 0.107615251671,
                                 0.116426221213,
                                 0.127833337163,
                                 0.143431781857,
                                 0.16665743264,
                                 0.207001921224,
                                 0.308508322554,
                                 1.0,
                                 0.308508322554,
                                 0.207001921224,
                                 0.16665743264,
                                 0.143431781857,
                                 0.127833337163,
                                 0.116426221213,
                                 0.107615251671,
                                 0.100544127361,
                                 0.0947062952128,
                                 0.0897803118848};
  ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}

XLA_TEST_F(MathTest, DISABLED_ON_TPU(BesselI0eDouble)) {
  XlaBuilder builder(TestName());
  auto x = ConstantR1<double>(
      &builder,
      {-20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0,
       2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0});
  BesselI0e(x);
  std::vector<double> expected = {0.0897803118848,
                                  0.0947062952128,
                                  0.100544127361,
                                  0.107615251671,
                                  0.116426221213,
                                  0.127833337163,
                                  0.143431781857,
                                  0.16665743264,
                                  0.207001921224,
                                  0.308508322554,
                                  1.0,
                                  0.308508322554,
                                  0.207001921224,
                                  0.16665743264,
                                  0.143431781857,
                                  0.127833337163,
                                  0.116426221213,
                                  0.107615251671,
                                  0.100544127361,
                                  0.0947062952128,
                                  0.0897803118848};
  ComputeAndCompareR1<double>(&builder, expected, {}, error_spec_);
}

XLA_TEST_F(MathTest, BesselI1eFloat) {
  XlaBuilder builder(TestName());
  auto x = ConstantR1<float>(
      &builder,
      {-20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0,
       2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0});
  BesselI1e(x);
  std::vector<float> expected = {-0.0875062221833,
                                 -0.092036796872,
                                 -0.0973496147565,
                                 -0.103697667463,
                                 -0.11146429929,
                                 -0.121262681384,
                                 -0.134142493293,
                                 -0.152051459309,
                                 -0.178750839502,
                                 -0.215269289249,
                                 0.0,
                                 0.215269289249,
                                 0.178750839502,
                                 0.152051459309,
                                 0.134142493293,
                                 0.121262681384,
                                 0.11146429929,
                                 0.103697667463,
                                 0.0973496147565,
                                 0.092036796872,
                                 0.0875062221833};
  ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}

XLA_TEST_F(MathTest, DISABLED_ON_TPU(BesselI1eDouble)) {
  XlaBuilder builder(TestName());
  auto x = ConstantR1<double>(
      &builder,
      {-20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0,
       2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0});
  BesselI1e(x);
  std::vector<double> expected = {-0.0875062221833,
                                  -0.092036796872,
                                  -0.0973496147565,
                                  -0.103697667463,
                                  -0.11146429929,
                                  -0.121262681384,
                                  -0.134142493293,
                                  -0.152051459309,
                                  -0.178750839502,
                                  -0.215269289249,
                                  0.0,
                                  0.215269289249,
                                  0.178750839502,
                                  0.152051459309,
                                  0.134142493293,
                                  0.121262681384,
                                  0.11146429929,
                                  0.103697667463,
                                  0.0973496147565,
                                  0.092036796872,
                                  0.0875062221833};
  ComputeAndCompareR1<double>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, AcosComplexValues) {
  XlaBuilder builder(TestName());
  auto x = ConstantR1<std::complex<float>>(
      &builder, {{0, 0}, {0, 1}, {1, 1}, {0.8, 0.2}});
  Acos(x);
  std::vector<std::complex<float>> expected = {
      {1.5707963267948966, 0},
      {1.5707963267948966, -0.881373587019543},
      {0.9045568943023814, -1.0612750619050357},
      {0.7011246914497526, -0.30527648462436596}};
  ComputeAndCompareR1<std::complex<float>>(&builder, expected, {}, error_spec_);
}

XLA_TEST_F(MathTest, ZetaF64) {
  XlaBuilder builder(TestName());
  auto x = ConstantR1<double>(&builder, {2.0});
  auto q = ConstantR1<double>(&builder, {1.0});
  Zeta(x, q);
  std::vector<double> expected = {1.64493406684823};
  ComputeAndCompareR1<double>(&builder, expected, {},
                              ErrorSpec{0.00000000000001});
}

}  // namespace
}  // namespace xla

Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/math.cc
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/math_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea

ID: 5ac1b344-4708-4ddf-93cf-5d8ed5df8d87
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: reader
File Path in Repository: tensorflow/cc/saved_model/reader.cc
File Path for Unit Test: tensorflow/cc/saved_model/reader_test.cc
Code:

#include "tensorflow/cc/saved_model/reader.h"

#include <memory>
#include <string>
#include <unordered_set>
#include <utility>

#include "absl/memory/memory.h"
#include "absl/status/statusor.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/cc/saved_model/metrics.h"
#include "tensorflow/cc/saved_model/util.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system_helper.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/core/util/tensor_bundle/byte_swap_tensor.h"

#define IS_OSS true

namespace tensorflow {
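// Note: tag matching in FindMetaGraphDef below is exact set equality, not
// subset matching -- a MetaGraphDef saved with tags {"serve", "gpu"} is not
// returned for the query {"serve"}; callers must supply the complete tag set
// (compare the NoTagMatch case in the unit tests further down).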
graph with tags { \" << absl::StrJoin(tags, \" \")\n << \" }\";\n for (MetaGraphDef& graph_def : *saved_model_proto->mutable_meta_graphs()) {\n std::unordered_set graph_tags;\n for (const string& tag : graph_def.meta_info_def().tags()) {\n graph_tags.insert(tag);\n }\n if (graph_tags == tags) {\n MetaGraphDef* meta_graph_def = &graph_def;\n if (!port::kLittleEndian) {\n TF_RETURN_IF_ERROR(ByteSwapTensorContentInMetaGraphDef(meta_graph_def));\n }\n return meta_graph_def;\n }\n }\n return Status(\n absl::StatusCode::kNotFound,\n strings::StrCat(\n \"Could not find meta graph def matching supplied tags: { \",\n absl::StrJoin(tags, \" \"),\n \" }. To inspect available tag-sets in the SavedModel, please \"\n \"use the SavedModel CLI: `saved_model_cli`\"));\n}\nStatus ReadSavedModel(absl::string_view export_dir,\n SavedModel* saved_model_proto) {\n LOG(INFO) << \"Reading SavedModel from: \" << export_dir;\n if (IS_OSS) {\n const std::string saved_model_pb_path =\n io::JoinPath(export_dir, kSavedModelFilenamePb);\n TF_ASSIGN_OR_RETURN(\n bool saved_model_pb_exists,\n internal::FileExists(Env::Default(), saved_model_pb_path));\n if (saved_model_pb_exists) {\n Status result = ReadBinaryProto(Env::Default(), saved_model_pb_path,\n saved_model_proto);\n if (result.ok()) {\n metrics::SavedModelReadCount(\n saved_model::GetWriteVersion(*saved_model_proto))\n .IncrementBy(1);\n }\n return result;\n }\n }\n const std::string saved_model_pbtxt_path =\n io::JoinPath(export_dir, kSavedModelFilenamePbTxt);\n auto saved_model_pbtxt_exists =\n internal::FileExists(Env::Default(), saved_model_pbtxt_path);\n if (saved_model_pbtxt_exists.value_or(false)) {\n Status result = ReadTextProto(Env::Default(), saved_model_pbtxt_path,\n saved_model_proto);\n if (result.ok()) {\n metrics::SavedModelReadCount(\n saved_model::GetWriteVersion(*saved_model_proto))\n .IncrementBy(1);\n }\n return result;\n }\n if (!IS_OSS) {\n }\n return Status(\n absl::StatusCode::kNotFound,\n strings::StrCat(\"Could not find SavedModel .pb or .pbtxt at supplied \"\n \"export directory path: \",\n export_dir,\n \". 
Check that \"\n \"the directory exists and that you have the right \"\n \"permissions for accessing it.\"));\n}\nStatus ReadMetaGraphDefFromSavedModel(absl::string_view export_dir,\n const std::unordered_set& tags,\n MetaGraphDef* const meta_graph_def) {\n SavedModel saved_model_proto;\n TF_RETURN_IF_ERROR(ReadSavedModel(export_dir, &saved_model_proto));\n TF_ASSIGN_OR_RETURN(MetaGraphDef * m,\n FindMetaGraphDef(tags, &saved_model_proto));\n *meta_graph_def = std::move(*m);\n return absl::OkStatus();\n}\nStatus ReadSavedModelDebugInfoIfPresent(\n absl::string_view export_dir,\n std::unique_ptr* debug_info_proto) {\n LOG(INFO) << \"Reading SavedModel debug info (if present) from: \"\n << export_dir;\n const string debug_info_pb_path =\n io::JoinPath(export_dir, \"debug\", \"saved_model_debug_info.pb\");\n TF_ASSIGN_OR_RETURN(bool debug_info_pb_exists,\n internal::FileExists(Env::Default(), debug_info_pb_path));\n if (debug_info_pb_exists) {\n GraphDebugInfo debug_info;\n TF_RETURN_IF_ERROR(\n ReadBinaryProto(Env::Default(), debug_info_pb_path, &debug_info));\n *debug_info_proto = std::make_unique(std::move(debug_info));\n }\n return absl::OkStatus();\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/cc/saved_model/reader.h\"\n#include \n#include \"tensorflow/cc/saved_model/constants.h\"\n#include \"tensorflow/cc/saved_model/metrics.h\"\n#include \"tensorflow/cc/saved_model/tag_constants.h\"\n#include \"tensorflow/core/lib/core/status.h\"\n#include \"tensorflow/core/lib/core/status_test_util.h\"\n#include \"tensorflow/core/lib/io/path.h\"\n#include \"tensorflow/core/lib/strings/str_util.h\"\n#include \"tensorflow/core/platform/path.h\"\n#include \"tensorflow/core/platform/resource_loader.h\"\nnamespace tensorflow {\nnamespace {\nstring TestDataPbTxt() {\n return io::JoinPath(\"tensorflow\", \"cc\", \"saved_model\", \"testdata\",\n \"half_plus_two_pbtxt\", \"00000123\");\n}\nstring TestDataSharded() {\n return io::JoinPath(\"tensorflow\", \"cc\", \"saved_model\", \"testdata\",\n \"half_plus_two\", \"00000123\");\n}\nstring ChunkedSavedModel() {\n return io::JoinPath(\"tensorflow\", \"cc\", \"saved_model\", \"testdata\",\n \"chunked_saved_model\", \"chunked_model\");\n}\nstring NonChunkedSavedModel() {\n return io::JoinPath(\"tensorflow\", \"cc\", \"saved_model\", \"testdata\",\n \"chunked_saved_model\", \"non_chunked_model\");\n}\nclass ReaderTest : public ::testing::Test {\n protected:\n ReaderTest() {}\n void CheckMetaGraphDef(const MetaGraphDef& meta_graph_def) {\n const auto& tags = meta_graph_def.meta_info_def().tags();\n EXPECT_TRUE(std::find(tags.begin(), tags.end(), kSavedModelTagServe) !=\n tags.end());\n EXPECT_NE(meta_graph_def.meta_info_def().tensorflow_version(), \"\");\n EXPECT_EQ(\n meta_graph_def.signature_def().at(\"serving_default\").method_name(),\n \"tensorflow/serving/predict\");\n }\n};\nTEST_F(ReaderTest, TagMatch) {\n MetaGraphDef meta_graph_def;\n const string export_dir = GetDataDependencyFilepath(TestDataSharded());\n TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(export_dir, {kSavedModelTagServe},\n &meta_graph_def));\n CheckMetaGraphDef(meta_graph_def);\n}\nTEST_F(ReaderTest, NoTagMatch) {\n MetaGraphDef meta_graph_def;\n const string export_dir = GetDataDependencyFilepath(TestDataSharded());\n Status st = ReadMetaGraphDefFromSavedModel(export_dir, {\"missing-tag\"},\n &meta_graph_def);\n EXPECT_FALSE(st.ok());\n EXPECT_TRUE(absl::StrContains(\n st.message(),\n \"Could not find meta graph def matching supplied tags: { missing-tag 
}\"))\n << st.message();\n}\nTEST_F(ReaderTest, NoTagMatchMultiple) {\n MetaGraphDef meta_graph_def;\n const string export_dir = GetDataDependencyFilepath(TestDataSharded());\n Status st = ReadMetaGraphDefFromSavedModel(\n export_dir, {kSavedModelTagServe, \"missing-tag\"}, &meta_graph_def);\n EXPECT_FALSE(st.ok());\n EXPECT_TRUE(absl::StrContains(\n st.message(), \"Could not find meta graph def matching supplied tags: \"))\n << st.message();\n}\nTEST_F(ReaderTest, InvalidExportPath) {\n MetaGraphDef meta_graph_def;\n const string export_dir = GetDataDependencyFilepath(\"missing-path\");\n Status st = ReadMetaGraphDefFromSavedModel(export_dir, {kSavedModelTagServe},\n &meta_graph_def);\n EXPECT_FALSE(st.ok());\n}\nTEST_F(ReaderTest, ReadSavedModelDebugInfoIfPresent) {\n const string export_dir = GetDataDependencyFilepath(TestDataSharded());\n std::unique_ptr debug_info_proto;\n TF_ASSERT_OK(ReadSavedModelDebugInfoIfPresent(export_dir, &debug_info_proto));\n}\nTEST_F(ReaderTest, MetricsNotUpdatedFailedRead) {\n MetaGraphDef meta_graph_def;\n const int read_count_v1 = metrics::SavedModelReadCount(\"1\").value();\n const int read_count_v2 = metrics::SavedModelReadCount(\"2\").value();\n const string export_dir = GetDataDependencyFilepath(\"missing-path\");\n Status st =\n ReadMetaGraphDefFromSavedModel(export_dir, {\"serve\"}, &meta_graph_def);\n EXPECT_FALSE(st.ok());\n EXPECT_EQ(metrics::SavedModelReadCount(\"1\").value(), read_count_v1);\n EXPECT_EQ(metrics::SavedModelReadCount(\"2\").value(), read_count_v2);\n}\nTEST_F(ReaderTest, MetricsUpdatedSuccessfulRead) {\n MetaGraphDef meta_graph_def;\n const int read_count_v1 = metrics::SavedModelReadCount(\"1\").value();\n const string export_dir = GetDataDependencyFilepath(TestDataSharded());\n Status st =\n ReadMetaGraphDefFromSavedModel(export_dir, {\"serve\"}, &meta_graph_def);\n EXPECT_EQ(metrics::SavedModelReadCount(\"1\").value(), read_count_v1 + 1);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/reader.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/reader_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":242,"cells":{"ID":{"kind":"string","value":"60b3f1ce-5ea1-464a-9c6d-070957e277da"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"hlo_bisect_state"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/tools/hlo_bisect/hlo_bisect_state.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/tools/hlo_bisect/hlo_bisect_state_test.cc"},"Code":{"kind":"string","value":"#include \"xla/tools/hlo_bisect/hlo_bisect_state.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/hlo_dce.h\"\n#include \"xla/tests/test_utils.h\"\n#include \"xla/util.h\"\nnamespace xla {\nnamespace bisect {\nnamespace {\nstd::vector GetModifiedInstructionPostOrder(\n HloComputation* computation) {\n std::vector instructions(\n 
computation->parameter_instructions().begin(),\n computation->parameter_instructions().end());\n absl::c_copy_if(computation->MakeInstructionPostOrder(),\n std::back_inserter(instructions),\n [&](const HloInstruction* instr) {\n return instr->opcode() != HloOpcode::kParameter;\n });\n return instructions;\n}\nabsl::Status MorphModuleWithOutputs(HloModule* module,\n absl::Span outputs) {\n HloComputation* entry_computation = module->entry_computation();\n HloInstruction* new_root = outputs.size() == 1\n ? outputs[0]\n : entry_computation->AddInstruction(\n HloInstruction::CreateTuple(outputs));\n entry_computation->set_root_instruction(new_root, true);\n *module->mutable_entry_computation_layout() =\n module->compute_computation_layout();\n HloDCE dce;\n absl::StatusOr dce_result = dce.Run(module);\n return dce_result.status();\n}\nabsl::Status MorphModuleWithInstructions(\n HloModule* module, absl::Span instructions) {\n ConstHloInstructionSet in_range_instructions(instructions.begin(),\n instructions.end());\n auto keep_result = [&](const HloInstruction* instruction) {\n return instruction->opcode() != HloOpcode::kParameter &&\n !absl::c_any_of(instruction->users(),\n [&](const HloInstruction* user) {\n return in_range_instructions.count(user) != 0;\n });\n };\n std::vector outputs;\n absl::c_copy_if(instructions, std::back_inserter(outputs), keep_result);\n return MorphModuleWithOutputs(module, outputs);\n}\nabsl::Status MorphModuleWithInstructions(HloModule* module,\n size_t num_instructions) {\n std::vector ordered_instructions =\n GetModifiedInstructionPostOrder(module->entry_computation());\n HloInstruction* const* instructions_begin = &ordered_instructions.front();\n return MorphModuleWithInstructions(\n module, absl::MakeSpan(instructions_begin, num_instructions));\n}\nabsl::Status MorphModuleWithLiterals(\n HloModule* module, absl::flat_hash_map literal_map) {\n HloComputation* entry_computation = module->entry_computation();\n absl::flat_hash_map replace_map;\n for (HloInstruction* instruction : entry_computation->instructions()) {\n auto it = literal_map.find(instruction->name());\n if (it != literal_map.end()) {\n replace_map.emplace(instruction, std::move(it->second));\n }\n }\n for (auto& [instruction, literal] : replace_map) {\n if (!instruction->IsDead()) {\n HloInstruction* new_instruction = entry_computation->AddInstruction(\n HloInstruction::CreateConstant(std::move(literal)));\n absl::Status replace_status =\n entry_computation->ReplaceInstruction(instruction, new_instruction);\n TF_RETURN_IF_ERROR(replace_status);\n }\n }\n xla::HloDCE dce;\n absl::StatusOr dce_status = dce.Run(module);\n return dce_status.status();\n}\nbool InstructionNotReplaceableWithConstant(HloInstruction* instruction) {\n return instruction->shape().is_dynamic() ||\n instruction->opcode() == HloOpcode::kConstant ||\n instruction->opcode() == HloOpcode::kTuple ||\n instruction->opcode() == HloOpcode::kParameter;\n}\n} \nabsl::StatusOr HloBisectState::ShouldProcess() {\n return RunModule(*module_);\n}\nabsl::StatusOr HloBisectState::TrimEntryComputation() {\n bool changed_in_loop = false;\n bool changed = false;\n for (int iter = 0; changed || iter < 2; iter++) {\n if (iter % 2 == 0) {\n VLOG(2) << \"Trimming by outputs, iteration \" << iter;\n TF_ASSIGN_OR_RETURN(changed, TrimByOutputs());\n } else {\n VLOG(2) << \"Trimming by instructions, iteration \" << iter;\n TF_ASSIGN_OR_RETURN(changed, TrimByInstructions());\n }\n changed_in_loop |= changed;\n }\n VLOG(2) << \"Trimming by replacing 
instructions with literals\";\n TF_ASSIGN_OR_RETURN(changed, TrimByUsingConstants());\n VLOG(2) << \"Final module: \" << module_->ToString();\n return changed || changed_in_loop;\n}\nstd::unique_ptr&& HloBisectState::GetResult() {\n return std::move(module_);\n}\nabsl::StatusOr HloBisectState::RunModule(const HloModule& module) {\n VLOG(3) << \"Modified module: \" << module.ToString();\n absl::StatusOr bug_result = bug_checker_->Run(module);\n TF_RETURN_IF_ERROR(bug_result.status());\n VLOG(3) << \"Bug checker result: \" << bug_result.value();\n if (!bug_result.value()) {\n for (HloInstruction* instr : module.entry_computation()->instructions()) {\n foldable_instructions_.emplace(instr->name());\n }\n for (auto& [key, value] : bug_checker_->GetResults()) {\n foldable_instructions_values_[key] = std::move(value);\n }\n }\n return bug_result;\n}\nabsl::StatusOr HloBisectState::TrimByOutputs() {\n HloInstruction* root_instruction =\n module_->entry_computation()->root_instruction();\n if (root_instruction->opcode() != HloOpcode::kTuple ||\n root_instruction->operand_count() < 2) {\n return false;\n }\n auto run_modified = [&](int64_t start, int64_t end) -> absl::StatusOr {\n std::unique_ptr new_module = module_->Clone(\"\");\n HloInstruction* const* new_operands =\n new_module->entry_computation()->root_instruction()->operands().begin();\n TF_RETURN_IF_ERROR(MorphModuleWithOutputs(\n new_module.get(),\n absl::MakeSpan(new_operands + start, end - start + 1)));\n return RunModule(*new_module);\n };\n int64_t bisect_low = 0;\n int64_t bisect_high = root_instruction->operand_count() - 1;\n while (bisect_low < bisect_high) {\n int64_t cur = bisect_low + (bisect_high - bisect_low) / 2;\n VLOG(2) << \"Number of outputs: \" << (cur - bisect_low + 1) << \" [\"\n << bisect_low << \"..\" << cur << \"]\";\n TF_ASSIGN_OR_RETURN(bool has_bug, run_modified(bisect_low, cur));\n if (has_bug) {\n bisect_high = cur;\n } else {\n TF_ASSIGN_OR_RETURN(has_bug, run_modified(cur + 1, bisect_high));\n if (has_bug) {\n bisect_low = cur + 1;\n } else {\n break;\n }\n }\n }\n bool changed =\n (bisect_high - bisect_low) < (root_instruction->operand_count() - 1);\n if (changed) {\n TF_RETURN_IF_ERROR(MorphModuleWithOutputs(\n module_.get(),\n absl::MakeSpan(root_instruction->operands().begin() + bisect_low,\n bisect_high - bisect_low + 1)));\n TF_RETURN_IF_ERROR(ExpectModuleIsBuggy());\n }\n return changed;\n}\nabsl::StatusOr HloBisectState::TrimByInstructions() {\n HloComputation* computation = module_->entry_computation();\n int64_t upper_bound = computation->instruction_count() -\n computation->root_instruction()->shape().IsTuple();\n int64_t bisect_low = computation->num_parameters() - 1;\n int64_t bisect_high = upper_bound;\n while (bisect_low + 1 < bisect_high) {\n int64_t cur = bisect_low + (bisect_high - bisect_low) / 2;\n VLOG(2) << \"Number of instructions: \" << cur << \" (of \"\n << computation->instruction_count() << \")\";\n std::unique_ptr new_module = module_->Clone(\"\");\n TF_RETURN_IF_ERROR(MorphModuleWithInstructions(new_module.get(), cur));\n TF_ASSIGN_OR_RETURN(bool has_bug, RunModule(*new_module));\n if (has_bug) {\n bisect_high = cur;\n } else {\n bisect_low = cur;\n }\n }\n if (bisect_high == computation->num_parameters()) {\n return Internal(\n \"The checker fails on an empty computation! Something is not right. 
\"\n \"Can't bisect.\");\n }\n bool changed = bisect_high < upper_bound;\n if (changed) {\n TF_RETURN_IF_ERROR(MorphModuleWithInstructions(module_.get(), bisect_high));\n TF_RETURN_IF_ERROR(ExpectModuleIsBuggy());\n }\n return changed;\n}\nabsl::StatusOr HloBisectState::TrimByUsingConstants() {\n absl::flat_hash_map literal_map;\n int64_t random_literals_count = 0;\n for (HloInstruction* instr : module_->entry_computation()->instructions()) {\n if (InstructionNotReplaceableWithConstant(instr)) {\n continue;\n }\n if (foldable_instructions_values_.contains(instr->name())) {\n auto it = foldable_instructions_values_.extract(instr->name());\n literal_map.insert(std::move(it));\n } else if (foldable_instructions_.contains(instr->name())) {\n absl::StatusOr literal_status = MakeFakeLiteral(instr->shape());\n TF_RETURN_IF_ERROR(literal_status.status());\n literal_map[instr->name()] = std::move(literal_status).value();\n ++random_literals_count;\n }\n }\n VLOG(2) << \"Number of literals: \" << literal_map.size()\n << \" (random: \" << random_literals_count << \")\";\n std::unique_ptr new_module = module_->Clone(\"\");\n TF_RETURN_IF_ERROR(\n MorphModuleWithLiterals(new_module.get(), std::move(literal_map)));\n TF_ASSIGN_OR_RETURN(bool has_bug, RunModule(*new_module));\n if (has_bug) {\n std::swap(module_, new_module);\n }\n return has_bug;\n}\nabsl::Status HloBisectState::ExpectModuleIsBuggy() {\n TF_ASSIGN_OR_RETURN(bool has_bug, RunModule(*module_));\n if (has_bug) {\n return absl::OkStatus();\n }\n const int retry_count = 5;\n int bug_count = 0;\n for (int i = 0; i < retry_count; i++) {\n TF_ASSIGN_OR_RETURN(has_bug, bug_checker_->Run(*module_));\n if (has_bug) {\n bug_count++;\n }\n }\n if (bug_count != 0) {\n return InternalStrCat(\"The checker is non deterministic! 
(only \", bug_count,\n \" failures seen in \", (retry_count + 1), \" runs)\");\n }\n return Internal(\"We \\\"lost\\\" the bug while bisecting!\");\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/tools/hlo_bisect/hlo_bisect_state.h\"\n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/status/statusor.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/literal.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/service/pattern_matcher_gmock.h\"\n#include \"xla/tests/hlo_test_base.h\"\nnamespace xla {\nnamespace bisect {\nnamespace {\nnamespace m = match;\nusing HloBisectStateTest = HloTestBase;\nclass TestBugSearch : public BugCheckerInterface {\n public:\n TestBugSearch(std::initializer_list opcodes) : opcodes_(opcodes) {}\n absl::StatusOr Run(const HloModule& module) override {\n auto has_opcode = [&](HloOpcode opcode) {\n return absl::c_any_of(module.entry_computation()->instructions(),\n [opcode](const HloInstruction* instr) {\n return instr->opcode() == opcode;\n });\n };\n return absl::c_all_of(opcodes_, has_opcode);\n }\n absl::flat_hash_map GetResults() override { return {}; }\n private:\n std::vector opcodes_;\n};\nLiteral CreateLiteral(float value) {\n Literal result = Literal::CreateFromShape(ShapeUtil::MakeShape(F32, {}));\n result.PopulateWithValue(value);\n return result;\n}\nTEST_F(HloBisectStateTest, TrimByOutputs) {\n const char* kModuleStr = R\"(\n HloModule test_module\n ENTRY test_computation {\n p1 = s32[8] parameter(0)\n p2 = s32[8] parameter(1)\n a = s32[8] add(p1, p2)\n b = s32[8] multiply(p1, p2)\n c = s32[8] subtract(p1, p2)\n ROOT sum = tuple(a, b, c)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kModuleStr));\n TestBugSearch bug_checker({HloOpcode::kMultiply});\n HloBisectState bisect(std::move(module), &bug_checker);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, bisect.TrimEntryComputation());\n EXPECT_TRUE(changed);\n auto reduced_module = std::move(bisect).GetResult();\n EXPECT_THAT(reduced_module->entry_computation()->root_instruction(),\n GmockMatch(m::Multiply(m::Parameter(0), m::Parameter(1))));\n}\nTEST_F(HloBisectStateTest, TrimByInstructions) {\n const char* kModuleStr = R\"(\n HloModule axpy_module\n ENTRY axpy_computation {\n alpha = f32[] parameter(0)\n broadcast = f32[10] broadcast(alpha), dimensions={}\n x = f32[10] parameter(1)\n ax = f32[10] multiply(broadcast, x)\n y = f32[10] parameter(2)\n ROOT add = f32[10] add(ax, y)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kModuleStr));\n TestBugSearch bug_checker({HloOpcode::kMultiply, HloOpcode::kBroadcast});\n HloBisectState bisect(std::move(module), &bug_checker);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, bisect.TrimEntryComputation());\n EXPECT_TRUE(changed);\n auto reduced_module = std::move(bisect).GetResult();\n EXPECT_THAT(\n reduced_module->entry_computation()->root_instruction(),\n GmockMatch(m::Multiply(m::Broadcast(m::Parameter(0)), m::Parameter(1))));\n}\nTEST_F(HloBisectStateTest, TrimByUsingRandomConstants) {\n const char* kModuleStr = R\"(\n HloModule test_module\n ENTRY test_computation {\n p1 = f32[4] parameter(0)\n p2 = f32[4] parameter(1)\n a = f32[4] multiply(p1, p2)\n b = f32[4] add(p1, p2)\n ROOT result = f32[4] power(a, b)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kModuleStr));\n TestBugSearch bug_checker({HloOpcode::kPower});\n 
HloBisectState bisect(std::move(module), &bug_checker);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, bisect.TrimEntryComputation());\n EXPECT_TRUE(changed);\n auto reduced_module = std::move(bisect).GetResult();\n EXPECT_THAT(reduced_module->entry_computation()->root_instruction(),\n GmockMatch(m::Power(m::Constant(), m::Constant())));\n}\nTEST_F(HloBisectStateTest, TrimByUsingReferenceConstants) {\n class TestBugSearchWithReferenceConstants : public TestBugSearch {\n public:\n TestBugSearchWithReferenceConstants()\n : TestBugSearch({HloOpcode::kPower}) {}\n absl::flat_hash_map GetResults() override {\n absl::flat_hash_map results;\n results[\"a\"] = CreateLiteral(2.0f);\n results[\"b\"] = CreateLiteral(3.0f);\n return results;\n }\n };\n const char* kModuleStr = R\"(\n HloModule test_module\n ENTRY test_computation {\n p1 = f32[] parameter(0)\n p2 = f32[] parameter(1)\n a = f32[] multiply(p1, p2)\n b = f32[] add(p1, p2)\n ROOT result = f32[] power(a, b)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kModuleStr));\n TestBugSearchWithReferenceConstants bug_checker;\n HloBisectState bisect(std::move(module), &bug_checker);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, bisect.TrimEntryComputation());\n EXPECT_TRUE(changed);\n auto reduced_module = std::move(bisect).GetResult();\n EXPECT_THAT(reduced_module->entry_computation()->root_instruction(),\n GmockMatch(m::Power(m::Constant(), m::Constant())));\n}\nTEST_F(HloBisectStateTest, TrimByOutputsLostBug) {\n class CustomBugSearch : public TestBugSearch {\n public:\n CustomBugSearch() : TestBugSearch({HloOpcode::kConstant}) {}\n absl::StatusOr Run(const HloModule& module) override {\n TF_ASSIGN_OR_RETURN(bool has_constants, TestBugSearch::Run(module));\n int program_size = module.entry_computation()->instruction_count();\n return program_size == 5 && !has_constants;\n }\n };\n const char* kModuleStr = R\"(\n HloModule test_module\n ENTRY test_computation {\n p1 = s32[8] parameter(0)\n p2 = s32[8] parameter(1)\n a = s32[8] add(p1, p2)\n b = s32[8] multiply(p1, p2)\n ROOT sum = tuple(a, b)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(kModuleStr));\n CustomBugSearch bug_checker;\n HloBisectState bisect(std::move(module), &bug_checker);\n TF_ASSERT_OK_AND_ASSIGN(bool changed, bisect.TrimEntryComputation());\n EXPECT_FALSE(changed);\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_bisect/hlo_bisect_state.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_bisect/hlo_bisect_state_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":243,"cells":{"ID":{"kind":"string","value":"01655d2e-ef4c-487f-b26b-7a6ff14cd974"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"hlo_expand"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/tools/hlo_expand.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/tools/tests/hlo_expand_test.cc"},"Code":{"kind":"string","value":"#include \"xla/tools/hlo_expand.h\"\n#include \n#include \"xla/hlo/pass/hlo_pass_pipeline.h\"\n#include \"xla/service/batchnorm_expander.h\"\n#include \"xla/service/cholesky_expander.h\"\n#include 
\"xla/service/hlo.pb.h\"\n#include \"xla/service/hlo_verifier.h\"\n#include \"xla/service/rng_bit_generator_expander.h\"\n#include \"xla/service/rng_expander.h\"\n#include \"xla/service/sharding_propagation.h\"\n#include \"xla/service/spmd/stateful_rng_spmd_partitioner.h\"\n#include \"xla/service/triangular_solve_expander.h\"\n#include \"xla/tsl/util/command_line_flags.h\"\n#include \"xla/xla_data.pb.h\"\nnamespace xla {\nvoid AddPassesToPipeline(HloExpandConfig& config, HloPassPipeline& pipeline,\n const HloModuleConfig& hlo_module_config) {\n if (config.batch_norm_grad_expander || config.batch_norm_inference_expander ||\n config.batch_norm_training_expander) {\n pipeline.AddPass(\n config.batch_norm_training_expander,\n config.batch_norm_inference_expander,\n config.batch_norm_grad_expander);\n }\n if (config.cholesky_expander) {\n pipeline.AddPass();\n }\n if (config.rng_expander) {\n pipeline.AddPass();\n }\n if (config.rng_bit_generator_philox_expander) {\n pipeline.AddPass(\n xla::RandomAlgorithm::RNG_PHILOX);\n }\n if (config.rng_bit_generator_three_fry_expander) {\n pipeline.AddPass(\n xla::RandomAlgorithm::RNG_THREE_FRY);\n }\n if (config.triangular_solve_expander) {\n pipeline.AddPass();\n }\n if (config.spmd_expander) {\n pipeline.AddPass(\n true, false,\n hlo_module_config.allow_spmd_sharding_propagation_to_output(),\n hlo_module_config.allow_spmd_sharding_propagation_to_parameters());\n pipeline.AddPass(\n hlo_module_config.num_partitions(), hlo_module_config.replica_count(),\n hlo_module_config.debug_options()\n .xla_gpu_threshold_for_windowed_einsum_mib());\n }\n if (config.verify_hlo) {\n pipeline.AddPass(false,\n false);\n }\n}\nstd::vector GetFlags(HloExpandConfig& config) {\n return {\n tsl::Flag(\"h\", &config.help, \"Alias of --help\"),\n tsl::Flag(\"help\", &config.help, \"Display available options\"),\n tsl::Flag(\n \"input_format\", &config.input_format,\n \"The format of the input file. If this flag is not specified, it's\"\n \"inferred from the file extension instead. Valid values:\\n \"\n \"* hlo|txt : HLO textual format\\n\"\n \"* pb : xla::HloProto in binary proto format\\n\"\n \"* pbtxt : xla::HloProto in text proto format\"),\n tsl::Flag(\"o\", &config.output_file, \"Alias of --output_file=\"),\n tsl::Flag(\"output_file\", &config.output_file, \"Full output file path\"),\n tsl::Flag(\"output_format\", &config.output_format,\n \"The format of the output file. Defaults to input_format. 
\"\n \"Valid values:\\n\"\n \"* hlo|txt : HLO textual format\\n\"\n \"* pb : xla::HloProto in binary proto format\\n\"\n \"* pbtxt : xla::HloProto in text proto format\"),\n tsl::Flag(\"batch_norm_expander\", &config.batch_norm_expander,\n \"Overrides and expands batch_norm_grad, batch_norm_inference, \"\n \"and batch_norm_training ops\"),\n tsl::Flag(\"batch_norm_grad_expander\", &config.batch_norm_grad_expander,\n \"Expands batch_norm_grad op\"),\n tsl::Flag(\"batch_norm_inference_expander\",\n &config.batch_norm_inference_expander,\n \"Expands batch_norm_inference_grad op\"),\n tsl::Flag(\"batch_norm_training_expander\",\n &config.batch_norm_training_expander,\n \"Expands batch_norm_training_grad op\"),\n tsl::Flag(\"cholesky_expander\", &config.cholesky_expander,\n \"Expands cholesky op\"),\n tsl::Flag(\"spmd_expander\", &config.spmd_expander,\n \"Expands SPMD sharding\"),\n tsl::Flag(\"expand_all\", &config.expand_all,\n \"Overrides and expands all supported passes below\"),\n tsl::Flag(\"rng_expander\", &config.rng_expander, \"Expands rng op\"),\n tsl::Flag(\n \"rng_bit_generator_expander\", &config.rng_bit_generator_expander,\n \"Overrides and expands rng_bit_generator op on all prng algorithms\"),\n tsl::Flag(\"rng_bit_generator_philox_expander\",\n &config.rng_bit_generator_philox_expander,\n \"Expands rng_bit_generator op using philox prng algorithm\"),\n tsl::Flag(\"rng_bit_generator_three_fry_expander\",\n &config.rng_bit_generator_three_fry_expander,\n \"Expands rng_bit_generator op using three_fry prng algorithm\"),\n tsl::Flag(\"triangular_solve_expander\", &config.triangular_solve_expander,\n \"Expands triangular_solve op\"),\n tsl::Flag(\"verify_hlo\", &config.verify_hlo,\n \"Run HLO verifier after passes\"),\n };\n}\nvoid ParseCompoundFlags(HloExpandConfig& config) {\n config.batch_norm_grad_expander |=\n config.expand_all || config.batch_norm_expander;\n config.batch_norm_inference_expander |=\n config.expand_all || config.batch_norm_expander;\n config.batch_norm_training_expander |=\n config.expand_all || config.batch_norm_expander;\n config.cholesky_expander |= config.expand_all;\n config.rng_bit_generator_philox_expander |=\n config.expand_all || config.rng_bit_generator_expander;\n config.rng_bit_generator_three_fry_expander |=\n config.expand_all || config.rng_bit_generator_expander;\n config.rng_expander |= config.expand_all;\n config.triangular_solve_expander |= config.expand_all;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \n#include \n#include \n#include \"tsl/platform/path.h\"\n#include \"tsl/platform/subprocess.h\"\n#include \"tsl/platform/test.h\"\nnamespace xla {\nnamespace {\nclass HloExpandTest : public ::testing::Test {\n protected:\n void HloOpt(std::vector& additional_flags) {\n std::string hlo_opt_bin =\n tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), \"tools\", \"hlo-expand\");\n tsl::SubProcess proc;\n std::vector argv = {hlo_opt_bin};\n argv.insert(argv.end(), additional_flags.begin(), additional_flags.end());\n proc.SetProgram(hlo_opt_bin, argv);\n proc.SetChannelAction(tsl::CHAN_STDOUT, tsl::ACTION_PIPE);\n proc.SetChannelAction(tsl::CHAN_STDERR, tsl::ACTION_PIPE);\n EXPECT_TRUE(proc.Start());\n stdout_output_ = stderr_output_ = \"\";\n int status = proc.Communicate(nullptr, &stdout_output_, &stderr_output_);\n#if defined(_WIN32) || defined(_WIN64)\n exited_normally_ = (status == 0);\n exit_status_ = status;\n#else\n exited_normally_ = WIFEXITED(status);\n exit_status_ = exited_normally_ ? 
WEXITSTATUS(status) : -1;\n#endif \n }\n std::string stdout_output_;\n std::string stderr_output_;\n bool exited_normally_ = false;\n int exit_status_ = -1;\n};\nTEST_F(HloExpandTest, CholeskyHlo) {\n std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), \"tools\",\n \"tests\", \"cholesky.hlo\");\n std::vector additional_flags = {\"--input_format=hlo\", hlo_path};\n HloOpt(additional_flags);\n const std::string& expected_hlo_string =\n R\"(HloModule main, entry_computation_layout={()->f64[3,3]{1,0}}\nENTRY %main.3 () -> f64[3,3] {\n %constant.1 = f64[3,3]{1,0} constant({ { 1, 2, 3 }, { 2, 20, 26 }, { 3, 26, 70 } })\n ROOT %cholesky.2 = f64[3,3]{1,0} cholesky(f64[3,3]{1,0} %constant.1), lower=true\n})\";\n EXPECT_TRUE(exited_normally_);\n EXPECT_EQ(exit_status_, 0);\n EXPECT_THAT(stdout_output_, testing::HasSubstr(expected_hlo_string));\n}\nTEST_F(HloExpandTest, SpmdHlo) {\n std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), \"tools\",\n \"tests\", \"spmd.hlo\");\n std::vector additional_flags = {\"--spmd_expander\", hlo_path};\n HloOpt(additional_flags);\n const std::string& expected_hlo_string =\n R\"(HloModule module, entry_computation_layout={(f32[24,64]{1,0}, f32[39296,64]{1,0})->f32[24,19648]{1,0}}, num_partitions=2\nENTRY %entry_spmd (param: f32[24,64], param.1: f32[39296,64]) -> f32[24,19648] {\n %param = f32[24,64]{1,0} parameter(0), sharding={replicated}\n %lhs.copy.1 = f32[24,64]{1,0} copy(f32[24,64]{1,0} %param)\n %param.1 = f32[39296,64]{1,0} parameter(1), sharding={replicated}\n %constant = s32[2]{0} constant({0, 19648})\n %partition-id = u32[] partition-id()\n %dynamic-slice = s32[1]{0} dynamic-slice(s32[2]{0} %constant, u32[] %partition-id), dynamic_slice_sizes={1}\n %reshape = s32[] reshape(s32[1]{0} %dynamic-slice)\n %constant.1 = s32[] constant(0)\n %dynamic-slice.1 = f32[19648,64]{1,0} dynamic-slice(f32[39296,64]{1,0} %param.1, s32[] %reshape, s32[] %constant.1), dynamic_slice_sizes={19648,64}\n %rhs.copy.1 = f32[19648,64]{1,0} copy(f32[19648,64]{1,0} %dynamic-slice.1)\n ROOT %dot.1 = f32[24,19648]{1,0} dot(f32[24,64]{1,0} %lhs.copy.1, f32[19648,64]{1,0} %rhs.copy.1), lhs_contracting_dims={1}, rhs_contracting_dims={1}\n})\";\n EXPECT_TRUE(exited_normally_);\n EXPECT_EQ(exit_status_, 0);\n EXPECT_THAT(stdout_output_, testing::HasSubstr(expected_hlo_string));\n}\nTEST_F(HloExpandTest, CholeskyExpanderHlo) {\n std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), \"tools\",\n \"tests\", \"cholesky.hlo\");\n std::vector additional_flags = {\"--input_format=hlo\", hlo_path,\n \"--expand_all\"};\n HloOpt(additional_flags);\n const std::string& expected_hlo_string = \"%xla.cholesky_f64\";\n EXPECT_TRUE(exited_normally_);\n EXPECT_EQ(exit_status_, 0);\n EXPECT_THAT(stdout_output_, testing::HasSubstr(expected_hlo_string));\n}\nTEST_F(HloExpandTest, InvalidArgc) {\n std::vector additional_flags = {\"--input_format=hlo\", \"foo\",\n \"bar\", \"baz\"};\n HloOpt(additional_flags);\n const std::string& expected_string =\n \"Cannot parse more than one argument. 
See usage below:\";\n EXPECT_TRUE(exited_normally_);\n EXPECT_EQ(exit_status_, 1);\n EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));\n}\nTEST_F(HloExpandTest, InvalidInputFileExtension) {\n std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), \"tools\",\n \"tests\", \"foo.bar\");\n std::vector additional_flags = {hlo_path};\n HloOpt(additional_flags);\n const std::string& expected_string =\n \"input_format must be specified as [hlo|pb|pbtxt|txt].\";\n EXPECT_TRUE(exited_normally_);\n EXPECT_EQ(exit_status_, 1);\n EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));\n}\nTEST_F(HloExpandTest, InvalidInputFormat) {\n std::vector additional_flags = {\"--input_format=foo\"};\n HloOpt(additional_flags);\n const std::string& expected_string =\n \"input_format must be specified as [hlo|pb|pbtxt|txt].\";\n EXPECT_TRUE(exited_normally_);\n EXPECT_EQ(exit_status_, 1);\n EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));\n}\nTEST_F(HloExpandTest, InvalidOutputFileExtension) {\n std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), \"tools\",\n \"tests\", \"cholesky.hlo\");\n std::string output_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(),\n \"tools\", \"tests\", \"foo.bar\");\n std::vector additional_flags = {\"--input_format=\", hlo_path,\n \"--output_file=\" + output_path};\n HloOpt(additional_flags);\n const std::string& expected_string =\n \"output_format must be specified as [hlo|pb|pbtxt].\";\n EXPECT_TRUE(exited_normally_);\n EXPECT_EQ(exit_status_, 1);\n EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));\n}\nTEST_F(HloExpandTest, InvalidOutputFormat) {\n std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), \"tools\",\n \"tests\", \"cholesky.hlo\");\n std::vector additional_flags = {\"--input_format=\", hlo_path,\n \"--output_format=foo\"};\n HloOpt(additional_flags);\n const std::string& expected_string =\n \"output_format must be specified as [hlo|pb|pbtxt].\";\n EXPECT_TRUE(exited_normally_);\n EXPECT_EQ(exit_status_, 1);\n EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));\n}\nTEST_F(HloExpandTest, InvalidFile) {\n std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), \"tools\",\n \"tests\", \"foo.bar\");\n std::vector additional_flags = {\"--input_format=hlo\", hlo_path};\n HloOpt(additional_flags);\n const std::string& expected_string = \"Try: hlo-expand --help\";\n EXPECT_TRUE(exited_normally_);\n EXPECT_EQ(exit_status_, 1);\n EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));\n}\nTEST_F(HloExpandTest, UnsupportedOutputFormat) {\n std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), \"tools\",\n \"tests\", \"cholesky.hlo\");\n std::vector additional_flags = {\"--input_format=hlo\",\n \"--output_format=pb\", hlo_path};\n HloOpt(additional_flags);\n const std::string& expected_string =\n \"Printing to stdout must specify supported \"\n \"output_format=[hlo|pbtxt|txt].\";\n EXPECT_TRUE(exited_normally_);\n EXPECT_EQ(exit_status_, 1);\n EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));\n}\nTEST_F(HloExpandTest, VerificationFailure) {\n std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), \"tools\",\n \"tests\", \"invalid_concat.hlo\");\n std::vector additional_flags = {\"--verify_hlo\", hlo_path};\n HloOpt(additional_flags);\n const std::string& expected_string =\n \"Cannot concatenate arrays that differ in dimensions\";\n EXPECT_TRUE(exited_normally_);\n EXPECT_EQ(exit_status_, 1);\n 
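// Editorial aside (comment only, not in the original test): these subprocess tests drive the hlo-expand binary through the flags declared in GetFlags above. A hedged command-line sketch; the file paths are placeholders, not values from this row:\n  //   hlo-expand --input_format=hlo --cholesky_expander --verify_hlo /tmp/in.hlo\n  //   hlo-expand --expand_all --output_format=pbtxt -o /tmp/expanded.pbtxt /tmp/in.hlo\n  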
EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_expand.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/tests/hlo_expand_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":244,"cells":{"ID":{"kind":"string","value":"079d69b0-65c5-4dbf-b05a-cfe05100b0de"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"stable_delegate_plugin"},"File Path in Repository":{"kind":"string","value":"tensorflow/lite/acceleration/configuration/stable_delegate_plugin.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/lite/acceleration/configuration/stable_delegate_plugin_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/lite/acceleration/configuration/stable_delegate_plugin.h\"\nnamespace tflite {\nnamespace delegates {\nTFLITE_REGISTER_DELEGATE_FACTORY_FUNCTION(StableDelegatePlugin,\n StableDelegatePlugin::New);\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \n#include \n#include \"pthreadpool.h\" \n#include \"tensorflow/lite/acceleration/configuration/configuration_generated.h\"\n#include \"tensorflow/lite/core/acceleration/configuration/delegate_registry.h\"\n#include \"tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h\"\nnamespace tflite {\nclass StableDelegatePluginTest : public testing::Test {\n public:\n static constexpr int kNumThreadsForTest = 7;\n static constexpr tflite::XNNPackFlags kFlagsForTest =\n tflite::XNNPackFlags::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8_QU8;\n static constexpr char kDelegateBinaryPath[] =\n \"tensorflow/lite/delegates/utils/experimental/\"\n \"stable_delegate/libtensorflowlite_stable_xnnpack_delegate.so\";\n void SetUp() override {\n flatbuffers::Offset stable_delegate_path_offset =\n flatbuffer_builder_.CreateString(kDelegateBinaryPath);\n StableDelegateLoaderSettingsBuilder stable_delegate_loader_settings_builder(\n flatbuffer_builder_);\n stable_delegate_loader_settings_builder.add_delegate_path(\n stable_delegate_path_offset);\n flatbuffers::Offset\n stable_delegate_loader_settings =\n stable_delegate_loader_settings_builder.Finish();\n XNNPackSettingsBuilder xnnpack_settings_builder(flatbuffer_builder_);\n xnnpack_settings_builder.add_num_threads(kNumThreadsForTest);\n xnnpack_settings_builder.add_flags(kFlagsForTest);\n flatbuffers::Offset xnnpack_settings =\n xnnpack_settings_builder.Finish();\n TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);\n tflite_settings_builder.add_stable_delegate_loader_settings(\n stable_delegate_loader_settings);\n tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);\n tflite_settings_builder.add_delegate(Delegate_XNNPACK);\n flatbuffers::Offset tflite_settings =\n tflite_settings_builder.Finish();\n flatbuffer_builder_.Finish(tflite_settings);\n tflite_settings_ = flatbuffers::GetRoot(\n flatbuffer_builder_.GetBufferPointer());\n delegate_plugin_ = delegates::DelegatePluginRegistry::CreateByName(\n \"StableDelegatePlugin\", *tflite_settings_);\n ASSERT_NE(delegate_plugin_, nullptr);\n }\n void TearDown() override { delegate_plugin_.reset(); }\n protected:\n flatbuffers::FlatBufferBuilder 
flatbuffer_builder_;\n  const TFLiteSettings *tflite_settings_;\n  std::unique_ptr<delegates::DelegatePluginInterface> delegate_plugin_;\n};\nTEST_F(StableDelegatePluginTest, CanCreateAndDestroyDelegate) {\n  delegates::TfLiteDelegatePtr delegate = delegate_plugin_->Create();\n  EXPECT_NE(delegate, nullptr);\n}\nTEST_F(StableDelegatePluginTest, CanGetDelegateErrno) {\n  delegates::TfLiteDelegatePtr delegate = delegate_plugin_->Create();\n  EXPECT_EQ(delegate_plugin_->GetDelegateErrno(delegate.get()), 0);\n}\nTEST_F(StableDelegatePluginTest, SetsCorrectThreadCount) {\n  delegates::TfLiteDelegatePtr delegate = delegate_plugin_->Create();\n  pthreadpool_t threadpool = static_cast<pthreadpool_t>(\n      TfLiteXNNPackDelegateGetThreadPool(delegate.get()));\n  EXPECT_EQ(pthreadpool_get_threads_count(threadpool), kNumThreadsForTest);\n}\n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/acceleration/configuration/stable_delegate_plugin.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/acceleration/configuration/stable_delegate_plugin_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":245,"cells":{"ID":{"kind":"string","value":"c396709c-ef47-4f4e-8cf4-224ba0257c00"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"composite_device"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/common_runtime/composite_device.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/common_runtime/composite_device_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/common_runtime/composite_device.h\"\n#include \"absl/strings/str_join.h\"\n#include \"tensorflow/core/util/device_name_utils.h\"\nnamespace tensorflow {\nconst char* const kCompositeDeviceType = \"COMPOSITE\";\nstd::unique_ptr<CompositeDevice> CompositeDevice::MakeDevice(\n    const std::vector<string>& underlying_devices, const int unique_device_id,\n    const DeviceNameUtils::ParsedName& host_name, Status* status) {\n  DeviceNameUtils::ParsedName parsed_name = host_name;\n  parsed_name.type = kCompositeDeviceType;\n  parsed_name.id = unique_device_id;\n  const string device_name = DeviceNameUtils::ParsedNameToString(parsed_name);\n  return CompositeDevice::MakeDevice(underlying_devices, device_name, status);\n}\nstd::unique_ptr<CompositeDevice> CompositeDevice::MakeDevice(\n    const std::vector<string>& underlying_devices, const string& device_name,\n    Status* status) {\n  if (underlying_devices.empty()) {\n    status->Update(\n        errors::InvalidArgument(\"underlying_devices should not be empty.\"));\n    return nullptr;\n  }\n  DeviceNameUtils::ParsedName parsed_name;\n  if (!DeviceNameUtils::ParseFullName(underlying_devices.at(0), &parsed_name)) {\n    status->Update(tensorflow::errors::InvalidArgument(\n        \"Cannot parse device name \", underlying_devices.at(0),\n        \" when creating CompositeDevice.\"));\n    return nullptr;\n  }\n  const string& underlying_type = parsed_name.type;\n  for (int i = 1; i < underlying_devices.size(); ++i) {\n    DeviceNameUtils::ParsedName name;\n    if (!DeviceNameUtils::ParseFullName(underlying_devices.at(i), &name)) {\n      status->Update(tensorflow::errors::InvalidArgument(\n          \"Cannot parse device name \", underlying_devices.at(i),\n          \" when creating CompositeDevice.\"));\n      return nullptr;\n    }\n    if (name.type != underlying_type) {\n      
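// Editorial comment (added; not in the original source): every underlying device must share the device type of the first entry, so mixing e.g. CPU and GPU names fails with InvalidArgument, which the CompositeDeviceTest.Basic test below exercises.\n      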
status->Update(tensorflow::errors::InvalidArgument(\n \"Expect device type \", parsed_name.type, \"; but got type \", name.type,\n \" from device: \", underlying_devices.at(i),\n \" when creating CompositeDevice.\"));\n return nullptr;\n }\n }\n DeviceAttributes device_attributes;\n device_attributes.set_name(device_name);\n device_attributes.set_device_type(kCompositeDeviceType);\n return absl::WrapUnique(\n new CompositeDevice(device_attributes, underlying_devices));\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/common_runtime/composite_device.h\"\n#include \"tensorflow/core/lib/core/status_test_util.h\"\nnamespace tensorflow {\nTEST(CompositeDeviceTest, Basic) {\n const string host_name = \"/job:localhost/replica:0/task:0/device:CPU:0\";\n DeviceNameUtils::ParsedName parsed_host_name;\n EXPECT_TRUE(DeviceNameUtils::ParseFullName(host_name, &parsed_host_name));\n std::vector underlying_devices;\n {\n Status status;\n std::unique_ptr composite_device =\n CompositeDevice::MakeDevice(underlying_devices, 0,\n parsed_host_name, &status);\n EXPECT_EQ(composite_device, nullptr);\n EXPECT_EQ(error::INVALID_ARGUMENT, status.code());\n EXPECT_TRUE(absl::StrContains(status.message(),\n \"underlying_devices should not be empty\"))\n << status.ToString();\n }\n {\n Status status;\n underlying_devices.push_back(\n \"/job:localhost/replica:0/task:0/device:CPU:0\");\n underlying_devices.push_back(\n \"/job:localhost/replica:0/task:0/device:CPU:1\");\n std::unique_ptr composite_device =\n CompositeDevice::MakeDevice(underlying_devices, 0,\n parsed_host_name, &status);\n TF_ASSERT_OK(status);\n EXPECT_EQ(composite_device->device_type(), kCompositeDeviceType);\n EXPECT_EQ(underlying_devices, *composite_device->underlying_devices());\n }\n {\n Status status;\n underlying_devices.push_back(\n \"/job:localhost/replica:0/task:0/device:GPU:0\");\n std::unique_ptr composite_device =\n CompositeDevice::MakeDevice(underlying_devices, 1,\n parsed_host_name, &status);\n EXPECT_EQ(composite_device, nullptr);\n EXPECT_EQ(error::INVALID_ARGUMENT, status.code());\n EXPECT_TRUE(absl::StrContains(status.message(),\n \"Expect device type CPU; but got type GPU\"))\n << status.ToString();\n }\n}\nTEST(CompositeDeviceTest, DeviceName) {\n const string composite_device_name =\n \"/job:localhost/replica:0/task:0/device:CPU:10\";\n std::vector underlying_devices;\n underlying_devices.push_back(\"/job:worker/replica:0/task:0/device:CPU:0\");\n underlying_devices.push_back(\"/job:worker/replica:0/task:0/device:CPU:1\");\n Status status;\n std::unique_ptr composite_device =\n CompositeDevice::MakeDevice(underlying_devices, composite_device_name,\n &status);\n TF_ASSERT_OK(status);\n EXPECT_EQ(composite_device->name(), composite_device_name);\n EXPECT_EQ(composite_device->device_type(), kCompositeDeviceType);\n EXPECT_EQ(underlying_devices, *composite_device->underlying_devices());\n}\n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/composite_device.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/composite_device_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":246,"cells":{"ID":{"kind":"string","value":"53602328-c7cf-4c6d-8ee9-b9779b3bed8b"},"Language":{"kind":"string","value":"cpp"},"Repository 
Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"ar_crs_combiner"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/ar_crs_combiner.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/ar_crs_combiner_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/ar_crs_combiner.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_query.h\"\n#include \"xla/literal.h\"\n#include \"xla/literal_util.h\"\n#include \"xla/service/call_graph.h\"\n#include \"xla/service/hlo_replication_analysis.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/status_macros.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/status.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nabsl::StatusOr ReplaceReplicatedAllReduce(HloModule* module,\n int64_t partition_count) {\n TF_ASSIGN_OR_RETURN(\n auto replication_analysis,\n HloReplicationAnalysis::Run(module, true));\n bool changed = false;\n int64_t next_channel = hlo_query::NextChannelId(*module);\n for (auto computation : module->computations()) {\n for (auto instruction : computation->instructions()) {\n if (auto ar = DynCast(instruction)) {\n const Shape& shape = ar->shape();\n if (ar->channel_id()) {\n continue;\n }\n if (ar->replica_groups().size() > 1) {\n continue;\n }\n if (shape.IsTuple() || shape.element_type() != F32) {\n continue;\n }\n if (module->config().replica_count() < 8 * partition_count) {\n continue;\n }\n if (replication_analysis->HloInstructionIsReplicatedAt(ar, {})) {\n VLOG(2) << \"Replaced replicated all-reduce:\" << ar->ToString();\n ar->set_channel_id(next_channel++);\n auto divisor =\n computation->AddInstruction(HloInstruction::CreateConstant(\n LiteralUtil::CreateR0(partition_count)));\n auto bcast = computation->AddInstruction(\n HloInstruction::CreateBroadcast(shape, divisor, {}));\n auto div = computation->AddInstruction(HloInstruction::CreateBinary(\n ar->shape(), HloOpcode::kDivide, ar, bcast));\n TF_RETURN_IF_ERROR(ar->ReplaceAllUsesWith(div));\n changed = true;\n }\n }\n }\n }\n return changed;\n}\nbool HasCombinableReplicaGroup(HloInstruction* hlo, int64_t num_partitions) {\n auto all_reduce = Cast(hlo);\n auto replica_groups = all_reduce->replica_groups();\n const int64_t replica_count = hlo->GetModule()->config().replica_count();\n CHECK(all_reduce->IsCrossModuleAllReduce());\n if (all_reduce->use_global_device_ids()) {\n if (replica_groups.size() != replica_count) {\n return false;\n }\n for (const auto& group : replica_groups) {\n if (group.replica_ids_size() != num_partitions) {\n return false;\n }\n absl::flat_hash_set partition_ids;\n int64_t replica_id = group.replica_ids(0) / num_partitions;\n for (int64_t i = 0; i < num_partitions; ++i) {\n if (group.replica_ids(i) / num_partitions != replica_id) {\n return false;\n }\n partition_ids.insert(group.replica_ids(i) % 
num_partitions);
      }
      if (partition_ids.size() != num_partitions) {
        return false;
      }
    }
    return true;
  }
  return replica_groups.size() == replica_count;
}
}  // namespace
namespace m = match;
std::optional<ArCrsCombiner::ArCrsPair> ArCrsCombiner::MatchesArCrsPattern(
    HloInstruction* instruction) {
  // Ops that the cross-module AR is allowed to move past on its way down to
  // the matching cross-replica CRS.
  auto can_ar_move_past_instruction = [](HloInstruction* instruction) -> bool {
    if (instruction->user_count() != 1) {
      return false;
    }
    switch (instruction->opcode()) {
      case HloOpcode::kBitcast:
      case HloOpcode::kTranspose:
      case HloOpcode::kReshape:
        return true;
      case HloOpcode::kConvert:
        // The convert must not change the floating-ness of the element type.
        return ShapeUtil::ElementIsFloating(instruction->shape()) ==
               ShapeUtil::ElementIsFloating(instruction->operand(0)->shape());
      case HloOpcode::kAdd:
      case HloOpcode::kSubtract:
      case HloOpcode::kMultiply:
        // Only supported for floating point operands.
        return ShapeUtil::ElementIsFloating(instruction->shape());
      default:
        return false;
    }
  };
  auto computation_is_addition = [](HloComputation* c) {
    return c->instruction_count() == 3 &&
           Match(c->root_instruction(), m::Add(m::Parameter(), m::Parameter()));
  };
  if (instruction->IsCrossModuleAllReduce() &&
      HasCombinableReplicaGroup(instruction, num_spatial_partitions_) &&
      computation_is_addition(instruction->called_computations()[0]) &&
      instruction->user_count() == 1) {
    auto next = instruction->users()[0];
    int64_t distance = 1;
    while (!next->IsCrossReplicaAllReduce()) {
      if (can_ar_move_past_instruction(next)) {
        next = next->users()[0];
      } else {
        return std::nullopt;
      }
      ++distance;
    }
    if (!Cast<HloAllReduceInstruction>(next)->IsNoop() &&
        computation_is_addition(next->called_computations()[0])) {
      ArCrsPair pair(instruction, next, distance);
      VLOG(2) << "ArCrsPair matching pattern: " << pair.ToString();
      return pair;
    }
  }
  return std::nullopt;
}
std::optional<HloInstruction*> ArCrsCombiner::WhileFromBodyParameter(
    HloInstruction* instruction) {
  CHECK_EQ(HloOpcode::kParameter, instruction->opcode());
  HloComputation* computation = instruction->parent();
  auto caller_instructions = call_graph_->GetComputationCallers(computation);
  if (caller_instructions.size() == 1) {
    auto caller_instruction = caller_instructions[0];
    if (caller_instruction->opcode() == HloOpcode::kWhile) {
      return caller_instruction;
    }
  }
  return std::nullopt;
}
std::optional<HloInstruction*> ArCrsCombiner::ConditionalFromBodyParameter(
    HloInstruction* instruction) {
  CHECK_EQ(HloOpcode::kParameter, instruction->opcode());
  HloComputation* computation = instruction->parent();
  auto caller_instructions = call_graph_->GetComputationCallers(computation);
  if (caller_instructions.size() == 1) {
    auto caller_instruction = caller_instructions[0];
    if (caller_instruction->opcode() == HloOpcode::kConditional) {
      return caller_instruction;
    }
  }
  return std::nullopt;
}
std::optional<std::vector<HloInstruction*>> ArCrsCombiner::GetAllTuples(
    HloInstruction* instruction,
    absl::flat_hash_set<HloInstruction*>* visited) {
  if (visited->find(instruction) != visited->end()) {
    return std::vector<HloInstruction*>();
  }
  visited->insert(instruction);
  switch (instruction->opcode()) {
    case HloOpcode::kTuple: {
      return std::vector<HloInstruction*>({instruction});
    }
    case HloOpcode::kDomain: {
      return GetAllTuples(instruction->operands()[0], visited);
    }
    case HloOpcode::kParameter: {
      auto maybe_while = WhileFromBodyParameter(instruction);
      if (maybe_while) {
        auto while_instr = *maybe_while;
        auto init_tuples = GetAllTuples(while_instr->while_init(), visited);
        auto body_tuples = GetAllTuples(
            while_instr->while_body()->root_instruction(), visited);
        if (!init_tuples || !body_tuples) {
          return std::nullopt;
        }
        auto result = *init_tuples;
        result.insert(result.end(), body_tuples->begin(), body_tuples->end());
        return result;
      }
      auto maybe_conditional = ConditionalFromBodyParameter(instruction);
      if (maybe_conditional) {
        auto cond_instr = *maybe_conditional;
        std::vector<HloInstruction*> tuples;
        for (int64_t i = 0; i < cond_instr->branch_computations().size(); ++i) {
          if (cond_instr->branch_computation(i)->parameter_instruction(0) ==
              instruction) {
            // If the same computation is used for more than one branch of the
            // conditional, we collect the arguments that flow to the
            // computation from all branches.
            auto branch_tuples =
                GetAllTuples(cond_instr->mutable_operand(i + 1), visited);
            if (!branch_tuples) {
              return std::nullopt;
            }
            tuples.insert(tuples.end(), branch_tuples->begin(),
                          branch_tuples->end());
          }
        }
        return tuples;
      }
      return std::nullopt;
    }
    case HloOpcode::kGetTupleElement: {
      std::vector<HloInstruction*> result_tuples;
      auto tuples = GetAllTuples(instruction->operands()[0], visited);
      if (!tuples) {
        return std::nullopt;
      }
      for (auto tuple : *tuples) {
        auto tmp_tuples = GetAllTuples(
            tuple->mutable_operand(instruction->tuple_index()), visited);
        if (!tmp_tuples) {
          return std::nullopt;
        }
        result_tuples.insert(result_tuples.end(), tmp_tuples->begin(),
                             tmp_tuples->end());
      }
      return result_tuples;
    }
    case HloOpcode::kConditional: {
      std::vector<HloInstruction*> result_tuples;
      const auto& branch_computations = instruction->branch_computations();
      result_tuples.reserve(branch_computations.size());
      for (HloComputation* body : branch_computations) {
        if (body->root_instruction()->opcode() != HloOpcode::kTuple) {
          return std::nullopt;
        }
        result_tuples.push_back(body->root_instruction());
      }
      return result_tuples;
    }
    case HloOpcode::kWhile: {
      auto init_tuples = GetAllTuples(instruction->while_init(), visited);
      auto body_tuples =
          GetAllTuples(instruction->while_body()->root_instruction(), visited);
      if (!init_tuples || !body_tuples) {
        return std::nullopt;
      }
      auto result = *init_tuples;
      result.insert(result.end(), body_tuples->begin(), body_tuples->end());
      return result;
    }
    default:
      return std::nullopt;
  }
}
bool ArCrsCombiner::TupleElementsComputeSameValue(
    HloInstruction* tuple_shaped_instruction, int64_t i1, int64_t i2,
    absl::flat_hash_map<int64_t, int64_t>* visited_pairs) {
  absl::flat_hash_set<HloInstruction*> visited;
  auto tuples = GetAllTuples(tuple_shaped_instruction, &visited);
  if (!tuples) {
    return false;
  }
  for (auto tuple : *tuples) {
    CHECK_EQ(tuple->opcode(), HloOpcode::kTuple);
    if (!InstructionsComputeSameValue(tuple->mutable_operand(i1),
                                      tuple->mutable_operand(i2),
                                      visited_pairs)) {
      return false;
    }
  }
  return true;
}
bool ArCrsCombiner::TestInstructionsComputeSameValue(HloInstruction* i1,
                                                     HloInstruction* i2) {
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/false);
  auto module = i1->GetModule();
  CHECK_EQ(module, i2->GetModule());
  combiner.call_graph_ = CallGraph::Build(module);
  absl::flat_hash_map<int64_t, int64_t> visited_pairs;
  return combiner.InstructionsComputeSameValue(i1, i2, &visited_pairs);
}
bool ArCrsCombiner::InstructionsComputeSameValue(
    HloInstruction* i1, HloInstruction* i2,
    absl::flat_hash_map<int64_t, int64_t>* visited_pairs) {
  if (i1 == i2) {
    return true;
  }
  auto uid1 = i1->unique_id();
  auto uid2 = i2->unique_id();
  auto min_uid = std::min(uid1, uid2);
  auto max_uid = std::max(uid1, uid2);
  auto it = visited_pairs->find(min_uid);
  if (it != visited_pairs->end() && max_uid == it->second) {
    return true;
  }
  auto opcode1 = i1->opcode();
  auto operands1 = i1->operands();
  if (opcode1 != i2->opcode() || operands1.size() != i2->operands().size()) {
    return false;
  }
  auto eq_computations = [](const HloComputation* a, const HloComputation* b) {
    return *a == *b;
  };
  auto eq_operands = [](const HloInstruction*, const HloInstruction*) {
    return true;
  };
  if (i1->IsCrossModuleAllReduce()) {
    return i1->Identical(*i2, eq_operands, eq_computations,
                         /*layout_sensitive=*/false);
  }
  // Mark the pair as visited before recursing, so that cycles through while
  // bodies terminate.
  visited_pairs->emplace(min_uid, max_uid);
  for (int i = 0; i < operands1.size(); ++i) {
    auto operand1 = operands1[i];
    auto operand2 = i2->operands()[i];
    if (!InstructionsComputeSameValue(operand1, operand2, visited_pairs)) {
      return false;
    }
  }
  if (opcode1 == HloOpcode::kParameter) {
    return false;
  }
  if (opcode1 == HloOpcode::kGetTupleElement) {
    return i1->tuple_index() == i2->tuple_index() ||
           TupleElementsComputeSameValue(operands1[0], i1->tuple_index(),
                                         i2->tuple_index(), visited_pairs);
  }
  auto eq_instructions = [](const HloInstruction* i1,
                            const HloInstruction* i2) -> bool { return true; };
  return i1->Identical(*i2, eq_instructions, eq_computations,
                       /*layout_sensitive=*/false);
}
void ArCrsCombiner::GroupAllReducesById(HloModule* module) {
  // AllReduce ids of AR/CRS pairs that are discarded because an AR with the
  // same id was paired with a different CRS, or because the CRS was already
  // reserved by a closer AR.
  absl::flat_hash_set<int64_t> discarded_ar_ids;
  for (HloComputation* computation : module->MakeNonfusionComputations()) {
    for (HloInstruction* instruction : computation->instructions()) {
      auto maybe_pair = MatchesArCrsPattern(instruction);
      if (maybe_pair) {
        auto pair = *maybe_pair;
        int64_t ar_id = *(instruction->channel_id());
        if (discarded_ar_ids.find(ar_id) != discarded_ar_ids.end()) {
          continue;
        }
        auto it = crs_reserved_map_.find(pair.crs);
        if (it != crs_reserved_map_.end()) {
          auto prev_ar_id = it->second;
          CHECK(all_reduce_map_.find(ar_id) == all_reduce_map_.end());
          CHECK_NE(prev_ar_id, ar_id);
          auto prev_pair = all_reduce_map_[prev_ar_id].back();
          int64_t prev_distance = prev_pair.distance;
          if (prev_distance < pair.distance) {
            // The current AR is farther away from the CRS than the previously
            // reserved one, so prefer it and discard the previous pair.
            VLOG(2) << "Replacing ArCrsPair: " << prev_pair.ToString()
                    << " with ArCrsPair: " << pair.ToString();
            all_reduce_map_.erase(prev_ar_id);
            discarded_ar_ids.insert(prev_ar_id);
            all_reduce_map_[ar_id].push_back(pair);
            crs_reserved_map_[pair.crs] = ar_id;
          } else {
            discarded_ar_ids.insert(ar_id);
          }
        } else {
          if (all_reduce_map_.find(ar_id) != all_reduce_map_.end()) {
            int64_t prev_distance = all_reduce_map_[ar_id].back().distance;
            CHECK_EQ(prev_distance, pair.distance)
                << "All ARs with the same AR ID must have the same distance "
                   "from the corresponding CRSs. Found: "
                << prev_distance << " and " << pair.distance;
          }
          all_reduce_map_[ar_id].push_back(pair);
          crs_reserved_map_[pair.crs] = ar_id;
        }
      }
    }
  }
}
absl::Status ArCrsCombiner::KeepProvablyEqualInstructionGroupsMPMD() {
  for (auto it = all_reduce_map_.begin(); it != all_reduce_map_.end();) {
    auto copy_it = it++;  // Advance `it` now; copy_it's entry may be erased.
    auto channel_id = copy_it->first;
    VLOG(2)
        << "KeepProvablyEqualInstructionGroups. Checking AllReduce channel id: "
        << channel_id << "\n";
    auto pairs_vec = copy_it->second;
    TF_RET_CHECK(pairs_vec.size() == num_spatial_partitions_);
    auto instr_0 = pairs_vec[0].ar;
    for (int i = 1; i < pairs_vec.size(); ++i) {
      auto instr_i = pairs_vec[i].ar;
      auto next_0 = instr_0->users()[0];
      auto next_i = instr_i->users()[0];
      absl::flat_hash_map<int64_t, int64_t> visited_pairs;
      while (true) {
        if (!InstructionsComputeSameValue(next_0, next_i, &visited_pairs)) {
          all_reduce_map_.erase(copy_it);
          VLOG(2) << "KeepProvablyEqualInstructionGroups. Erased AllReduce "
                     "channel id: "
                  << channel_id << "\n";
          break;
        }
        if (next_0->IsCrossReplicaAllReduce()) {
          break;
        }
        next_0 = next_0->users()[0];
        next_i = next_i->users()[0];
      }
    }
  }
  return absl::OkStatus();
}
absl::Status ArCrsCombiner::KeepProvablyEqualInstructionGroupsSPMD(
    HloModule* module) {
  TF_ASSIGN_OR_RETURN(
      auto replication_analysis,
      HloReplicationAnalysis::Run(module, /*cross_partition_spmd=*/true));
  for (auto it = all_reduce_map_.begin(); it != all_reduce_map_.end();) {
    auto copy_it = it++;  // Advance `it` now; copy_it's entry may be erased.
    auto channel_id = copy_it->first;
    VLOG(2)
        << "KeepProvablyEqualInstructionGroups. Checking AllReduce channel id: "
        << channel_id << "\n";
    auto pairs_vec = copy_it->second;
    TF_RET_CHECK(pairs_vec.size() == 1);
    auto instr = pairs_vec[0].ar;
    auto next = instr->users()[0];
    while (true) {
      TF_RET_CHECK(next->shape().IsArray());
      if (!replication_analysis->HloInstructionIsReplicatedAt(next, {})) {
        all_reduce_map_.erase(copy_it);
        VLOG(2) << "KeepProvablyEqualInstructionGroups. Erased AllReduce "
                   "channel id: "
                << channel_id << "\n";
        break;
      }
      if (next->IsCrossReplicaAllReduce()) {
        break;
      }
      next = next->users()[0];
    }
  }
  return absl::OkStatus();
}
absl::StatusOr<bool> ArCrsCombiner::RewriteGraph() {
  if (all_reduce_map_.empty()) {
    return false;
  }
  for (const auto& it : all_reduce_map_) {
    auto pairs_vec = it.second;
    for (auto pair : pairs_vec) {
      auto all_reduce = pair.ar;
      auto parent_computation = all_reduce->parent();
      auto channel_id = all_reduce->channel_id();
      auto prev = all_reduce->mutable_operand(0);
      auto next = all_reduce->users()[0];
      TF_CHECK_OK(all_reduce->ReplaceUseWith(next, prev));
      TF_CHECK_OK(parent_computation->RemoveInstruction(all_reduce));
      while (!next->IsCrossReplicaAllReduce()) {
        switch (next->opcode()) {
          case HloOpcode::kBitcast:
          case HloOpcode::kTranspose:
          case HloOpcode::kReshape:
          case HloOpcode::kConvert:
          case HloOpcode::kMultiply:
            break;
          case HloOpcode::kAdd:
          case HloOpcode::kSubtract: {
            auto other_operand = (next->operands()[0] == prev)
                                     ? next->operands()[1]
                                     : next->operands()[0];
            if (other_operand->IsCrossModuleAllReduce() &&
                other_operand->user_count() == 1) {
              TF_CHECK_OK(other_operand->ReplaceAllUsesWith(
                  other_operand->mutable_operand(0)));
            } else {
              // Dividing the other summand by the partition count keeps the
              // total sum unchanged once the combined all-reduce runs over
              // both replicas and partitions: each of the P partitions now
              // contributes c / P instead of a single partition contributing
              // c.
              auto shape = other_operand->shape();
              Literal lit(shape);
              lit.PopulateWithValue<float>(num_spatial_partitions_);
              auto divisor = parent_computation->AddInstruction(
                  HloInstruction::CreateConstant(lit.Clone()));
              auto division = parent_computation->AddInstruction(
                  HloInstruction::CreateBinary(shape, HloOpcode::kDivide,
                                               other_operand, divisor));
              TF_CHECK_OK(other_operand->ReplaceUseWith(next, division));
            }
            break;
          }
          default:
            LOG(FATAL) << "Unexpected instruction: " << next->ToShortString();
        }
        prev = next;
        next = next->users()[0];
      }
      next->set_channel_id(channel_id);
    }
  }
  return true;
}
absl::StatusOr<bool> ArCrsCombiner::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  call_graph_ = CallGraph::Build(module);
  GroupAllReducesById(module);
  if (spmd_partition_) {
    TF_RETURN_IF_ERROR(KeepProvablyEqualInstructionGroupsSPMD(module));
  } else {
    TF_RETURN_IF_ERROR(KeepProvablyEqualInstructionGroupsMPMD());
  }
  TF_ASSIGN_OR_RETURN(auto changed, RewriteGraph());
  if (module->config().replica_count() > 1 && spmd_partition_) {
    TF_ASSIGN_OR_RETURN(auto replaced, ReplaceReplicatedAllReduce(
                                           module, num_spatial_partitions_));
    changed |= replaced;
  }
  return changed;
}
}  // namespace xla
Unit Test - (Ground Truth):
#include "xla/service/ar_crs_combiner.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class ArCrsCombinerTest : public HloTestBase {};
TEST_F(ArCrsCombinerTest, SameValueTestBasecase) {
  const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) {
  %p = f32[2,2] parameter(0)
  %constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}})
  %constant.f32.2 = f32[2,2] constant({{1, 2}, {3, 4}})
  ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_str));
  auto root_tuple = module->entry_computation()->root_instruction();
  auto i1 = root_tuple->operands()[0];
  auto i2 = root_tuple->operands()[1];
  EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(
      i1, module->entry_computation()->parameter_instruction(0)));
  EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestBasecase2) {
  const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (x: f32[]) -> (f32[], f32[]) {
  %x = f32[] parameter(0)
  ROOT %tuple = (f32[], f32[]) tuple(%x, %x)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_str));
  auto root_tuple = module->entry_computation()->root_instruction();
  auto i1 = root_tuple->operands()[0];
  auto i2 = root_tuple->operands()[1];
  EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestBasecase3) {
  const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (x: f32[], y: f32[]) ->
(f32[], f32[]) {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %tuple = (f32[], f32[]) tuple(%x, %y)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_tuple = module->entry_computation()->root_instruction();\n auto i1 = root_tuple->operands()[0];\n auto i2 = root_tuple->operands()[1];\n EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestNumOperands) {\n const char* module_str = R\"(\nHloModule foobar\nENTRY %entrycomp (p: f32[2,2]) -> ((f32[2,2]), (f32[2,2], f32[2,2])) {\n %p = f32[2,2] parameter(0)\n %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})\n %tuple1 = (f32[2,2]) tuple(%constant.f32)\n %tuple2 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)\n ROOT %tuple = ((f32[2,2]), (f32[2,2], f32[2,2])) tuple(%tuple1, %tuple2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_tuple = module->entry_computation()->root_instruction();\n auto i1 = root_tuple->operands()[0];\n auto i2 = root_tuple->operands()[1];\n EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestSliceIndicesMatch) {\n const char* module_str = R\"(\nHloModule foobar\nENTRY %entrycomp (p: f32[2]) -> (f32[1], f32[1]) {\n %p = f32[2] parameter(0)\n %slice.1 = f32[1] slice(f32[2] %p), slice={[0:1]}\n %slice.2 = f32[1] slice(f32[2] %p), slice={[0:1]}\n ROOT %tuple = (f32[1], f32[1]) tuple(%slice.1, %slice.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_tuple = module->entry_computation()->root_instruction();\n auto i1 = root_tuple->operands()[0];\n auto i2 = root_tuple->operands()[1];\n EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestSliceIndicesDontMatch) {\n const char* module_str = R\"(\nHloModule foobar\nENTRY %entrycomp (p: f32[2]) -> (f32[1], f32[1]) {\n %p = f32[2] parameter(0)\n %slice.1 = f32[1] slice(f32[2] %p), slice={[0:1]}\n %slice.2 = f32[1] slice(f32[2] %p), slice={[1:2]}\n ROOT %tuple = (f32[1], f32[1]) tuple(%slice.1, %slice.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_tuple = module->entry_computation()->root_instruction();\n auto i1 = root_tuple->operands()[0];\n auto i2 = root_tuple->operands()[1];\n EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestTupleElementSameIndex) {\n const char* module_str = R\"(\nHloModule foobar\nENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) {\n %p = f32[2,2] parameter(0)\n %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})\n %tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)\n %get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0\n %get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=0\n ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_tuple = module->entry_computation()->root_instruction();\n auto i1 = root_tuple->operands()[0];\n auto i2 = root_tuple->operands()[1];\n EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestTupleElementDifferentIndex1) {\n const char* 
module_str = R\"(\nHloModule foobar\nENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) {\n %p = f32[2,2] parameter(0)\n %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})\n %tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)\n %get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0\n %get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=1\n ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_tuple = module->entry_computation()->root_instruction();\n auto i1 = root_tuple->operands()[0];\n auto i2 = root_tuple->operands()[1];\n EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestTupleElementDifferentIndex2) {\n const char* module_str = R\"(\nHloModule foobar\nENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) {\n %p = f32[2,2] parameter(0)\n %constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}})\n %constant.f32.2 = f32[2,2] constant({{2, 3}, {4, 5}})\n %tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2)\n %get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0\n %get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=1\n ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_tuple = module->entry_computation()->root_instruction();\n auto i1 = root_tuple->operands()[0];\n auto i2 = root_tuple->operands()[1];\n EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestWhile1) {\n const char* module_str = R\"(\nHloModule foobar\n%condition (x: (f32[2,2], f32[2,2])) -> pred[] {\n %x = (f32[2,2], f32[2,2]) parameter(0)\n %constant.0 = s32[] constant(0)\n %constant.1 = s32[] constant(1)\n ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %constant.0), direction=GT\n}\n%body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {\n %x = (f32[2,2], f32[2,2]) parameter(0)\n %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})\n %get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0\n %get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1\n %add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32)\n %add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32)\n ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2)\n}\nENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) {\n %constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}})\n %init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)\n ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_while = module->entry_computation()->root_instruction();\n auto body_tuple = root_while->while_body()->root_instruction();\n auto i1 = body_tuple->operands()[0];\n auto i2 = body_tuple->operands()[1];\n EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestWhile2) {\n const char* module_str = R\"(\nHloModule foobar\n%condition (x: (f32[2,2], f32[2,2])) -> pred[] {\n %x = (f32[2,2], f32[2,2]) parameter(0)\n %constant.0 = s32[] constant(0)\n %constant.1 = s32[] constant(1)\n ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] 
%constant.0), direction=GT\n}\n%body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {\n %x = (f32[2,2], f32[2,2]) parameter(0)\n %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})\n %get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0\n %get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1\n %add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32)\n %add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32)\n ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2)\n}\nENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) {\n %constant.f32.1 = f32[2,2] constant({{3, 4}, {5, 6}})\n %constant.f32.2 = f32[2,2] constant({{3, 4}, {7, 8}})\n %init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2)\n ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_while = module->entry_computation()->root_instruction();\n auto body_tuple = root_while->while_body()->root_instruction();\n auto i1 = body_tuple->operands()[0];\n auto i2 = body_tuple->operands()[1];\n EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestWhile3) {\n const char* module_str = R\"(\nHloModule foobar\n%condition (x: (f32[2,2], f32[2,2])) -> pred[] {\n %x = (f32[2,2], f32[2,2]) parameter(0)\n %constant.0 = s32[] constant(0)\n %constant.1 = s32[] constant(1)\n ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %constant.0), direction=GT\n}\n%body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {\n %x = (f32[2,2], f32[2,2]) parameter(0)\n %constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}})\n %constant.f32.2 = f32[2,2] constant({{3, 4}, {1, 2}})\n %get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0\n %get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1\n %add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32.1)\n %add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32.2)\n ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2)\n}\nENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) {\n %constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}})\n %init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)\n ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_while = module->entry_computation()->root_instruction();\n auto body_tuple = root_while->while_body()->root_instruction();\n auto i1 = body_tuple->operands()[0]->operands()[0]; \n auto i2 = body_tuple->operands()[1]->operands()[0]; \n EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nTEST_F(ArCrsCombinerTest, SameValueTestNestedWhile) {\n const char* module_str = R\"(\nHloModule foobar\n%condition (x: (f32[2,2], f32[2,2])) -> pred[] {\n %x = (f32[2,2], f32[2,2]) parameter(0)\n ROOT %t = pred[] constant(true)\n}\n%body_inner (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {\n %x = (f32[2,2], f32[2,2]) parameter(0)\n %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})\n %gte.1 = f32[2,2] get-tuple-element(%x), index=0\n %gte.2 = f32[2,2] get-tuple-element(%x), index=1\n %add.1 = f32[2,2] add(%gte.1, %constant.f32)\n %add.2 = f32[2,2] add(%gte.2, %constant.f32)\n ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2)\n}\n%body_outer (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {\n %x = (f32[2,2], f32[2,2]) parameter(0)\n %gte.1 
= f32[2,2] get-tuple-element(%x), index=0\n %gte.2 = f32[2,2] get-tuple-element(%x), index=1\n %init = (f32[2,2], f32[2,2]) tuple(%gte.1, %gte.2)\n ROOT %while.1 = (f32[2,2], f32[2,2]) while(%init), condition=%condition,\n body=%body_inner\n}\nENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) {\n %constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}})\n %init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)\n ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition,\n body=%body_outer\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto root_while = module->entry_computation()->root_instruction();\n auto inner_while = root_while->while_body()->root_instruction();\n auto i1 = inner_while->while_body()->root_instruction()->operands()[0];\n auto i2 = inner_while->while_body()->root_instruction()->operands()[1];\n EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));\n}\nvoid CompareReplicaGroups(absl::Span groups_before,\n absl::Span groups_after) {\n ASSERT_EQ(groups_before.size(), groups_after.size());\n for (int i = 0; i < groups_before.size(); ++i) {\n auto group_before = groups_before[i];\n std::vector ids_before(group_before.replica_ids().begin(),\n group_before.replica_ids().end());\n auto group_after = groups_after[i];\n std::vector ids_after(group_after.replica_ids().begin(),\n group_after.replica_ids().end());\n EXPECT_EQ(ids_before, ids_after);\n }\n}\nTEST_F(ArCrsCombinerTest, RewriteArConvertCrs) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {\n %a = bf16[] parameter(0)\n %b = bf16[] parameter(1)\n ROOT %add = bf16[] add(%a, %b)\n}\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: bf16[]) -> (f32[], f32[]) {\n %p = bf16[] parameter(0)\n %constant.bf16 = bf16[] constant(1)\n %all-reduce.ar.1 = bf16[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.bf16,\n sharding={maximal device=0}\n %convert.1 = f32[]\n convert(%all-reduce.ar.1),\n sharding={maximal device=0}\n %all-reduce.1 = f32[]\n all-reduce(%convert.1),\n replica_groups={{0,1}},\n to_apply=%sum.f32,\n sharding={maximal device=0}\n %all-reduce.ar.2 = bf16[]\n all-reduce(%constant.bf16),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.bf16,\n sharding={maximal device=1}\n %convert.2 = f32[]\n convert(%all-reduce.ar.2),\n sharding={maximal device=1}\n %all-reduce.2 = f32[]\n all-reduce(%convert.2),\n replica_groups={{0,1}},\n to_apply=%sum.f32,\n sharding={maximal device=1}\n ROOT %tuple = (f32[], f32[])\n tuple(%all-reduce.1, %all-reduce.2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Convert(op::Parameter())),\n op::AllReduce(op::Convert(op::Constant()))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, 
replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteArConvertCrsSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {\n %a = bf16[] parameter(0)\n %b = bf16[] parameter(1)\n ROOT %add = bf16[] add(%a, %b)\n}\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: bf16[]) -> (f32[]) {\n %p = bf16[] parameter(0)\n %all-reduce.ar.1 = bf16[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.bf16\n %convert.1 = f32[] convert(%all-reduce.ar.1)\n %all-reduce.1 = f32[]\n all-reduce(%convert.1),\n replica_groups={{0,1}},\n to_apply=%sum.f32\n ROOT %tuple = (f32[]) tuple(%all-reduce.1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2, true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Convert(op::Parameter()))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteArBitcastCrs) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.1 (a: f32[2,1], b: f32[2,1]) -> f32[2,1] {\n %a = f32[2,1] parameter(0)\n %b = f32[2,1] parameter(1)\n ROOT %add = f32[2,1] add(%a, %b)\n}\n%sum.2 (x: f32[2], y: f32[2]) -> f32[2] {\n %x = f32[2] parameter(0)\n %y = f32[2] parameter(1)\n ROOT %add = f32[2] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[2,1]) -> (f32[2], f32[2]) {\n %p = f32[2,1] parameter(0)\n %all-reduce.ar.1 = f32[2,1]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.1,\n sharding={maximal device=0}\n %bitcast.1 = f32[2]{0} bitcast(f32[2,1]{1,0} %all-reduce.ar.1)\n %all-reduce.1 = f32[2]\n all-reduce(%bitcast.1),\n replica_groups={{0,1}},\n to_apply=%sum.2,\n sharding={maximal device=0}\n %all-reduce.ar.2 = f32[2,1]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.1,\n sharding={maximal device=1}\n %bitcast.2 = f32[2]{0} bitcast(f32[2,1]{1,0} %all-reduce.ar.2)\n %all-reduce.2 = f32[2]\n all-reduce(%bitcast.2),\n replica_groups={{0,1}},\n to_apply=%sum.2,\n sharding={maximal device=1}\n ROOT %tuple = (f32[2], f32[2])\n tuple(%all-reduce.1, %all-reduce.2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Bitcast(op::Parameter())),\n op::AllReduce(op::Bitcast(op::Parameter()))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteArMultiplyCrs) {\n const char* module_str = 
R\"(\nHloModule foobar\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {\n %p = f32[] parameter(0)\n %constant.f32 = f32[] constant(123)\n %all-reduce.ar.1 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.f32,\n sharding={maximal device=0}\n %multiply.1 = f32[]\n multiply(%all-reduce.ar.1, %constant.f32),\n sharding={maximal device=0}\n %all-reduce.1 = f32[]\n all-reduce(%multiply.1),\n replica_groups={{0,1}},\n to_apply=%sum.f32,\n sharding={maximal device=0}\n %all-reduce.ar.2 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.f32,\n sharding={maximal device=1}\n %multiply.2 = f32[]\n multiply(%all-reduce.ar.2, %constant.f32),\n sharding={maximal device=1}\n %all-reduce.2 = f32[]\n all-reduce(%multiply.2),\n replica_groups={{0,1}},\n to_apply=%sum.f32,\n sharding={maximal device=1}\n ROOT %tuple = (f32[], f32[])\n tuple(%all-reduce.1, %all-reduce.2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Multiply(op::Parameter(), op::Constant())),\n op::AllReduce(op::Multiply(op::Parameter(), op::Constant()))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteArMultiplyCrsSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[]) {\n %p = f32[] parameter(0)\n %constant.f32 = f32[] constant(123)\n %all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}},\n channel_id=1, to_apply=%sum.f32\n %multiply.1 = f32[] multiply(%all-reduce.ar.1, %constant.f32)\n %all-reduce.1 = f32[] all-reduce(%multiply.1), replica_groups={{0,1}},\n to_apply=%sum.f32, sharding={maximal device=0}\n ROOT %tuple = (f32[]) tuple(%all-reduce.1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Multiply(op::Parameter(), op::Constant()))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteArConvertAddCrs) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {\n %a = bf16[] parameter(0)\n %b = bf16[] parameter(1)\n ROOT %add = bf16[] add(%a, 
%b)\n}\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {\n %p = f32[] parameter(0)\n %constant.bf16 = bf16[] constant(1)\n %constant.f32 = f32[] constant(2)\n %all-reduce.ar.1 = bf16[]\n all-reduce(%constant.bf16),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.bf16,\n sharding={maximal device=0}\n %convert.1 = f32[]\n convert(%all-reduce.ar.1),\n sharding={maximal device=0}\n %add.1 = f32[]\n add(%constant.f32, %convert.1),\n sharding={maximal device=0}\n %all-reduce.1 = f32[]\n all-reduce(%add.1),\n replica_groups={{0,1}},\n to_apply=%sum.f32,\n sharding={maximal device=0}\n %all-reduce.ar.2 = bf16[]\n all-reduce(%constant.bf16),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.bf16,\n sharding={maximal device=1}\n %convert.2 = f32[]\n convert(%all-reduce.ar.2),\n sharding={maximal device=1}\n %add.2 = f32[]\n add(%constant.f32, %convert.2),\n sharding={maximal device=1}\n %all-reduce.2 = f32[]\n all-reduce(%add.2),\n replica_groups={{0,1}},\n to_apply=%sum.f32,\n sharding={maximal device=1}\n ROOT %tuple = (f32[], f32[])\n tuple(%all-reduce.1, %all-reduce.2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Tuple(\n op::AllReduce(op::Add(op::Divide(op::Constant(), op::Constant()),\n op::Convert())),\n op::AllReduce(op::Add(op::Divide(op::Constant(), op::Constant()),\n op::Convert()))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteArConvertAddCrsSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {\n %a = bf16[] parameter(0)\n %b = bf16[] parameter(1)\n ROOT %add = bf16[] add(%a, %b)\n}\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[]) {\n %p = f32[] parameter(0)\n %constant.bf16 = bf16[] constant(1)\n %constant.f32 = f32[] constant(2)\n %all-reduce.ar.1 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}},\n channel_id=1, to_apply=%sum.bf16\n %convert.1 = f32[] convert(%all-reduce.ar.1), sharding={maximal device=0}\n %add.1 = f32[] add(%constant.f32, %convert.1)\n %all-reduce.1 = f32[] all-reduce(%add.1), replica_groups={{0,1}},\n to_apply=%sum.f32\n ROOT %tuple = (f32[]) tuple(%all-reduce.1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Add(\n op::Divide(op::Constant(), op::Constant()), op::Convert()))));\n auto 
crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, OtherSummandNotTheSameDontRewrite) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {\n %a = bf16[] parameter(0)\n %b = bf16[] parameter(1)\n ROOT %add = bf16[] add(%a, %b)\n}\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {\n %p = f32[] parameter(0)\n %constant.bf16 = bf16[] constant(1)\n %constant.f32.1 = f32[] constant(2)\n %constant.f32.2 = f32[] constant(3)\n %all-reduce.ar.1 = bf16[]\n all-reduce(%constant.bf16),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.bf16,\n sharding={maximal device=0}\n %convert.1 = f32[]\n convert(%all-reduce.ar.1),\n sharding={maximal device=0}\n %add.1 = f32[]\n add(%constant.f32.1, %convert.1),\n sharding={maximal device=0}\n %all-reduce.1 = f32[]\n all-reduce(%add.1),\n replica_groups={{0,1}},\n to_apply=%sum.f32,\n sharding={maximal device=0}\n %all-reduce.ar.2 = bf16[]\n all-reduce(%constant.bf16),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.bf16,\n sharding={maximal device=1}\n %convert.2 = f32[]\n convert(%all-reduce.ar.2),\n sharding={maximal device=1}\n %add.2 = f32[]\n add(%constant.f32.2, %convert.2),\n sharding={maximal device=1}\n %all-reduce.2 = f32[]\n all-reduce(%add.2),\n replica_groups={{0,1}},\n to_apply=%sum.f32,\n sharding={maximal device=1}\n ROOT %tuple = (f32[], f32[])\n tuple(%all-reduce.1, %all-reduce.2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_FALSE(changed);\n}\nTEST_F(ArCrsCombinerTest, OtherSummandNotTheSameDontRewriteSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {\n %a = bf16[] parameter(0)\n %b = bf16[] parameter(1)\n ROOT %add = bf16[] add(%a, %b)\n}\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[]) {\n %p = f32[] parameter(0)\n %constant.bf16 = bf16[] constant(1)\n %constant.f32.1 = f32[] constant(2)\n %all-reduce.ar.1 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}},\n channel_id=1, to_apply=%sum.bf16\n %convert.1 = f32[] convert(%all-reduce.ar.1)\n %add.1 = f32[] add(%p, %convert.1)\n %all-reduce.1 = f32[] all-reduce(%add.1), replica_groups={{0,1}}, to_apply=%sum.f32\n ROOT %tuple = (f32[]) tuple(%all-reduce.1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n ArCrsCombiner combiner(2,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_FALSE(changed);\n}\nTEST_F(ArCrsCombinerTest, ArThenCrsDontCrash) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.1 (a: f32[], b: f32[]) -> f32[] {\n %a = f32[] parameter(0)\n %b = f32[] parameter(1)\n ROOT %add = f32[] add(%a, %b)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {\n %p = f32[] parameter(0)\n %constant.f32 = f32[] constant(123)\n %all-reduce.ar.1 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.1,\n 
sharding={maximal device=0}\n %all-reduce.1 = f32[]\n all-reduce(%all-reduce.ar.1),\n replica_groups={{0,1}},\n to_apply=%sum.1,\n sharding={maximal device=0}\n %multiply.1 = f32[]\n multiply(%all-reduce.1, %constant.f32),\n sharding={maximal device=0}\n %all-reduce.ar.2 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.1,\n sharding={maximal device=1}\n %all-reduce.2 = f32[]\n all-reduce(%all-reduce.ar.2),\n replica_groups={{0,1}},\n to_apply=%sum.1,\n sharding={maximal device=1}\n %multiply.2 = f32[]\n multiply(%all-reduce.2, %constant.f32),\n sharding={maximal device=1}\n ROOT %tuple = (f32[], f32[])\n tuple(%all-reduce.1, %all-reduce.2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Parameter()),\n op::AllReduce(op::Parameter())));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteMultipleAdds) {\n const char* module_str = R\"(\nHloModule foobar\n%sum (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {\n %p = f32[] parameter(0)\n %constant.1 = f32[] constant(1)\n %constant.2 = f32[] constant(2)\n %all-reduce.ar.1 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum,\n sharding={maximal device=0}\n %add.11 = f32[]\n add(%constant.1, %all-reduce.ar.1),\n sharding={maximal device=0}\n %add.12 = f32[]\n add(%constant.2, %add.11),\n sharding={maximal device=0}\n %all-reduce.1 = f32[]\n all-reduce(%add.12),\n replica_groups={{0,1}},\n to_apply=%sum,\n sharding={maximal device=0}\n %all-reduce.ar.2 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum,\n sharding={maximal device=0}\n %add.21 = f32[]\n add(%constant.1, %all-reduce.ar.2),\n sharding={maximal device=0}\n %add.22 = f32[]\n add(%constant.2, %add.21),\n sharding={maximal device=0}\n %all-reduce.2 = f32[]\n all-reduce(%add.22),\n replica_groups={{0,1}},\n to_apply=%sum,\n sharding={maximal device=0}\n ROOT %tuple = (f32[], f32[])\n tuple(%all-reduce.1, %all-reduce.2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Add(\n op::Divide(op::Constant(), op::Constant()),\n op::Add(op::Divide(op::Constant(), op::Constant()),\n op::Parameter()))),\n op::AllReduce(op::Add(\n op::Divide(op::Constant(), op::Constant()),\n op::Add(op::Divide(op::Constant(), op::Constant()),\n op::Parameter())))));\n auto crs_after 
=\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteMultipleAddsSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[]) {\n %p = f32[] parameter(0)\n %constant.1 = f32[] constant(1)\n %constant.2 = f32[] constant(2)\n %all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}},\n channel_id=1, to_apply=%sum\n %add.11 = f32[] add(%constant.1, %all-reduce.ar.1)\n %add.12 = f32[] add(%constant.2, %add.11)\n %all-reduce.1 = f32[] all-reduce(%add.12), replica_groups={{0,1}}, to_apply=%sum\n ROOT %tuple = (f32[]) tuple(%all-reduce.1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(\n op::Add(op::Divide(op::Constant(), op::Constant()),\n op::Add(op::Divide(op::Constant(), op::Constant()),\n op::Parameter())))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteArSubtractCrs) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {\n %p = f32[] parameter(0)\n %constant.f32 = f32[] constant(123)\n %all-reduce.ar.1 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.f32,\n sharding={maximal device=0}\n %sub.1 = f32[]\n subtract(%constant.f32, %all-reduce.ar.1),\n sharding={maximal device=0}\n %all-reduce.1 = f32[]\n all-reduce(%sub.1),\n replica_groups={{0,1}},\n to_apply=%sum.f32,\n sharding={maximal device=0}\n %all-reduce.ar.2 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum.f32,\n sharding={maximal device=1}\n %sub.2 = f32[]\n subtract(%constant.f32, %all-reduce.ar.2),\n sharding={maximal device=1}\n %all-reduce.2 = f32[]\n all-reduce(%sub.2),\n replica_groups={{0,1}},\n to_apply=%sum.f32,\n sharding={maximal device=1}\n ROOT %tuple = (f32[], f32[])\n tuple(%all-reduce.1, %all-reduce.2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Tuple(\n op::AllReduce(op::Subtract(op::Divide(op::Constant(), op::Constant()),\n op::Parameter())),\n op::AllReduce(op::Subtract(op::Divide(op::Constant(), op::Constant()),\n op::Parameter()))));\n auto crs_after =\n 
module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteArSubtractCrsSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[]) {\n %p = f32[] parameter(0)\n %constant.f32 = f32[] constant(123)\n %all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}},\n channel_id=1, to_apply=%sum.f32\n %sub.1 = f32[] subtract(%constant.f32, %all-reduce.ar.1)\n %all-reduce.1 = f32[] all-reduce(%sub.1), replica_groups={{0,1}},\n to_apply=%sum.f32\n ROOT %tuple = (f32[]) tuple(%all-reduce.1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Subtract(\n op::Divide(op::Constant(), op::Constant()), op::Parameter()))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteMultipleARsLeft) {\n const char* module_str = R\"(\nHloModule foobar\n%sum (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {\n %p = f32[] parameter(0)\n %const1 = f32[] constant(1)\n %const2 = f32[] constant(2)\n %ar11 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum,\n sharding={maximal device=0}\n %add11 = f32[]\n add(%ar11, %const1),\n sharding={maximal device=0}\n %ar12 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=2,\n to_apply=%sum,\n sharding={maximal device=0}\n %add12 = f32[]\n add(%add11, %ar12),\n sharding={maximal device=0}\n %crs1 = f32[]\n all-reduce(%add12),\n replica_groups={{0,1}},\n to_apply=%sum,\n sharding={maximal device=0}\n %ar21 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum,\n sharding={maximal device=1}\n %add21 = f32[]\n add(%ar21, %const1),\n sharding={maximal device=1}\n %ar22 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=2,\n to_apply=%sum,\n sharding={maximal device=1}\n %add22 = f32[]\n add(%add21, %ar22),\n sharding={maximal device=1}\n %crs2 = f32[]\n all-reduce(%add22),\n replica_groups={{0,1}},\n to_apply=%sum,\n sharding={maximal device=1}\n ROOT %tuple = (f32[], f32[])\n tuple(%crs1, %crs2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Add(\n 
op::Add(op::Parameter(),\n op::Divide(op::Constant(), op::Constant())),\n op::Parameter())),\n op::AllReduce(op::Add(\n op::Add(op::Parameter(),\n op::Divide(op::Constant(), op::Constant())),\n op::Parameter()))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteMultipleARsLeftSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[]) {\n %p = f32[] parameter(0)\n %const1 = f32[] constant(1)\n %const2 = f32[] constant(2)\n %ar11 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1,\n to_apply=%sum\n %add11 = f32[] add(%ar11, %const1)\n %ar12 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=2,\n to_apply=%sum\n %add12 = f32[] add(%add11, %ar12)\n %crs1 = f32[] all-reduce(%add12), replica_groups={{0,1}},\n to_apply=%sum\n ROOT %tuple = (f32[]) tuple(%crs1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Add(\n op::Add(op::Parameter(), op::Divide(op::Constant(), op::Constant())),\n op::Parameter()))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteMultipleARsRight) {\n const char* module_str = R\"(\nHloModule foobar\n%sum (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {\n %p = f32[] parameter(0)\n %const1 = f32[] constant(1)\n %const2 = f32[] constant(2)\n %ar11 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum,\n sharding={maximal device=0}\n %ar12 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=2,\n to_apply=%sum,\n sharding={maximal device=0}\n %add11 = f32[]\n add(%ar12, %const1),\n sharding={maximal device=0}\n %add12 = f32[]\n add(%ar11, %add11),\n sharding={maximal device=0}\n %crs1 = f32[]\n all-reduce(%add12),\n replica_groups={{0,1}},\n to_apply=%sum,\n sharding={maximal device=0}\n %ar21 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=1,\n to_apply=%sum,\n sharding={maximal device=1}\n %ar22 = f32[]\n all-reduce(%p),\n replica_groups={{0},{1}},\n channel_id=2,\n to_apply=%sum,\n sharding={maximal device=1}\n %add21 = f32[]\n add(%ar22, %const1),\n sharding={maximal device=1}\n %add22 = f32[]\n add(%ar21, %add21),\n sharding={maximal device=1}\n %crs2 = f32[]\n all-reduce(%add22),\n replica_groups={{0,1}},\n to_apply=%sum,\n sharding={maximal device=1}\n ROOT %tuple = (f32[], f32[])\n tuple(%crs1, %crs2),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n 
module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Add(\n op::Parameter(),\n op::Add(op::Parameter(),\n op::Divide(op::Constant(), op::Constant())))),\n op::AllReduce(op::Add(\n op::Parameter(),\n op::Add(op::Parameter(),\n op::Divide(op::Constant(), op::Constant()))))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, RewriteMultipleARsRightSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[]) -> (f32[]) {\n %p = f32[] parameter(0)\n %const1 = f32[] constant(1)\n %const2 = f32[] constant(2)\n %ar11 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum\n %ar12 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=2, to_apply=%sum\n %add11 = f32[] add(%ar12, %const1)\n %add12 = f32[] add(%ar11, %add11)\n %crs1 = f32[] all-reduce(%add12), replica_groups={{0,1}}, to_apply=%sum\n ROOT %tuple = (f32[]) tuple(%crs1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n auto crs_before =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_before = crs_before->replica_groups();\n ArCrsCombiner combiner(2,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Tuple(op::AllReduce(op::Add(\n op::Parameter(),\n op::Add(op::Parameter(),\n op::Divide(op::Constant(), op::Constant()))))));\n auto crs_after =\n module->entry_computation()->root_instruction()->operands()[0];\n auto replica_groups_after = crs_after->replica_groups();\n CompareReplicaGroups(replica_groups_before, replica_groups_after);\n}\nTEST_F(ArCrsCombinerTest, OneReplicaDontRewrite) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {\n %a = bf16[] parameter(0)\n %b = bf16[] parameter(1)\n ROOT %add = bf16[] add(%a, %b)\n}\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: bf16[]) -> (f32[], f32[]) {\n %p = bf16[] parameter(0)\n %constant.bf16 = bf16[] constant(1)\n %all-reduce.ar.1 = bf16[]\n all-reduce(%p),\n replica_groups={{0}},\n channel_id=1,\n to_apply=%sum.bf16,\n sharding={maximal device=0}\n %convert.1 = f32[]\n convert(%all-reduce.ar.1),\n sharding={maximal device=0}\n %all-reduce.1 = f32[]\n all-reduce(%convert.1),\n replica_groups={{0}},\n to_apply=%sum.f32,\n sharding={maximal device=0}\n %all-reduce.ar.2 = bf16[]\n all-reduce(%constant.bf16),\n replica_groups={{0}},\n channel_id=1,\n to_apply=%sum.bf16,\n sharding={maximal device=1}\n %convert.2 = f32[]\n convert(%all-reduce.ar.2),\n sharding={maximal device=1}\n %all-reduce.2 = f32[]\n all-reduce(%convert.2),\n replica_groups={{0}},\n to_apply=%sum.f32,\n sharding={maximal device=1}\n ROOT %tuple = (f32[], f32[])\n tuple(%all-reduce.1, %all-reduce.2),\n sharding={{maximal device=0}, 
{maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 1));\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_FALSE(changed);\n}\nTEST_F(ArCrsCombinerTest, OneReplicaDontRewriteSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {\n %a = bf16[] parameter(0)\n %b = bf16[] parameter(1)\n ROOT %add = bf16[] add(%a, %b)\n}\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: bf16[]) -> (f32[]) {\n %p = bf16[] parameter(0)\n %constant.bf16 = bf16[] constant(1)\n %all-reduce.ar.1 = bf16[] all-reduce(%p), replica_groups={{0}},\n channel_id=1, to_apply=%sum.bf16\n %convert.1 = f32[] convert(%all-reduce.ar.1)\n %all-reduce.1 = f32[] all-reduce(%convert.1),\n replica_groups={{0}}, to_apply=%sum.f32\n ROOT %tuple = (f32[]) tuple(%all-reduce.1)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 1));\n ArCrsCombiner combiner(2,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_FALSE(changed);\n}\nTEST_F(ArCrsCombinerTest, SameValueTestConditional) {\n const char* module_str = R\"(\nHloModule foobar\nbranch_true {\n pt = (f32[2,4], f32[2,4]) parameter(0)\n gte.0 = f32[2,4] get-tuple-element(pt), index=0\n gte.1 = f32[2,4] get-tuple-element(pt), index=1\n ROOT tuple.t = (f32[2,4], f32[2,4]) tuple(gte.1, gte.0)\n}\nbranch_false {\n pf = (f32[2,4], f32[2,4]) parameter(0)\n gte.0 = f32[2,4] get-tuple-element(pf), index=0\n gte.1 = f32[2,4] get-tuple-element(pf), index=1\n add = f32[2,4] add(gte.1, gte.1)\n ROOT tuple.f = (f32[2,4], f32[2,4]) tuple(gte.0, add)\n}\nENTRY Parameters1.v4 {\n constant = pred[] constant(true)\n p = f32[2,4] parameter(0)\n tuple = (f32[2,4], f32[2,4]) tuple(p, p)\n ROOT conditional = (f32[2,4], f32[2,4]) conditional(constant, tuple, tuple), true_computation=branch_true, false_computation=branch_false\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str));\n auto cond = module->entry_computation()->root_instruction();\n auto branch_true = cond->branch_computation(0)->root_instruction();\n auto t0 = branch_true->mutable_operand(0);\n auto t1 = branch_true->mutable_operand(1);\n EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(t0, t1));\n auto branch_false = cond->branch_computation(1)->root_instruction();\n auto f0 = branch_false->mutable_operand(0);\n auto f1 = branch_false->mutable_operand(1);\n EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(f0, f1));\n}\nTEST_F(ArCrsCombinerTest, AllReduceWithReplicas) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: bf16[]) -> (f32[], f32[]) {\n %p = bf16[] parameter(0)\n %all-reduce.0 = f32[] all-reduce(%p), channel_id=1, replica_groups={{0,1}},\n to_apply=%sum.f32, sharding={maximal device=0}\n %all-reduce.1 = f32[] all-reduce(%p), channel_id=1, replica_groups={{0,1}},\n to_apply=%sum.f32, sharding={maximal device=1}\n %all-reduce.2 = f32[] all-reduce(%all-reduce.0), replica_groups={{0,1}},\n to_apply=%sum.f32, sharding={maximal device=0}\n %all-reduce.3 = f32[] all-reduce(%all-reduce.1), replica_groups={{0,1}},\n to_apply=%sum.f32, sharding={maximal device=1}\n ROOT %tuple = (f32[], 
f32[]) tuple(%all-reduce.2, %all-reduce.3),\n sharding={{maximal device=0}, {maximal device=1}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n ArCrsCombiner combiner(2,\n false);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_FALSE(changed);\n}\nTEST_F(ArCrsCombinerTest, AllReduceWithReplicasSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: bf16[]) -> (f32[]) {\n %p = bf16[] parameter(0)\n %all-reduce.0 = f32[] all-reduce(%p), channel_id=1, replica_groups={{0},{1}},\n to_apply=%sum.f32\n %all-reduce.2 = f32[] all-reduce(%all-reduce.0), replica_groups={{0},{1}},\n to_apply=%sum.f32\n ROOT %tuple = (f32[]) tuple(%all-reduce.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2));\n ArCrsCombiner combiner(2,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_FALSE(changed);\n}\nTEST_F(ArCrsCombinerTest, ReplaceReplicatedAllReduceSPMD) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: f32[2,4]) -> f32[2,4] {\n %p = f32[2,4] parameter(0), sharding={replicated}\n ROOT %all-reduce = f32[2,4] all-reduce(%p), to_apply=%sum.f32,\n replica_groups={{0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31}}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 32));\n ArCrsCombiner combiner(2,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n auto root = module->entry_computation()->root_instruction();\n EXPECT_THAT(root, op::Divide(op::AllReduce(op::Parameter()),\n op::Broadcast(op::Constant())));\n auto ar = root->operand(0);\n auto divisor = root->operand(1)->operand(0);\n EXPECT_TRUE(ar->channel_id());\n EXPECT_TRUE(divisor->literal().IsAllFloat(2));\n}\nTEST_F(ArCrsCombinerTest, AllReduceWithGlobalIdReplicaGroups) {\n const char* module_str = R\"(\nHloModule foobar\n%sum.f32 (x: f32[], y: f32[]) -> f32[] {\n %x = f32[] parameter(0)\n %y = f32[] parameter(1)\n ROOT %add = f32[] add(%x, %y)\n}\nENTRY %entrycomp (p: bf16[]) -> (f32[]) {\n %p = bf16[] parameter(0)\n %all-reduce.0 = f32[] all-reduce(%p), channel_id=1,\n replica_groups={{0,1,2,3},{4,5,6,7}}, use_global_device_ids=true,\n to_apply=%sum.f32\n %all-reduce.2 = f32[] all-reduce(%all-reduce.0), replica_groups={{0,1}},\n to_apply=%sum.f32\n ROOT %tuple = (f32[]) tuple(%all-reduce.2)\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr module,\n ParseAndReturnVerifiedModule(module_str, 2,\n 4));\n ArCrsCombiner combiner(4,\n true);\n auto changed = combiner.Run(module.get()).value();\n EXPECT_TRUE(changed);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/ar_crs_combiner.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/ar_crs_combiner_test.cc"},"Commit 
Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":247,"cells":{"ID":{"kind":"string","value":"ff6e614c-d7f4-4756-bc9e-cd60d9819967"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"data_transfer"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/data/service/data_transfer.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/data/service/data_transfer_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/data/service/data_transfer.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/strings/str_join.h\"\n#include \"tensorflow/core/framework/tensor.h\"\n#include \"tensorflow/core/framework/types.pb.h\"\n#include \"tensorflow/core/framework/variant.h\"\n#include \"tensorflow/core/platform/errors.h\"\n#include \"tensorflow/core/platform/mutex.h\"\n#include \"tensorflow/core/platform/status.h\"\nnamespace tensorflow {\nnamespace data {\nnamespace {\nmutex* get_lock() {\n static mutex lock(LINKER_INITIALIZED);\n return &lock;\n}\nusing DataTransferServerFactories =\n std::unordered_map;\nDataTransferServerFactories& transfer_server_factories() {\n static auto& factories = *new DataTransferServerFactories();\n return factories;\n}\nusing DataTransferClientFactories =\n std::unordered_map;\nDataTransferClientFactories& transfer_client_factories() {\n static auto& factories = *new DataTransferClientFactories();\n return factories;\n}\n} \nGetElementResult GetElementResult::Copy() const {\n GetElementResult copy;\n copy.components = components;\n copy.element_index = element_index;\n copy.end_of_sequence = end_of_sequence;\n copy.skip = skip;\n return copy;\n}\nsize_t GetElementResult::EstimatedMemoryUsageBytes() const {\n size_t size_bytes = components.size() * sizeof(Tensor) +\n sizeof(element_index) + sizeof(end_of_sequence) +\n sizeof(skip);\n for (const Tensor& tensor : components) {\n size_bytes += tensor.TotalBytes();\n if (tensor.dtype() != DT_VARIANT) {\n continue;\n }\n const Variant& variant = tensor.scalar()();\n const CompressedElement* compressed = variant.get();\n if (compressed) {\n size_bytes += compressed->SpaceUsedLong();\n }\n }\n return size_bytes;\n}\nvoid DataTransferServer::Register(std::string name, ServerFactoryT factory) {\n mutex_lock l(*get_lock());\n if (!transfer_server_factories().insert({name, factory}).second) {\n LOG(ERROR)\n << \"Two data transfer server factories are being registered with name \"\n << name << \". Which one gets used is undefined.\";\n }\n}\nStatus DataTransferServer::Build(std::string name, GetElementT get_element,\n std::shared_ptr* out) {\n mutex_lock l(*get_lock());\n auto it = transfer_server_factories().find(name);\n if (it != transfer_server_factories().end()) {\n return it->second(get_element, out);\n }\n std::vector available_names;\n for (const auto& factory : transfer_server_factories()) {\n available_names.push_back(factory.first);\n }\n return errors::NotFound(\n \"No data transfer server factory has been registered for name \", name,\n \". The available names are: [ \", absl::StrJoin(available_names, \", \"),\n \" ]\");\n}\nvoid DataTransferClient::Register(std::string name, ClientFactoryT factory) {\n mutex_lock l(*get_lock());\n if (!transfer_client_factories().insert({name, factory}).second) {\n LOG(ERROR)\n << \"Two data transfer client factories are being registered with name \"\n << name << \". 
Which one gets used is undefined.\";\n }\n}\nStatus DataTransferClient::Build(std::string name, Config config,\n std::unique_ptr<DataTransferClient>* out) {\n mutex_lock l(*get_lock());\n auto it = transfer_client_factories().find(name);\n if (it != transfer_client_factories().end()) {\n return it->second(config, out);\n }\n std::vector<std::string> available_names;\n for (const auto& factory : transfer_client_factories()) {\n available_names.push_back(factory.first);\n }\n return errors::NotFound(\n \"No data transfer client factory has been registered for name \", name,\n \". The available names are: [ \", absl::StrJoin(available_names, \", \"),\n \" ]\");\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/data/service/data_transfer.h\"\n#include \n#include \n#include \n#include \"tensorflow/core/framework/tensor.h\"\n#include \"tensorflow/core/framework/tensor_shape.h\"\n#include \"tensorflow/core/framework/tensor_testutil.h\"\n#include \"tensorflow/core/framework/types.pb.h\"\n#include \"tensorflow/core/lib/core/status_test_util.h\"\n#include \"tensorflow/core/platform/protobuf.h\"\n#include \"tensorflow/core/platform/status.h\"\n#include \"tensorflow/core/platform/test.h\"\nnamespace tensorflow {\nnamespace data {\nnamespace {\nclass TestDataTransferServer : public DataTransferServer {\n public:\n explicit TestDataTransferServer(bool* called) : called_(called) {}\n Status Start(const experimental::WorkerConfig& unused_config) override {\n *called_ = true;\n return absl::OkStatus();\n }\n int Port() const override { return 0; }\n private:\n bool* called_;\n};\ntemplate <class T>\nGetElementResult MakeElementResult(T value) {\n GetElementResult result;\n result.components.push_back(Tensor(std::move(value)));\n result.element_index = 0;\n result.end_of_sequence = false;\n return result;\n}\nTEST(DataTransferTest, RegisterDataTransferServerBuilder) {\n bool called = false;\n DataTransferServer::Register(\"test\", [&called](auto ignore, auto* server) {\n *server = std::make_shared<TestDataTransferServer>(&called);\n return absl::OkStatus();\n });\n std::shared_ptr<DataTransferServer> server;\n TF_ASSERT_OK(DataTransferServer::Build(\"test\", {}, &server));\n EXPECT_FALSE(called);\n TF_ASSERT_OK(server->Start({}));\n EXPECT_TRUE(called);\n}\nTEST(DataTransferTest, EstimateMemoryUsageBytes) {\n GetElementResult empty;\n EXPECT_GT(empty.EstimatedMemoryUsageBytes(), 0);\n Tensor tensor(DT_INT64, TensorShape({10, 100}));\n GetElementResult int64_result = MakeElementResult(tensor);\n EXPECT_GT(int64_result.EstimatedMemoryUsageBytes(), 1000 * sizeof(int64_t));\n EXPECT_GT(int64_result.EstimatedMemoryUsageBytes(),\n int64_result.components[0].AllocatedBytes());\n EXPECT_GE(int64_result.EstimatedMemoryUsageBytes(), sizeof(int64_result));\n}\nTEST(DataTransferTest, EstimateVariantMemoryUsageBytes) {\n const size_t data_size = 1000;\n std::unique_ptr<CompressedElement> compressed{\n protobuf::Arena::Create<CompressedElement>(nullptr)};\n compressed->set_data(std::string(data_size, 'a'));\n Tensor tensor(DT_VARIANT, TensorShape({}));\n tensor.scalar<Variant>()() = *compressed;\n GetElementResult variant_result = MakeElementResult(tensor);\n EXPECT_GT(variant_result.EstimatedMemoryUsageBytes(), data_size);\n EXPECT_GT(variant_result.EstimatedMemoryUsageBytes(),\n compressed->ByteSizeLong());\n EXPECT_GT(variant_result.EstimatedMemoryUsageBytes(),\n compressed->SpaceUsedLong());\n}\nTEST(DataTransferTest, CopyGetElementResult) {\n std::string hello_world = \"hello, world!\";\n GetElementResult result = MakeElementResult(hello_world);\n ASSERT_EQ(result.components.size(), 1);\n 
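// Note (added comment, inferred from the assertions around it): the\n // estimate checked below exceeds hello_world.size() because\n // EstimatedMemoryUsageBytes() also counts sizeof(Tensor) and the tensor\n // payload, and Copy() duplicates components element by element, so the\n // two estimates are expected to match exactly.\n 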
EXPECT_GT(result.EstimatedMemoryUsageBytes(), hello_world.size());\n GetElementResult copy = result.Copy();\n ASSERT_EQ(copy.components.size(), 1);\n test::ExpectEqual(result.components[0], copy.components[0]);\n EXPECT_EQ(copy.EstimatedMemoryUsageBytes(),\n result.EstimatedMemoryUsageBytes());\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/data_transfer.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/data_transfer_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":248,"cells":{"ID":{"kind":"string","value":"fca111e8-154f-4c10-bf20-b92bba5d67bb"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/arolla"},"File Name":{"kind":"string","value":"while_loop_impl"},"File Path in Repository":{"kind":"string","value":"arolla/expr/operators/while_loop/while_loop_impl.cc"},"File Path for Unit Test":{"kind":"string","value":"arolla/expr/operators/while_loop/while_loop_impl_test.cc"},"Code":{"kind":"string","value":"#include \"arolla/expr/operators/while_loop/while_loop_impl.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/log/check.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/types/span.h\"\n#include \"arolla/expr/expr.h\"\n#include \"arolla/expr/expr_node.h\"\n#include \"arolla/expr/expr_operator.h\"\n#include \"arolla/expr/expr_visitor.h\"\n#include \"arolla/expr/operators/while_loop/while_loop.h\"\n#include \"arolla/util/status_macros_backport.h\"\nnamespace arolla::expr_operators::while_loop_impl {\nusing ::arolla::expr::ExprNodePtr;\nusing ::arolla::expr::ExprOperatorPtr;\nusing ::arolla::expr::Placeholder;\nabsl::StatusOr> ExtractImmutables(\n const ExprNodePtr& expr, std::function\n immutable_naming_function) {\n NamedExpressions immutables;\n struct Visit {\n ExprNodePtr expr;\n bool has_placeholder_dep;\n bool has_leaf_dep;\n };\n ASSIGN_OR_RETURN(\n (auto [converted_expr, has_placeholder_dep, has_leaf_dep]),\n expr::PostOrderTraverse(\n expr,\n [&](const ExprNodePtr& node,\n absl::Span visits) -> absl::StatusOr {\n if (node->is_placeholder()) {\n return Visit{.expr = node,\n .has_placeholder_dep = true,\n .has_leaf_dep = false};\n }\n if (node->is_leaf()) {\n return Visit{.expr = node,\n .has_placeholder_dep = false,\n .has_leaf_dep = true};\n }\n bool has_placeholder_dep = std::any_of(\n visits.begin(), visits.end(),\n [](const auto& v) { return v->has_placeholder_dep; });\n bool has_leaf_dep =\n std::any_of(visits.begin(), visits.end(),\n [](const auto& v) { return v->has_leaf_dep; });\n if (!has_placeholder_dep) {\n return Visit{.expr = node,\n .has_placeholder_dep = false,\n .has_leaf_dep = has_leaf_dep};\n }\n std::vector new_deps;\n new_deps.reserve(visits.size());\n for (const auto& visit : visits) {\n if (visit->has_placeholder_dep || !visit->has_leaf_dep) {\n new_deps.push_back(visit->expr);\n } else {\n auto placeholder_key = immutable_naming_function(visit->expr);\n new_deps.emplace_back(Placeholder(placeholder_key));\n immutables.emplace(std::move(placeholder_key), visit->expr);\n }\n }\n ASSIGN_OR_RETURN(auto new_node, expr::WithNewDependencies(\n node, std::move(new_deps)));\n return Visit{.expr = new_node,\n .has_placeholder_dep = true,\n .has_leaf_dep = has_leaf_dep};\n }));\n if (!has_placeholder_dep) {\n 
DCHECK(immutables.empty());\n auto placeholder_key = immutable_naming_function(converted_expr);\n immutables.emplace(placeholder_key, converted_expr);\n converted_expr = Placeholder(placeholder_key);\n }\n return {{std::move(converted_expr), std::move(immutables)}};\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"arolla/expr/operators/while_loop/while_loop_impl.h\"\n#include \n#include \n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/status/status_matchers.h\"\n#include \"absl/strings/str_format.h\"\n#include \"arolla/expr/expr.h\"\n#include \"arolla/expr/expr_node.h\"\n#include \"arolla/expr/testing/testing.h\"\n#include \"arolla/util/fingerprint.h\"\nnamespace arolla::expr_operators::while_loop_impl {\nnamespace {\nusing ::absl_testing::IsOkAndHolds;\nusing ::arolla::expr::CallOp;\nusing ::arolla::expr::ExprNodePtr;\nusing ::arolla::expr::Leaf;\nusing ::arolla::expr::Literal;\nusing ::arolla::expr::Placeholder;\nusing ::arolla::testing::EqualsExpr;\nusing ::testing::IsEmpty;\nusing ::testing::Pair;\nusing ::testing::UnorderedElementsAre;\nTEST(WhileLoopImplTest, ExtractImmutables) {\n absl::flat_hash_map immutable_names;\n auto immutable_naming_function = [&](const ExprNodePtr& node) -> std::string {\n if (auto it = immutable_names.find(node->fingerprint());\n it != immutable_names.end()) {\n return it->second;\n }\n std::string name = absl::StrFormat(\"_immutable_%d\", immutable_names.size());\n immutable_names.emplace(node->fingerprint(), name);\n return name;\n };\n {\n auto expr = Literal(int64_t{1});\n EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function),\n IsOkAndHolds(Pair(\n EqualsExpr(Placeholder(\"_immutable_0\")),\n UnorderedElementsAre(Pair(\n \"_immutable_0\", EqualsExpr(Literal(1)))))));\n }\n {\n auto expr = Leaf(\"fifty\");\n EXPECT_THAT(\n ExtractImmutables(expr, immutable_naming_function),\n IsOkAndHolds(Pair(EqualsExpr(Placeholder(\"_immutable_1\")),\n UnorderedElementsAre(Pair(\n \"_immutable_1\", EqualsExpr(Leaf(\"fifty\")))))));\n }\n {\n auto expr = Placeholder(\"seven\");\n EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function),\n IsOkAndHolds(Pair(EqualsExpr(expr), IsEmpty())));\n }\n {\n ASSERT_OK_AND_ASSIGN(\n auto expr,\n CallOp(\"math.add\",\n {Leaf(\"two\"),\n CallOp(\"math.add\", {Placeholder(\"fifty\"), Leaf(\"seven\")})}));\n EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function),\n IsOkAndHolds(Pair(\n EqualsExpr(CallOp(\n \"math.add\",\n {Placeholder(\"_immutable_3\"),\n CallOp(\"math.add\", {Placeholder(\"fifty\"),\n Placeholder(\"_immutable_2\")})})),\n UnorderedElementsAre(\n Pair(\"_immutable_3\", EqualsExpr(Leaf(\"two\"))),\n Pair(\"_immutable_2\", EqualsExpr(Leaf(\"seven\")))))));\n }\n {\n ASSERT_OK_AND_ASSIGN(auto expr, CallOp(\"math.add\", {Placeholder(\"fifty\"),\n Literal(7)}));\n EXPECT_THAT(\n ExtractImmutables(expr, immutable_naming_function),\n IsOkAndHolds(Pair(EqualsExpr(CallOp(\"math.add\", {Placeholder(\"fifty\"),\n Literal(7)})),\n IsEmpty())));\n }\n {\n ASSERT_OK_AND_ASSIGN(\n auto expr57, CallOp(\"math.add\", {Leaf(\"fifty\"), Literal(7)}));\n ASSERT_OK_AND_ASSIGN(auto expr,\n CallOp(\"math.add\", {expr57, Placeholder(\"two\")}));\n EXPECT_THAT(\n ExtractImmutables(expr, immutable_naming_function),\n IsOkAndHolds(Pair(\n EqualsExpr(CallOp(\n \"math.add\", {Placeholder(\"_immutable_4\"), Placeholder(\"two\")})),\n UnorderedElementsAre(Pair(\"_immutable_4\", EqualsExpr(expr57))))));\n }\n {\n 
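// Added descriptive comment: the next case exercises name reuse. The\n // naming function memoizes by fingerprint, so repeated subexpressions\n // (both occurrences of Leaf(seven)) are expected to resolve to the same\n // _immutable_N placeholder.\n 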
ASSERT_OK_AND_ASSIGN(\n auto expr,\n CallOp(\"math.add\",\n {CallOp(\"math.add\", {Placeholder(\"fifty\"), Leaf(\"seven\")}),\n Leaf(\"seven\")}));\n EXPECT_THAT(\n ExtractImmutables(expr, immutable_naming_function),\n IsOkAndHolds(Pair(\n EqualsExpr(CallOp(\n \"math.add\", {CallOp(\"math.add\", {Placeholder(\"fifty\"),\n Placeholder(\"_immutable_2\")}),\n Placeholder(\"_immutable_2\")})),\n UnorderedElementsAre(\n Pair(\"_immutable_2\", EqualsExpr(Leaf(\"seven\")))))));\n }\n {\n ASSERT_OK_AND_ASSIGN(\n auto expr,\n CallOp(\"math.add\",\n {CallOp(\"math.add\", {Literal(1), Leaf(\"fifty\")}),\n Placeholder(\"seven\")}));\n EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function),\n IsOkAndHolds(Pair(\n EqualsExpr(CallOp(\"math.add\", {Placeholder(\"_immutable_5\"),\n Placeholder(\"seven\")})),\n UnorderedElementsAre(Pair(\n \"_immutable_5\",\n EqualsExpr(CallOp(\"math.add\", {Literal(1),\n Leaf(\"fifty\")})))))));\n }\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/while_loop/while_loop_impl.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/while_loop/while_loop_impl_test.cc"},"Commit Hash":{"kind":"string","value":"1ca990dbeca224035efdabffecc7f3738df6b52c"}}},{"rowIdx":249,"cells":{"ID":{"kind":"string","value":"8e9734e8-2cbe-4ac0-bf10-2ea388d9cbf8"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"gather_nd"},"File Path in Repository":{"kind":"string","value":"tensorflow/lite/kernels/gather_nd.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/lite/kernels/gather_nd_test.cc"},"Code":{"kind":"string","value":"#include \n#include \"tensorflow/lite/core/c/c_api_types.h\"\n#include \"tensorflow/lite/core/c/common.h\"\n#include \"tensorflow/lite/kernels/internal/optimized/optimized_ops.h\"\n#include \"tensorflow/lite/kernels/internal/reference/reference_ops.h\"\n#include \"tensorflow/lite/kernels/internal/tensor.h\"\n#include \"tensorflow/lite/kernels/internal/tensor_ctypes.h\"\n#include \"tensorflow/lite/kernels/kernel_util.h\"\nnamespace tflite {\nnamespace ops {\nnamespace builtin {\nnamespace gather_nd {\nconstexpr int kParams = 0;\nconstexpr int kIndices = 1;\nconstexpr int kOutputTensor = 0;\nTfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {\n TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);\n TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);\n const TfLiteTensor* params;\n TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kParams, &params));\n const TfLiteTensor* indices;\n TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices));\n TfLiteTensor* output;\n TF_LITE_ENSURE_OK(context,\n GetOutputSafe(context, node, kOutputTensor, &output));\n switch (params->type) {\n case kTfLiteFloat32:\n case kTfLiteUInt8:\n case kTfLiteInt8:\n case kTfLiteInt16:\n case kTfLiteInt64:\n case kTfLiteInt32:\n case kTfLiteString:\n case kTfLiteBool:\n break;\n default:\n TF_LITE_KERNEL_LOG(context,\n \"Params of type '%s' are not supported by gather_nd.\",\n TfLiteTypeGetName(params->type));\n return kTfLiteError;\n }\n switch (indices->type) {\n case kTfLiteInt64:\n case kTfLiteInt32:\n case kTfLiteInt16:\n break;\n default:\n TF_LITE_KERNEL_LOG(context,\n \"Indices of type '%s' are not supported by gather_nd.\",\n 
TfLiteTypeGetName(indices->type));\n return kTfLiteError;\n }\n const int params_rank = NumDimensions(params);\n const int indices_rank = NumDimensions(indices);\n const int indices_nd = SizeOfDimension(indices, indices_rank - 1);\n if (params_rank < 1) {\n TF_LITE_KERNEL_LOG(context, \"Params must be at least a vector.\");\n return kTfLiteError;\n }\n if (indices_rank < 1) {\n TF_LITE_KERNEL_LOG(context, \"Indices must be at least a vector.\");\n return kTfLiteError;\n }\n if (indices_nd > params_rank) {\n TF_LITE_KERNEL_LOG(\n context, \"Index innermost dimension length must be <= params rank.\");\n return kTfLiteError;\n }\n output->type = params->type;\n const int output_rank = indices_rank + params_rank - indices_nd - 1;\n TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank);\n int output_index = 0;\n for (int i = 0; i < indices_rank - 1; ++i) {\n output_shape->data[output_index++] = indices->dims->data[i];\n }\n for (int i = indices_nd; i < params_rank; ++i) {\n output_shape->data[output_index++] = params->dims->data[i];\n }\n return context->ResizeTensor(context, output, output_shape);\n}\ntemplate <typename ParamsT, typename IndicesT>\nTfLiteStatus GatherNd(const TfLiteTensor* params, const TfLiteTensor* indices,\n TfLiteTensor* output) {\n return reference_ops::GatherNd(\n GetTensorShape(params), GetTensorData<ParamsT>(params),\n GetTensorShape(indices), GetTensorData<IndicesT>(indices),\n GetTensorShape(output), GetTensorData<ParamsT>(output));\n}\ntemplate <typename IndicesT>\nTfLiteStatus GatherNdString(const TfLiteTensor* params,\n const TfLiteTensor* indices, TfLiteTensor* output) {\n return reference_ops::GatherNdString(\n GetTensorShape(params), params, GetTensorShape(indices),\n GetTensorData<IndicesT>(indices), GetTensorShape(output), output);\n}\ntemplate <typename IndicesT>\nTfLiteStatus EvalGatherNd(TfLiteContext* context, const TfLiteTensor* params,\n const TfLiteTensor* indices, TfLiteTensor* output) {\n bool indices_has_only_positive_elements = true;\n const auto* indices_values = GetTensorData<IndicesT>(indices);\n const size_t num_indices = indices->bytes / sizeof(IndicesT);\n for (size_t i = 0; i < num_indices; i++) {\n if (indices_values[i] < 0) {\n indices_has_only_positive_elements = false;\n break;\n }\n }\n TF_LITE_ENSURE(context, indices_has_only_positive_elements);\n TfLiteStatus status = kTfLiteError;\n switch (params->type) {\n case kTfLiteFloat32:\n status = GatherNd<float, IndicesT>(params, indices, output);\n break;\n case kTfLiteUInt8:\n status = GatherNd<uint8_t, IndicesT>(params, indices, output);\n break;\n case kTfLiteInt8:\n status = GatherNd<int8_t, IndicesT>(params, indices, output);\n break;\n case kTfLiteInt16:\n status = GatherNd<int16_t, IndicesT>(params, indices, output);\n break;\n case kTfLiteInt32:\n status = GatherNd<int32_t, IndicesT>(params, indices, output);\n break;\n case kTfLiteInt64:\n status = GatherNd<int64_t, IndicesT>(params, indices, output);\n break;\n case kTfLiteString:\n status = GatherNdString<IndicesT>(params, indices, output);\n break;\n case kTfLiteBool:\n status = GatherNd<bool, IndicesT>(params, indices, output);\n break;\n default:\n TF_LITE_KERNEL_LOG(context,\n \"Params type '%s' are not supported by gather_nd.\",\n TfLiteTypeGetName(params->type));\n return kTfLiteError;\n }\n if (status != kTfLiteOk) {\n TF_LITE_KERNEL_LOG(context, \"gather_nd index out of bounds\");\n }\n return status;\n}\nTfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {\n const TfLiteTensor* params;\n TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kParams, &params));\n const TfLiteTensor* indices;\n TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices));\n TfLiteTensor* output;\n TF_LITE_ENSURE_OK(context,\n GetOutputSafe(context, node, 
kOutputTensor, &output));\n TF_LITE_ENSURE(context,\n (NumElements(params) == 0 && NumElements(indices) == 0) ||\n NumElements(params) > 0);\n switch (indices->type) {\n case kTfLiteInt16:\n return EvalGatherNd<int16_t>(context, params, indices, output);\n case kTfLiteInt32:\n return EvalGatherNd<int32_t>(context, params, indices, output);\n case kTfLiteInt64:\n return EvalGatherNd<int64_t>(context, params, indices, output);\n default:\n TF_LITE_KERNEL_LOG(context,\n \"Indices of type '%s' are not supported by gather_nd.\",\n TfLiteTypeGetName(indices->type));\n return kTfLiteError;\n }\n}\n} \nTfLiteRegistration* Register_GATHER_ND() {\n static TfLiteRegistration r = { nullptr, nullptr,\n gather_nd::Prepare, gather_nd::Eval};\n return &r;\n}\n} \n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \n#include \n#include \n#include \n#include \n#include \n#include \"flatbuffers/flatbuffers.h\" \n#include \"tensorflow/lite/kernels/test_util.h\"\n#include \"tensorflow/lite/schema/schema_generated.h\"\n#include \"tensorflow/lite/string_type.h\"\nnamespace tflite {\nnamespace {\nusing ::testing::ElementsAreArray;\nclass GatherNdOpModel : public SingleOpModel {\n public:\n GatherNdOpModel(const TensorData& params, const TensorData& indices) {\n params_ = AddInput(params);\n indices_ = AddInput(indices);\n output_ = AddOutput(params.type);\n SetBuiltinOp(BuiltinOperator_GATHER_ND, BuiltinOptions_GatherNdOptions,\n CreateGatherNdOptions(builder_).Union());\n BuildInterpreter({GetShape(params_), GetShape(indices_)});\n }\n template <typename T>\n void SetInput(std::initializer_list<T> data) {\n PopulateTensor<T>(params_, data);\n }\n template <typename T>\n void SetPositions(std::initializer_list<T> data) {\n PopulateTensor<T>(indices_, data);\n }\n template <typename T>\n std::vector<T> GetOutput() {\n return ExtractVector<T>(output_);\n }\n std::vector<int> GetOutputShape() { return GetTensorShape(output_); }\n protected:\n int params_;\n int indices_;\n int output_;\n};\nTEST(GatherNdOpTest, ElementIndexingIntoMatrix) {\n GatherNdOpModel m({TensorType_FLOAT32, {2, 2}}, {TensorType_INT32, {2, 2}});\n m.SetInput<float>({1.1, 1.2, 2.1, 2.2});\n m.SetPositions<int32_t>({0, 0, 1, 1});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput<float>(), Pointwise(FloatingPointEq(), {1.1, 2.2}));\n}\nTEST(GatherNdOpTest, ErrorOnOutOfBoundsTooLarge) {\n GatherNdOpModel m({TensorType_FLOAT32, {2, 2}}, {TensorType_INT32, {2, 2}});\n m.SetInput<float>({1.1, 1.2, 2.1, 2.2});\n m.SetPositions<int32_t>({0, 0, 2, 0});\n EXPECT_EQ(m.Invoke(), kTfLiteError);\n m.SetPositions<int32_t>({0, 0, 1, 2});\n EXPECT_EQ(m.Invoke(), kTfLiteError);\n}\nTEST(GatherNdOpTest, ErrorOnOutOfBoundsNegative) {\n GatherNdOpModel m({TensorType_FLOAT32, {2, 2}}, {TensorType_INT32, {2, 2}});\n m.SetInput<float>({1.1, 1.2, 2.1, 2.2});\n m.SetPositions<int32_t>({1, -1, 1, 1});\n EXPECT_EQ(m.Invoke(), kTfLiteError);\n}\nTEST(GatherNdOpTest, SliceIndexingIntoMatrix) {\n GatherNdOpModel m({TensorType_FLOAT32, {2, 2}}, {TensorType_INT32, {2, 1}});\n m.SetInput<float>({1.1, 1.2, 2.1, 2.2});\n m.SetPositions<int32_t>({1, 0});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput<float>(),\n Pointwise(FloatingPointEq(), {2.1, 2.2, 1.1, 1.2}));\n}\nTEST(GatherNdOpTest, BatchedIndexingIntoMatrix1) {\n GatherNdOpModel m({TensorType_FLOAT32, {2, 2}},\n {TensorType_INT32, {2, 1, 1}});\n m.SetInput<float>({1.1, 1.2, 2.1, 2.2});\n m.SetPositions<int32_t>({1, 0});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput<float>(),\n Pointwise(FloatingPointEq(), {2.1, 2.2, 1.1, 1.2}));\n}\nTEST(GatherNdOpTest, BatchedIndexingIntoMatrix2) {\n GatherNdOpModel m({TensorType_FLOAT32, {2, 2}},\n {TensorType_INT32, 
{2, 1, 2}});\n m.SetInput({1.1, 1.2, 2.1, 2.2});\n m.SetPositions({0, 0, 1, 1});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(), Pointwise(FloatingPointEq(), {1.1, 2.2}));\n}\nTEST(GatherNdOpTest, DuplicateIndexingIntoMatrix) {\n GatherNdOpModel m({TensorType_FLOAT32, {2, 2}}, {TensorType_INT32, {2, 2}});\n m.SetInput({1.1, 1.2, 2.1, 2.2});\n m.SetPositions({0, 0, 0, 0});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(), Pointwise(FloatingPointEq(), {1.1, 1.1}));\n}\nTEST(GatherNdOpTest, ElementIndexingIntoRank3Tensor) {\n GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},\n {TensorType_INT32, {1, 2, 3}});\n m.SetInput({1.1, -1.2, 1.3, -2.1, 2.2, 2.3, \n 3.1, 3.2, -3.3, -4.1, -4.2, 4.3, \n 5.1, -5.2, 5.3, 6.1, -6.2, 6.3});\n m.SetPositions({0, 0, 1, 1, 1, 0});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(), Pointwise(FloatingPointEq(), {-1.2, -4.1}));\n}\nTEST(GatherNdOpTest, SliceIndexingIntoRank3Tensor) {\n GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},\n {TensorType_INT32, {2, 1}});\n m.SetInput({1.1, -1.2, 1.3, -2.1, 2.2, 2.3, \n 3.1, 3.2, -3.3, -4.1, -4.2, 4.3, \n 5.1, -5.2, 5.3, 6.1, -6.2, 6.3});\n m.SetPositions({0, 2});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(),\n Pointwise(FloatingPointEq(), {1.1, -1.2, 1.3, -2.1, 2.2, 2.3, 5.1,\n -5.2, 5.3, 6.1, -6.2, 6.3}));\n}\nTEST(GatherNdOpTest, BatchedIndexingIntoRank3Tensor1) {\n GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},\n {TensorType_INT32, {2, 1, 3}});\n m.SetInput({1.1, -1.2, 1.3, -2.1, 2.2, 2.3, \n 3.1, 3.2, -3.3, -4.1, -4.2, 4.3, \n 5.1, -5.2, 5.3, 6.1, -6.2, 6.3});\n m.SetPositions({0, 0, 1, 1, 1, 0});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(), Pointwise(FloatingPointEq(), {-1.2, -4.1}));\n}\nTEST(GatherNdOpTest, BatchedIndexingIntoRank3Tensor2) {\n GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},\n {TensorType_INT32, {2, 1, 1}});\n m.SetInput({1.1, -1.2, 1.3, -2.1, 2.2, 2.3, \n 3.1, 3.2, -3.3, -4.1, -4.2, 4.3, \n 5.1, -5.2, 5.3, 6.1, -6.2, 6.3});\n m.SetPositions({1, 0});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(),\n Pointwise(FloatingPointEq(), {3.1, 3.2, -3.3, -4.1, -4.2, 4.3,\n 1.1, -1.2, 1.3, -2.1, 2.2, 2.3}));\n}\nTEST(GatherNdOpTest, BatchedIndexingIntoRank3Tensor3) {\n GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},\n {TensorType_INT32, {2, 2, 2}});\n m.SetInput({1.1, -1.2, 1.3, -2.1, 2.2, 2.3, \n 3.1, 3.2, -3.3, -4.1, -4.2, 4.3, \n 5.1, -5.2, 5.3, 6.1, -6.2, 6.3});\n m.SetPositions({0, 1, 1, 0, 0, 0, 2, 1});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(),\n Pointwise(FloatingPointEq(), {-2.1, 2.2, 2.3, 3.1, 3.2, -3.3, 1.1,\n -1.2, 1.3, 6.1, -6.2, 6.3}));\n}\nTEST(GatherNdOpTest, BatchedIndexingIntoRank3Tensor4) {\n GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},\n {TensorType_INT32, {2, 2, 3}});\n m.SetInput({1.1, -1.2, 1.3, -2.1, 2.2, 2.3, \n 3.1, 3.2, -3.3, -4.1, -4.2, 4.3, \n 5.1, -5.2, 5.3, 6.1, -6.2, 6.3});\n m.SetPositions({0, 0, 1, 1, 0, 1, 1, 1, 2, 2, 1, 2});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(),\n Pointwise(FloatingPointEq(), {-1.2, 3.2, 4.3, 6.3}));\n}\nTEST(GatherNdOpTest, DuplicateIndexingIntoRank3Tensor) {\n GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},\n {TensorType_INT32, {2, 2}});\n m.SetInput({1.1, -1.2, 1.3, -2.1, 2.2, 2.3, \n 3.1, 3.2, -3.3, -4.1, -4.2, 4.3, \n 5.1, -5.2, 5.3, 6.1, -6.2, 6.3});\n m.SetPositions({0, 1, 0, 1});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(),\n Pointwise(FloatingPointEq(), 
{-2.1, 2.2, 2.3, -2.1, 2.2, 2.3}));\n}\nTEST(GatherNdOpTest, Float32Int32) {\n GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},\n {TensorType_INT32, {2, 2}});\n m.SetInput({1.1, -1.2, 1.3, -2.1, 2.2, 2.3, \n 3.1, 3.2, -3.3, -4.1, -4.2, 4.3, \n 5.1, -5.2, 5.3, 6.1, -6.2, 6.3});\n m.SetPositions({0, 1, 1, 0});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(),\n Pointwise(FloatingPointEq(), {-2.1, 2.2, 2.3, 3.1, 3.2, -3.3}));\n}\nTEST(GatherNdOpTest, Float32Int64) {\n GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},\n {TensorType_INT64, {2, 2}});\n m.SetInput({1.1, -1.2, 1.3, -2.1, 2.2, 2.3, \n 3.1, 3.2, -3.3, -4.1, -4.2, 4.3, \n 5.1, -5.2, 5.3, 6.1, -6.2, 6.3});\n m.SetPositions({0LL, 1LL, 1LL, 0LL});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(),\n Pointwise(FloatingPointEq(), {-2.1, 2.2, 2.3, 3.1, 3.2, -3.3}));\n}\nTEST(GatherNdOpTest, Int32Int32) {\n GatherNdOpModel m({TensorType_INT32, {3, 2, 3}}, {TensorType_INT32, {2, 2}});\n m.SetInput({1, -1, 1, -2, 2, 2, \n 3, 3, -3, -4, -4, 4, \n 5, -5, 5, 6, -6, 6});\n m.SetPositions({0, 1, 1, 0});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));\n}\nTEST(GatherNdOpTest, Int32Int64) {\n GatherNdOpModel m({TensorType_INT32, {3, 2, 3}}, {TensorType_INT64, {2, 2}});\n m.SetInput({1, -1, 1, -2, 2, 2, \n 3, 3, -3, -4, -4, 4, \n 5, -5, 5, 6, -6, 6});\n m.SetPositions({0LL, 1LL, 1LL, 0LL});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));\n}\nTEST(GatherNdOpTest, Uint8Int32) {\n GatherNdOpModel m({TensorType_UINT8, {3, 2, 3}}, {TensorType_INT32, {2, 2}});\n m.SetInput({1, 1, 1, 2, 2, 2, \n 3, 3, 3, 4, 4, 4, \n 5, 5, 5, 6, 6, 6});\n m.SetPositions({0, 1, 1, 0});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(), ElementsAreArray({2, 2, 2, 3, 3, 3}));\n}\nTEST(GatherNdOpTest, Uint8Int64) {\n GatherNdOpModel m({TensorType_UINT8, {3, 2, 3}}, {TensorType_INT64, {2, 2}});\n m.SetInput({1, 1, 1, 2, 2, 2, \n 3, 3, 3, 4, 4, 4, \n 5, 5, 5, 6, 6, 6});\n m.SetPositions({0, 1, 1, 0});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(), ElementsAreArray({2, 2, 2, 3, 3, 3}));\n}\nTEST(GatherNdOpTest, Int8Int32) {\n GatherNdOpModel m({TensorType_INT8, {3, 2, 3}}, {TensorType_INT32, {2, 2}});\n m.SetInput({1, -1, 1, -2, 2, 2, \n 3, 3, -3, -4, -4, 4, \n 5, -5, 5, 6, -6, 6});\n m.SetPositions({0, 1, 1, 0});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));\n}\nTEST(GatherNdOpTest, Int8Int64) {\n GatherNdOpModel m({TensorType_INT8, {3, 2, 3}}, {TensorType_INT64, {2, 2}});\n m.SetInput({1, -1, 1, -2, 2, 2, \n 3, 3, -3, -4, -4, 4, \n 5, -5, 5, 6, -6, 6});\n m.SetPositions({0LL, 1LL, 1LL, 0LL});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));\n}\nTEST(GatherNdOpTest, Int16Int32) {\n GatherNdOpModel m({TensorType_INT16, {3, 2, 3}}, {TensorType_INT32, {2, 2}});\n m.SetInput({1, -1, 1, -2, 2, 2, \n 3, 3, -3, -4, -4, 4, \n 5, -5, 5, 6, -6, 6});\n m.SetPositions({0, 1, 1, 0});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));\n}\nTEST(GatherNdOpTest, Int16Int64) {\n GatherNdOpModel m({TensorType_INT16, {3, 2, 3}}, {TensorType_INT64, {2, 2}});\n m.SetInput({1, -1, 1, -2, 2, 2, \n 3, 3, -3, -4, -4, 4, \n 5, -5, 5, 6, -6, 6});\n m.SetPositions({0LL, 1LL, 1LL, 0LL});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(), 
ElementsAreArray({-2, 2, 2, 3, 3, -3}));\n}\nTEST(GatherNdOpTest, Int64Int32) {\n GatherNdOpModel m({TensorType_INT64, {3, 2, 3}}, {TensorType_INT32, {2, 2}});\n m.SetInput({1LL, -1LL, 1LL, -2LL, 2LL, 2LL, \n 3LL, 3LL, -3LL, -4LL, -4LL, 4LL, \n 5LL, -5LL, 5LL, 6LL, -6LL, 6LL});\n m.SetPositions({0, 1, 1, 0});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(),\n ElementsAreArray({-2LL, 2LL, 2LL, 3LL, 3LL, -3LL}));\n}\nTEST(GatherNdOpTest, Int64Int64) {\n GatherNdOpModel m({TensorType_INT64, {3, 2, 3}}, {TensorType_INT64, {2, 2}});\n m.SetInput({1LL, -1LL, 1LL, -2LL, 2LL, 2LL, \n 3LL, 3LL, -3LL, -4LL, -4LL, 4LL, \n 5LL, -5LL, 5LL, 6LL, -6LL, 6LL});\n m.SetPositions({0LL, 1LL, 1LL, 0LL});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(),\n ElementsAreArray({-2LL, 2LL, 2LL, 3LL, 3LL, -3LL}));\n}\nTEST(GatherNdOpTest, Float32Int16) {\n GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},\n {TensorType_INT16, {2, 2}});\n m.SetInput({1.1, -1.2, 1.3, -2.1, 2.2, 2.3, \n 3.1, 3.2, -3.3, -4.1, -4.2, 4.3, \n 5.1, -5.2, 5.3, 6.1, -6.2, 6.3});\n m.SetPositions({0, 1, 1, 0});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(),\n Pointwise(FloatingPointEq(), {-2.1, 2.2, 2.3, 3.1, 3.2, -3.3}));\n}\nTEST(GatherNdOpTest, StringInt32) {\n GatherNdOpModel m({TensorType_STRING, {3, 2, 3}}, {TensorType_INT32, {2, 2}});\n m.SetInput({\"A\", \"B\", \"C\", \n \"D\", \"E\", \"F\", \n \"G\", \"H\", \"I\", \n \"J\", \"K\", \"L\", \n \"M\", \"N\", \"O\", \n \"P\", \"Q\", \"R\"});\n m.SetPositions({0, 1, 1, 0});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(),\n ElementsAreArray({\"D\", \"E\", \"F\", \"G\", \"H\", \"I\"}));\n}\nTEST(GatherNdOpTest, StringInt64) {\n GatherNdOpModel m({TensorType_STRING, {3, 2, 3}}, {TensorType_INT64, {2, 2}});\n m.SetInput({\"A\", \"B\", \"C\", \n \"D\", \"E\", \"F\", \n \"G\", \"H\", \"I\", \n \"J\", \"K\", \"L\", \n \"M\", \"N\", \"O\", \n \"P\", \"Q\", \"R\"});\n m.SetPositions({0LL, 1LL, 1LL, 0LL});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutput(),\n ElementsAreArray({\"D\", \"E\", \"F\", \"G\", \"H\", \"I\"}));\n}\nTEST(GatherNdOpTest, StringOutOfBoundsTooLarge) {\n GatherNdOpModel m({TensorType_STRING, {3, 2, 3}}, {TensorType_INT32, {2, 2}});\n m.SetInput({\"A\", \"B\", \"C\", \n \"D\", \"E\", \"F\", \n \"G\", \"H\", \"I\", \n \"J\", \"K\", \"L\", \n \"M\", \"N\", \"O\", \n \"P\", \"Q\", \"R\"});\n m.SetPositions({0, 0, 3, 0});\n ASSERT_EQ(m.Invoke(), kTfLiteError);\n m.SetPositions({0, 0, 2, 2});\n ASSERT_EQ(m.Invoke(), kTfLiteError);\n}\nTEST(GatherNdOpTest, StringOutOfBoundsNegative) {\n GatherNdOpModel m({TensorType_STRING, {3, 2, 3}}, {TensorType_INT32, {2, 2}});\n m.SetInput({\"A\", \"B\", \"C\", \n \"D\", \"E\", \"F\", \n \"G\", \"H\", \"I\", \n \"J\", \"K\", \"L\", \n \"M\", \"N\", \"O\", \n \"P\", \"Q\", \"R\"});\n m.SetPositions({1, -1, 0, 0});\n ASSERT_EQ(m.Invoke(), kTfLiteError);\n}\nTEST(GatherNdOpTest, EmptyParamsAndIndex) {\n GatherNdOpModel m({TensorType_FLOAT32, {1, 0}}, {TensorType_INT32, {0, 2}});\n ASSERT_EQ(m.Invoke(), kTfLiteOk);\n EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({0}));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/gather_nd.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/gather_nd_test.cc"},"Commit 
Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":250,"cells":{"ID":{"kind":"string","value":"e580d97c-eae2-4a33-ad5b-e764f270b769"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"abseil/abseil-cpp"},"File Name":{"kind":"string","value":"raw_logging"},"File Path in Repository":{"kind":"string","value":"absl/base/internal/raw_logging.cc"},"File Path for Unit Test":{"kind":"string","value":"absl/base/raw_logging_test.cc"},"Code":{"kind":"string","value":"#include \"absl/base/internal/raw_logging.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#ifdef __EMSCRIPTEN__\n#include \n#endif\n#include \"absl/base/attributes.h\"\n#include \"absl/base/config.h\"\n#include \"absl/base/internal/atomic_hook.h\"\n#include \"absl/base/internal/errno_saver.h\"\n#include \"absl/base/log_severity.h\"\n#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \\\n defined(__hexagon__) || defined(__Fuchsia__) || \\\n defined(__native_client__) || defined(__OpenBSD__) || \\\n defined(__EMSCRIPTEN__) || defined(__ASYLO__)\n#include \n#define ABSL_HAVE_POSIX_WRITE 1\n#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1\n#else\n#undef ABSL_HAVE_POSIX_WRITE\n#endif\n#if (defined(__linux__) || defined(__FreeBSD__)) && !defined(__ANDROID__)\n#include \n#define ABSL_HAVE_SYSCALL_WRITE 1\n#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1\n#else\n#undef ABSL_HAVE_SYSCALL_WRITE\n#endif\n#ifdef _WIN32\n#include \n#define ABSL_HAVE_RAW_IO 1\n#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1\n#else\n#undef ABSL_HAVE_RAW_IO\n#endif\nnamespace absl {\nABSL_NAMESPACE_BEGIN\nnamespace raw_log_internal {\nnamespace {\n#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED\nconstexpr char kTruncated[] = \" ... (message truncated)\\n\";\nbool VADoRawLog(char** buf, int* size, const char* format, va_list ap)\n ABSL_PRINTF_ATTRIBUTE(3, 0);\nbool VADoRawLog(char** buf, int* size, const char* format, va_list ap) {\n if (*size < 0) return false;\n int n = vsnprintf(*buf, static_cast(*size), format, ap);\n bool result = true;\n if (n < 0 || n > *size) {\n result = false;\n if (static_cast(*size) > sizeof(kTruncated)) {\n n = *size - static_cast(sizeof(kTruncated));\n } else {\n n = 0; \n }\n }\n *size -= n;\n *buf += n;\n return result;\n}\n#endif \nconstexpr int kLogBufSize = 3000;\nbool DoRawLog(char** buf, int* size, const char* format, ...)\n ABSL_PRINTF_ATTRIBUTE(3, 4);\nbool DoRawLog(char** buf, int* size, const char* format, ...) 
{\n if (*size < 0) return false;\n va_list ap;\n va_start(ap, format);\n int n = vsnprintf(*buf, static_cast<size_t>(*size), format, ap);\n va_end(ap);\n if (n < 0 || n > *size) return false;\n *size -= n;\n *buf += n;\n return true;\n}\nbool DefaultLogFilterAndPrefix(absl::LogSeverity, const char* file, int line,\n char** buf, int* buf_size) {\n DoRawLog(buf, buf_size, \"[%s : %d] RAW: \", file, line);\n return true;\n}\nABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES\nabsl::base_internal::AtomicHook<LogFilterAndPrefixHook>\n log_filter_and_prefix_hook(DefaultLogFilterAndPrefix);\nABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES\nabsl::base_internal::AtomicHook<AbortHook> abort_hook;\nvoid RawLogVA(absl::LogSeverity severity, const char* file, int line,\n const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(4, 0);\nvoid RawLogVA(absl::LogSeverity severity, const char* file, int line,\n const char* format, va_list ap) {\n char buffer[kLogBufSize];\n char* buf = buffer;\n int size = sizeof(buffer);\n#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED\n bool enabled = true;\n#else\n bool enabled = false;\n#endif\n#ifdef ABSL_MIN_LOG_LEVEL\n if (severity < static_cast<absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) &&\n severity < absl::LogSeverity::kFatal) {\n enabled = false;\n }\n#endif\n enabled = log_filter_and_prefix_hook(severity, file, line, &buf, &size);\n const char* const prefix_end = buf;\n#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED\n if (enabled) {\n bool no_chop = VADoRawLog(&buf, &size, format, ap);\n if (no_chop) {\n DoRawLog(&buf, &size, \"\\n\");\n } else {\n DoRawLog(&buf, &size, \"%s\", kTruncated);\n }\n AsyncSignalSafeWriteError(buffer, static_cast<size_t>(buf - buffer));\n }\n#else\n static_cast<void>(format);\n static_cast<void>(ap);\n static_cast<void>(enabled);\n#endif\n if (severity == absl::LogSeverity::kFatal) {\n abort_hook(file, line, buffer, prefix_end, buffer + kLogBufSize);\n abort();\n }\n}\nvoid DefaultInternalLog(absl::LogSeverity severity, const char* file, int line,\n const std::string& message) {\n RawLog(severity, file, line, \"%.*s\", static_cast<int>(message.size()),\n message.data());\n}\n} \nvoid AsyncSignalSafeWriteError(const char* s, size_t len) {\n if (!len) return;\n absl::base_internal::ErrnoSaver errno_saver;\n#if defined(__EMSCRIPTEN__)\n if (s[len - 1] == '\\n') {\n len--;\n }\n#if ABSL_INTERNAL_EMSCRIPTEN_VERSION >= 3001043\n emscripten_errn(s, len);\n#else\n char buf[kLogBufSize];\n if (len >= kLogBufSize) {\n len = kLogBufSize - 1;\n constexpr size_t trunc_len = sizeof(kTruncated) - 2;\n memcpy(buf + len - trunc_len, kTruncated, trunc_len);\n buf[len] = '\\0';\n len -= trunc_len;\n } else {\n buf[len] = '\\0';\n }\n memcpy(buf, s, len);\n _emscripten_err(buf);\n#endif\n#elif defined(ABSL_HAVE_SYSCALL_WRITE)\n syscall(SYS_write, STDERR_FILENO, s, len);\n#elif defined(ABSL_HAVE_POSIX_WRITE)\n write(STDERR_FILENO, s, len);\n#elif defined(ABSL_HAVE_RAW_IO)\n _write( 2, s, static_cast<unsigned>(len));\n#else\n (void)s;\n (void)len;\n#endif\n}\nvoid RawLog(absl::LogSeverity severity, const char* file, int line,\n const char* format, ...) 
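// Added descriptive comment: RawLog is the variadic entry point. It packs\n // the arguments into a va_list and forwards to RawLogVA above, which\n // formats into a stack buffer, writes it via AsyncSignalSafeWriteError,\n // and for kFatal also runs the registered abort hook before calling abort().\n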
{\n va_list ap;\n va_start(ap, format);\n RawLogVA(severity, file, line, format, ap);\n va_end(ap);\n}\nbool RawLoggingFullySupported() {\n#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED\n return true;\n#else \n return false;\n#endif \n}\nABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL\n absl::base_internal::AtomicHook\n internal_log_function(DefaultInternalLog);\nvoid RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func) {\n log_filter_and_prefix_hook.Store(func);\n}\nvoid RegisterAbortHook(AbortHook func) { abort_hook.Store(func); }\nvoid RegisterInternalLogFunction(InternalLogFunction func) {\n internal_log_function.Store(func);\n}\n} \nABSL_NAMESPACE_END\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"absl/base/internal/raw_logging.h\"\n#include \n#include \"gtest/gtest.h\"\n#include \"absl/strings/str_cat.h\"\nnamespace {\nTEST(RawLoggingCompilationTest, Log) {\n ABSL_RAW_LOG(INFO, \"RAW INFO: %d\", 1);\n ABSL_RAW_LOG(INFO, \"RAW INFO: %d %d\", 1, 2);\n ABSL_RAW_LOG(INFO, \"RAW INFO: %d %d %d\", 1, 2, 3);\n ABSL_RAW_LOG(INFO, \"RAW INFO: %d %d %d %d\", 1, 2, 3, 4);\n ABSL_RAW_LOG(INFO, \"RAW INFO: %d %d %d %d %d\", 1, 2, 3, 4, 5);\n ABSL_RAW_LOG(WARNING, \"RAW WARNING: %d\", 1);\n ABSL_RAW_LOG(ERROR, \"RAW ERROR: %d\", 1);\n}\nTEST(RawLoggingCompilationTest, LogWithNulls) {\n ABSL_RAW_LOG(INFO, \"RAW INFO: %s%c%s\", \"Hello\", 0, \"World\");\n}\nTEST(RawLoggingCompilationTest, PassingCheck) {\n ABSL_RAW_CHECK(true, \"RAW CHECK\");\n}\nconst char kExpectedDeathOutput[] = \"\";\nTEST(RawLoggingDeathTest, FailingCheck) {\n EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_CHECK(1 == 0, \"explanation\"),\n kExpectedDeathOutput);\n}\nTEST(RawLoggingDeathTest, LogFatal) {\n EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_LOG(FATAL, \"my dog has fleas\"),\n kExpectedDeathOutput);\n}\nTEST(InternalLog, CompilationTest) {\n ABSL_INTERNAL_LOG(INFO, \"Internal Log\");\n std::string log_msg = \"Internal Log\";\n ABSL_INTERNAL_LOG(INFO, log_msg);\n ABSL_INTERNAL_LOG(INFO, log_msg + \" 2\");\n float d = 1.1f;\n ABSL_INTERNAL_LOG(INFO, absl::StrCat(\"Internal log \", 3, \" + \", d));\n}\nTEST(InternalLogDeathTest, FailingCheck) {\n EXPECT_DEATH_IF_SUPPORTED(ABSL_INTERNAL_CHECK(1 == 0, \"explanation\"),\n kExpectedDeathOutput);\n}\nTEST(InternalLogDeathTest, LogFatal) {\n EXPECT_DEATH_IF_SUPPORTED(ABSL_INTERNAL_LOG(FATAL, \"my dog has fleas\"),\n kExpectedDeathOutput);\n}\n} "},"Code Url":{"kind":"string","value":"https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/raw_logging.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/raw_logging_test.cc"},"Commit Hash":{"kind":"string","value":"03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4"}}},{"rowIdx":251,"cells":{"ID":{"kind":"string","value":"18f87e2e-9b1a-444a-9ef9-eca87df828f3"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/tensorstore"},"File Name":{"kind":"string","value":"stop_token"},"File Path in Repository":{"kind":"string","value":"tensorstore/util/stop_token.h"},"File Path for Unit Test":{"kind":"string","value":"tensorstore/util/stop_token_test.cc"},"Code":{"kind":"string","value":"#ifndef TENSORSTORE_UTIL_STOP_TOKEN_H_\n#define TENSORSTORE_UTIL_STOP_TOKEN_H_\n#include \n#include \n#include \n#include \n#include \"absl/base/attributes.h\"\n#include \"tensorstore/internal/intrusive_ptr.h\"\n#include \"tensorstore/util/stop_token_impl.h\"\nnamespace tensorstore {\nclass 
StopSource;\ntemplate <typename Callback>\nclass StopCallback;\nclass StopToken {\n public:\n StopToken() noexcept = default;\n ~StopToken() noexcept = default;\n StopToken(const StopToken&) noexcept = default;\n StopToken(StopToken&&) noexcept = default;\n StopToken& operator=(const StopToken&) noexcept = default;\n StopToken& operator=(StopToken&&) noexcept = default;\n [[nodiscard]] bool stop_possible() const noexcept {\n return state_ != nullptr;\n }\n [[nodiscard]] bool stop_requested() const noexcept {\n return state_ != nullptr && state_->stop_requested();\n }\n friend bool operator==(const StopToken& a, const StopToken& b) {\n return a.state_ == b.state_;\n }\n friend bool operator!=(const StopToken& a, const StopToken& b) {\n return !(a == b);\n }\n private:\n friend class StopSource;\n template <typename Callback>\n friend class StopCallback;\n StopToken(internal::IntrusivePtr<internal_stop_token::StopState> state)\n : state_(std::move(state)) {}\n internal::IntrusivePtr<internal_stop_token::StopState> state_{nullptr};\n};\nclass StopSource {\n public:\n StopSource() noexcept\n : state_(internal::MakeIntrusivePtr<internal_stop_token::StopState>()) {}\n explicit StopSource(std::nullptr_t) noexcept : state_(nullptr) {}\n ~StopSource() noexcept = default;\n StopSource(const StopSource& b) noexcept = default;\n StopSource(StopSource&&) noexcept = default;\n StopSource& operator=(const StopSource& b) noexcept = default;\n StopSource& operator=(StopSource&&) noexcept = default;\n [[nodiscard]] bool stop_possible() const noexcept {\n return state_ != nullptr;\n }\n [[nodiscard]] bool stop_requested() const noexcept {\n return state_ != nullptr && state_->stop_requested();\n }\n bool request_stop() const noexcept {\n if (state_ != nullptr) {\n return state_->RequestStop();\n }\n return false;\n }\n [[nodiscard]] StopToken get_token() const noexcept {\n return StopToken(state_);\n }\n private:\n internal::IntrusivePtr<internal_stop_token::StopState> state_;\n};\ntemplate <typename Callback>\nclass StopCallback : private internal_stop_token::StopCallbackBase {\n static_assert(std::is_invocable_v<Callback>);\n public:\n using callback_type = Callback;\n StopCallback(const StopCallback&) = delete;\n StopCallback& operator=(const StopCallback&) = delete;\n StopCallback(StopCallback&&) = delete;\n StopCallback& operator=(StopCallback&&) = delete;\n template <\n typename... Args,\n std::enable_if_t<std::is_constructible_v<Callback, Args...>, int> = 0>\n explicit StopCallback(const StopToken& token, Args&&... args)\n : callback_(std::forward<Args>(args)...) 
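// Added descriptive comment: the body below registers this callback with\n // the token's shared StopState. If a stop was already requested,\n // registration invokes the callback immediately on this thread (the\n // StopAlreadyRequested case in the ground-truth test relies on this).\n 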
{\n internal_stop_token::StopState* state = token.state_.get();\n if (state) {\n invoker_ = &StopCallback::Invoker;\n state->RegisterImpl(*this);\n } \n }\n ~StopCallback() {\n internal_stop_token::StopState* state =\n state_.exchange(nullptr, std::memory_order_acq_rel);\n if (state != nullptr) {\n state->UnregisterImpl(*this);\n }\n }\n private:\n static void Invoker(internal_stop_token::StopCallbackBase& self) noexcept {\n static_cast(static_cast(self).callback_)();\n }\n ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Callback callback_;\n};\ntemplate \nStopCallback(StopToken token, Callback callback) -> StopCallback;\n} \n#endif"},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorstore/util/stop_token.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"tensorstore/internal/testing/concurrent.h\"\nnamespace {\nTEST(StopTokenTest, Invariants) {\n tensorstore::StopSource source;\n EXPECT_TRUE(source.stop_possible());\n EXPECT_FALSE(source.stop_requested());\n tensorstore::StopToken token = source.get_token();\n EXPECT_TRUE(source.stop_possible());\n EXPECT_FALSE(source.stop_requested());\n EXPECT_EQ(token, source.get_token());\n EXPECT_TRUE(source.request_stop());\n EXPECT_TRUE(source.stop_possible());\n EXPECT_TRUE(source.stop_requested());\n EXPECT_TRUE(token.stop_requested());\n {\n tensorstore::StopSource source2;\n EXPECT_NE(token, source2.get_token());\n }\n}\nTEST(StopTokenTest, Invariants_Null) {\n tensorstore::StopSource source(nullptr);\n EXPECT_FALSE(source.stop_possible());\n EXPECT_FALSE(source.stop_requested());\n tensorstore::StopToken token = source.get_token();\n EXPECT_FALSE(source.stop_possible());\n EXPECT_FALSE(source.stop_requested());\n EXPECT_EQ(token, source.get_token());\n EXPECT_FALSE(source.request_stop());\n EXPECT_FALSE(source.stop_possible());\n EXPECT_FALSE(source.stop_requested());\n EXPECT_FALSE(token.stop_requested());\n {\n tensorstore::StopSource source2;\n EXPECT_NE(token, source2.get_token());\n }\n}\nTEST(StopTokenTest, Basic_InScope) {\n tensorstore::StopSource source;\n bool called = false;\n {\n tensorstore::StopCallback callback(source.get_token(),\n [&]() { called = true; });\n EXPECT_FALSE(called);\n EXPECT_TRUE(source.request_stop());\n }\n EXPECT_TRUE(called);\n}\nTEST(StopTokenTest, Basic_NotInScope) {\n tensorstore::StopSource source;\n bool called = false;\n {\n tensorstore::StopCallback callback(source.get_token(),\n [&]() { called = true; });\n EXPECT_FALSE(called);\n }\n EXPECT_TRUE(source.request_stop());\n EXPECT_FALSE(called);\n}\nTEST(StopTokenTest, Basic_Null) {\n tensorstore::StopSource source(nullptr);\n bool called = false;\n {\n tensorstore::StopCallback callback(source.get_token(),\n [&]() { called = true; });\n EXPECT_FALSE(called);\n EXPECT_FALSE(source.request_stop());\n }\n EXPECT_FALSE(called);\n}\nTEST(StopTokenTest, StopAlreadyRequested) {\n tensorstore::StopSource source;\n EXPECT_TRUE(source.request_stop());\n bool called = false;\n tensorstore::StopCallback callback(source.get_token(),\n [&]() { called = true; });\n EXPECT_TRUE(called);\n}\nTEST(StopTokenTest, CallbackOrder) {\n bool called[3] = {};\n auto do_nothing = []() {};\n using DoNothingCallback = tensorstore::StopCallback;\n tensorstore::StopSource source;\n auto x = std::make_unique(source.get_token(), do_nothing);\n tensorstore::StopCallback callback0(source.get_token(), [&]() {\n EXPECT_TRUE(called[1]);\n called[0] = true;\n });\n tensorstore::StopCallback callback1(source.get_token(), [&]() {\n EXPECT_TRUE(called[2]);\n 
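// Added descriptive comment: callbacks fire in LIFO registration order\n // (2, then 1, then 0), which is why each lambda can assert that the\n // later-registered flag is already set.\n 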
called[1] = true;\n });\n tensorstore::StopCallback callback2(source.get_token(), [&]() {\n EXPECT_FALSE(called[0]);\n called[2] = true;\n });\n { DoNothingCallback tmp(source.get_token(), do_nothing); }\n x = nullptr; \n EXPECT_TRUE(source.request_stop());\n EXPECT_TRUE(called[2]);\n}\nTEST(StopCallbackTest, InvokeValueCategory) {\n struct Callback {\n void operator()() const& { value += 1; }\n void operator()() && { value += 100; }\n int& value;\n };\n tensorstore::StopSource source;\n int counts[3] = {};\n tensorstore::StopCallback stop_callback0(source.get_token(),\n Callback{counts[0]});\n Callback callback1{counts[1]};\n tensorstore::StopCallback stop_callback1(source.get_token(),\n callback1);\n tensorstore::StopCallback stop_callback2(source.get_token(),\n Callback{counts[2]});\n source.request_stop();\n EXPECT_THAT(counts, ::testing::ElementsAre(100, 1, 1));\n}\nTEST(StopTokenTest, SelfDeregister) {\n tensorstore::StopSource source;\n std::optional>> callback{\n std::in_place, source.get_token(), [&] { callback = std::nullopt; }};\n EXPECT_TRUE(source.request_stop());\n EXPECT_FALSE(callback.has_value());\n}\nTEST(StopTokenTest, Concurrent) {\n tensorstore::StopSource source;\n bool called = false;\n std::optional>> callback;\n ::tensorstore::internal_testing::TestConcurrent(\n 100,\n [&] {\n tensorstore::StopSource new_source;\n source = std::move(new_source);\n called = false;\n },\n [&] {\n EXPECT_TRUE(source.stop_requested());\n callback = std::nullopt;\n EXPECT_TRUE(called);\n }, \n [&] { callback.emplace(source.get_token(), [&]() { called = true; }); },\n [&] { source.request_stop(); },\n [&] {\n tensorstore::StopCallback callback(source.get_token(), []() {});\n } \n );\n}\n} "},"Code Url":{"kind":"string","value":"https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/stop_token.h"},"Test Code Url":{"kind":"string","value":"https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/stop_token_test.cc"},"Commit Hash":{"kind":"string","value":"4f887a6430414cd6088e1743555015b10f116d50"}}},{"rowIdx":252,"cells":{"ID":{"kind":"string","value":"1c5201c2-01d4-4a96-b235-4a452e3fa598"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"semantic_version"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/stream_executor/semantic_version.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/stream_executor/semantic_version_test.cc"},"Code":{"kind":"string","value":"#include \"xla/stream_executor/semantic_version.h\"\n#include \n#include \n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/numbers.h\"\n#include \"absl/strings/str_format.h\"\n#include \"absl/strings/str_split.h\"\n#include \"absl/strings/string_view.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace stream_executor {\nstd::string SemanticVersion::ToString() const {\n return absl::StrFormat(\"%d.%d.%d\", major_, minor_, patch_);\n}\nstatic absl::StatusOr ParseUnsignedNumber(\n absl::string_view component) {\n unsigned number;\n if (!absl::SimpleAtoi(component, &number)) {\n return absl::InvalidArgumentError(\n absl::StrFormat(\"'%s' is not an unsigned number.\", component));\n }\n return number;\n}\nabsl::StatusOr SemanticVersion::ParseFromString(\n absl::string_view str) {\n std::vector components = absl::StrSplit(str, '.');\n if 
(components.size() != 3) {\n    return absl::InvalidArgumentError(\n        \"Version does not match the format X.Y.Z\");\n  }\n  SemanticVersion result{0, 0, 0};\n  TF_ASSIGN_OR_RETURN(result.major(), ParseUnsignedNumber(components[0]));\n  TF_ASSIGN_OR_RETURN(result.minor(), ParseUnsignedNumber(components[1]));\n  TF_ASSIGN_OR_RETURN(result.patch(), ParseUnsignedNumber(components[2]));\n  return result;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/stream_executor/semantic_version.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/hash/hash_testing.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/string_view.h\"\n#include \"tsl/platform/status_matchers.h\"\n#include \"tsl/platform/test.h\"\nnamespace stream_executor {\nnamespace {\nTEST(SemanticVersion, Construction) {\n  SemanticVersion version{1, 2, 3};\n  EXPECT_EQ(version.major(), 1);\n  EXPECT_EQ(version.minor(), 2);\n  EXPECT_EQ(version.patch(), 3);\n}\nTEST(SemanticVersion, ConstructionFromArray) {\n  SemanticVersion version{std::array<unsigned, 3>{1, 2, 3}};\n  EXPECT_EQ(version.major(), 1);\n  EXPECT_EQ(version.minor(), 2);\n  EXPECT_EQ(version.patch(), 3);\n}\nTEST(SemanticVersion, Mutation) {\n  SemanticVersion version{0, 0, 0};\n  version.major() = 1;\n  version.minor() = 2;\n  version.patch() = 3;\n  EXPECT_EQ(version.major(), 1);\n  EXPECT_EQ(version.minor(), 2);\n  EXPECT_EQ(version.patch(), 3);\n}\nTEST(SemanticVersion, ParseFromStringSuccess) {\n  absl::StatusOr<SemanticVersion> version =\n      SemanticVersion::ParseFromString(\"1.2.3\");\n  ASSERT_THAT(version, tsl::testing::IsOk());\n  EXPECT_EQ(version->major(), 1);\n  EXPECT_EQ(version->minor(), 2);\n  EXPECT_EQ(version->patch(), 3);\n}\nTEST(SemanticVersion, ParseFromStringInvalid) {\n  auto test = [](absl::string_view str) {\n    absl::StatusOr<SemanticVersion> version =\n        SemanticVersion::ParseFromString(str);\n    EXPECT_THAT(version,\n                tsl::testing::StatusIs(absl::StatusCode::kInvalidArgument));\n  };\n  test(\"1.2\");\n  test(\"1.2.3dev5\");\n}\nTEST(SemanticVersion, ToString) {\n  SemanticVersion version{1, 2, 3};\n  EXPECT_EQ(version.ToString(), \"1.2.3\");\n}\nTEST(SemanticVersion, AbslStringify) {\n  SemanticVersion version{1, 2, 3};\n  EXPECT_EQ(absl::StrCat(version), version.ToString());\n}\nTEST(SemanticVersion, OStream) {\n  SemanticVersion version{1, 2, 3};\n  std::ostringstream os;\n  os << version;\n  EXPECT_EQ(os.str(), version.ToString());\n}\nTEST(SemanticVersion, Equality) {\n  SemanticVersion version{1, 2, 3};\n  SemanticVersion other{1, 2, 4};\n  EXPECT_EQ(version, version);\n  EXPECT_FALSE(version != version);\n  EXPECT_NE(version, other);\n  EXPECT_FALSE(version == other);\n}\nTEST(SemanticVersion, Ordering) {\n  std::array<SemanticVersion, 5> versions = {\n      SemanticVersion{3, 3, 3}, SemanticVersion{0, 0, 0},\n      SemanticVersion{1, 2, 3}, SemanticVersion{1, 2, 4},\n      SemanticVersion{1, 3, 0}};\n  std::sort(versions.begin(), versions.end());\n  EXPECT_THAT(versions, testing::ElementsAre(\n                            SemanticVersion{0, 0, 0}, SemanticVersion{1, 2, 3},\n                            SemanticVersion{1, 2, 4}, SemanticVersion{1, 3, 0},\n                            SemanticVersion{3, 3, 3}));\n}\nTEST(SemanticVersion, Hash) {\n  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({\n      SemanticVersion{0, 0, 0},\n      SemanticVersion{1, 2, 3},\n      SemanticVersion{1, 2, 4},\n      SemanticVersion{1, 3, 0},\n      SemanticVersion{3, 3, 3},\n  }));\n}\n} \n} "},"Code 
Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/semantic_version.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/semantic_version_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":253,"cells":{"ID":{"kind":"string","value":"8ff2a112-c796-41ea-bcf0-13b123b8378b"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"device_name_utils"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/tsl/util/device_name_utils.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/tsl/util/device_name_utils_test.cc"},"Code":{"kind":"string","value":"#include \"xla/tsl/util/device_name_utils.h\"\n#include \n#include \"tsl/platform/errors.h\"\nnamespace tsl {\nstatic bool IsAlpha(char c) {\n return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');\n}\nstatic bool IsAlphaNumOrUnderscore(char c) {\n return IsAlpha(c) || (c >= '0' && c <= '9') || c == '_';\n}\nstatic bool IsJobName(absl::string_view in) {\n return !in.empty() && IsAlpha(in.front()) &&\n std::all_of(in.begin(), in.end(), IsAlphaNumOrUnderscore);\n}\nstatic bool ConsumePrefix(absl::string_view* in, string* out,\n absl::string_view prefix_terminators) {\n if (in->empty() || !IsAlpha(in->front())) return false;\n const auto end_it =\n std::find_first_of(in->begin(), in->end(), prefix_terminators.begin(),\n prefix_terminators.end());\n if (!std::all_of(in->begin(), end_it, IsAlphaNumOrUnderscore)) {\n return false;\n }\n out->assign(in->begin(), end_it);\n in->remove_prefix(end_it - in->begin());\n return true;\n}\nstatic bool ConsumeJobName(absl::string_view* in, string* job) {\n return ConsumePrefix(in, job, \"/\");\n}\nstatic bool ConsumeDeviceType(absl::string_view* in, string* device_type) {\n return ConsumePrefix(in, device_type, \"/:\");\n}\nstatic bool ConsumeNumber(absl::string_view* in, int* val) {\n uint64 tmp;\n if (str_util::ConsumeLeadingDigits(in, &tmp)) {\n *val = tmp;\n return true;\n } else {\n return false;\n }\n}\nstatic string DeviceName(const string& job, int replica, int task,\n const string& device_prefix, const string& device_type,\n int id) {\n CHECK(IsJobName(job)) << job;\n CHECK_LE(0, replica);\n CHECK_LE(0, task);\n CHECK(!device_type.empty());\n CHECK_LE(0, id);\n return strings::StrCat(\"/job:\", job, \"/replica:\", replica, \"/task:\", task,\n device_prefix, device_type, \":\", id);\n}\nstring DeviceNameUtils::FullName(const string& job, int replica, int task,\n const string& type, int id) {\n return DeviceName(job, replica, task, \"/device:\", type, id);\n}\nnamespace {\nstring LegacyName(const string& job, int replica, int task, const string& type,\n int id) {\n return DeviceName(job, replica, task, \"/\", absl::AsciiStrToLower(type), id);\n}\n} \nbool DeviceNameUtils::ParseFullName(absl::string_view fullname, ParsedName* p) {\n p->Clear();\n if (fullname == \"/\") {\n return true;\n }\n while (!fullname.empty()) {\n bool progress = false;\n if (absl::ConsumePrefix(&fullname, \"/job:\")) {\n p->has_job = !absl::ConsumePrefix(&fullname, \"*\");\n if (p->has_job && !ConsumeJobName(&fullname, &p->job)) {\n return false;\n }\n progress = true;\n }\n if (absl::ConsumePrefix(&fullname, \"/replica:\")) {\n 
p->has_replica = !absl::ConsumePrefix(&fullname, \"*\");\n if (p->has_replica && !ConsumeNumber(&fullname, &p->replica)) {\n return false;\n }\n progress = true;\n }\n if (absl::ConsumePrefix(&fullname, \"/task:\")) {\n p->has_task = !absl::ConsumePrefix(&fullname, \"*\");\n if (p->has_task && !ConsumeNumber(&fullname, &p->task)) {\n return false;\n }\n progress = true;\n }\n if (absl::ConsumePrefix(&fullname, \"/device:\")) {\n p->has_type = !absl::ConsumePrefix(&fullname, \"*\");\n if (p->has_type && !ConsumeDeviceType(&fullname, &p->type)) {\n return false;\n }\n if (!absl::ConsumePrefix(&fullname, \":\")) {\n p->has_id = false;\n } else {\n p->has_id = !absl::ConsumePrefix(&fullname, \"*\");\n if (p->has_id && !ConsumeNumber(&fullname, &p->id)) {\n return false;\n }\n }\n progress = true;\n }\n if (absl::ConsumePrefix(&fullname, \"/cpu:\") ||\n absl::ConsumePrefix(&fullname, \"/CPU:\")) {\n p->has_type = true;\n p->type = \"CPU\"; \n p->has_id = !absl::ConsumePrefix(&fullname, \"*\");\n if (p->has_id && !ConsumeNumber(&fullname, &p->id)) {\n return false;\n }\n progress = true;\n }\n if (absl::ConsumePrefix(&fullname, \"/gpu:\") ||\n absl::ConsumePrefix(&fullname, \"/GPU:\")) {\n p->has_type = true;\n p->type = \"GPU\"; \n p->has_id = !absl::ConsumePrefix(&fullname, \"*\");\n if (p->has_id && !ConsumeNumber(&fullname, &p->id)) {\n return false;\n }\n progress = true;\n }\n if (!progress) {\n return false;\n }\n }\n return true;\n}\nbool DeviceNameUtils::ParseFullOrLocalName(absl::string_view fullname,\n ParsedName* p) {\n return ParseFullName(fullname, p) || ParseLocalName(fullname, p);\n}\nnamespace {\nvoid CompleteName(const DeviceNameUtils::ParsedName& parsed_basename,\n DeviceNameUtils::ParsedName* parsed_name) {\n if (!parsed_name->has_job) {\n parsed_name->job = parsed_basename.job;\n parsed_name->has_job = true;\n }\n if (!parsed_name->has_replica) {\n parsed_name->replica = parsed_basename.replica;\n parsed_name->has_replica = true;\n }\n if (!parsed_name->has_task) {\n parsed_name->task = parsed_basename.task;\n parsed_name->has_task = true;\n }\n if (!parsed_name->has_type) {\n parsed_name->type = parsed_basename.type;\n parsed_name->has_type = true;\n }\n if (!parsed_name->has_id) {\n parsed_name->id = parsed_basename.id;\n parsed_name->has_id = true;\n }\n}\n} \nabsl::Status DeviceNameUtils::CanonicalizeDeviceName(absl::string_view fullname,\n absl::string_view basename,\n string* canonical_name) {\n *canonical_name = \"\";\n ParsedName parsed_basename;\n if (!ParseFullName(basename, &parsed_basename)) {\n return errors::InvalidArgument(\"Could not parse basename: \", basename,\n \" into a device specification.\");\n }\n if (!(parsed_basename.has_job && parsed_basename.has_replica &&\n parsed_basename.has_task && parsed_basename.has_type &&\n parsed_basename.has_id)) {\n return errors::InvalidArgument(\"Basename: \", basename,\n \" should be fully \"\n \"specified.\");\n }\n ParsedName parsed_name;\n if (ParseLocalName(fullname, &parsed_name)) {\n CompleteName(parsed_basename, &parsed_name);\n *canonical_name = ParsedNameToString(parsed_name);\n return absl::OkStatus();\n }\n if (ParseFullName(fullname, &parsed_name)) {\n CompleteName(parsed_basename, &parsed_name);\n *canonical_name = ParsedNameToString(parsed_name);\n return absl::OkStatus();\n }\n return errors::InvalidArgument(\"Could not parse \", fullname,\n \" into a device \"\n \"specification.\");\n}\nstring DeviceNameUtils::ParsedNameToString(const ParsedName& pn) {\n string buf;\n if (pn.has_job) 
strings::StrAppend(&buf, \"/job:\", pn.job);\n if (pn.has_replica) strings::StrAppend(&buf, \"/replica:\", pn.replica);\n if (pn.has_task) strings::StrAppend(&buf, \"/task:\", pn.task);\n if (pn.has_type) {\n strings::StrAppend(&buf, \"/device:\", pn.type, \":\");\n if (pn.has_id) {\n strings::StrAppend(&buf, pn.id);\n } else {\n strings::StrAppend(&buf, \"*\");\n }\n }\n return buf;\n}\nbool DeviceNameUtils::IsSpecification(const ParsedName& less_specific,\n const ParsedName& more_specific) {\n if (less_specific.has_job &&\n (!more_specific.has_job || (less_specific.job != more_specific.job))) {\n return false;\n }\n if (less_specific.has_replica &&\n (!more_specific.has_replica ||\n (less_specific.replica != more_specific.replica))) {\n return false;\n }\n if (less_specific.has_task &&\n (!more_specific.has_task || (less_specific.task != more_specific.task))) {\n return false;\n }\n if (less_specific.has_type &&\n (!more_specific.has_type || (less_specific.type != more_specific.type))) {\n return false;\n }\n if (less_specific.has_id &&\n (!more_specific.has_id || (less_specific.id != more_specific.id))) {\n return false;\n }\n return true;\n}\nbool DeviceNameUtils::AreCompatibleDevNames(const ParsedName& a,\n const ParsedName& b) {\n if (a.has_job && b.has_job && (a.job != b.job)) {\n return false;\n }\n if (a.has_replica && b.has_replica && (a.replica != b.replica)) {\n return false;\n }\n if (a.has_task && b.has_task && (a.task != b.task)) {\n return false;\n }\n if (a.has_type && b.has_type && (a.type != b.type)) {\n return false;\n }\n if (a.has_id && b.has_id && (a.id != b.id)) {\n return false;\n }\n return true;\n}\nvoid DeviceNameUtils::EnsureSpecification(ParsedName* more_specific,\n const ParsedName& less_specific) {\n if (less_specific.has_job) {\n more_specific->has_job = true;\n more_specific->job = less_specific.job;\n }\n if (less_specific.has_replica) {\n more_specific->has_replica = true;\n more_specific->replica = less_specific.replica;\n }\n if (less_specific.has_task) {\n more_specific->has_task = true;\n more_specific->task = less_specific.task;\n }\n if (less_specific.has_type) {\n more_specific->has_type = true;\n more_specific->type = less_specific.type;\n }\n if (less_specific.has_id) {\n more_specific->has_id = true;\n more_specific->id = less_specific.id;\n }\n}\nbool DeviceNameUtils::IsCompleteSpecification(const ParsedName& pattern,\n const ParsedName& name) {\n CHECK(name.has_job && name.has_replica && name.has_task && name.has_type &&\n name.has_id);\n if (pattern.has_job && (pattern.job != name.job)) return false;\n if (pattern.has_replica && (pattern.replica != name.replica)) return false;\n if (pattern.has_task && (pattern.task != name.task)) return false;\n if (pattern.has_type && (pattern.type != name.type)) return false;\n if (pattern.has_id && (pattern.id != name.id)) return false;\n return true;\n}\nnamespace {\nabsl::Status MergeDevNamesImpl(DeviceNameUtils::ParsedName* target,\n const DeviceNameUtils::ParsedName& other,\n bool allow_soft_placement,\n bool override_conflicts) {\n const auto& ParsedNameToString = DeviceNameUtils::ParsedNameToString;\n if (other.has_job) {\n if (target->has_job && target->job != other.job) {\n return errors::InvalidArgument(\n \"Cannot merge devices with incompatible jobs: '\",\n ParsedNameToString(*target), \"' and '\", ParsedNameToString(other),\n \"'\");\n } else {\n target->has_job = other.has_job;\n target->job = other.job;\n }\n }\n if (other.has_replica) {\n if (target->has_replica && target->replica != 
other.replica) {\n return errors::InvalidArgument(\n \"Cannot merge devices with incompatible replicas: '\",\n ParsedNameToString(*target), \"' and '\", ParsedNameToString(other),\n \"'\");\n } else {\n target->has_replica = other.has_replica;\n target->replica = other.replica;\n }\n }\n if (other.has_task) {\n if (target->has_task && target->task != other.task) {\n return errors::InvalidArgument(\n \"Cannot merge devices with incompatible tasks: '\",\n ParsedNameToString(*target), \"' and '\", ParsedNameToString(other),\n \"'\");\n } else {\n target->has_task = other.has_task;\n target->task = other.task;\n }\n }\n if (other.has_type) {\n if (target->has_type && target->type != other.type) {\n if (!allow_soft_placement) {\n return errors::InvalidArgument(\n \"Cannot merge devices with incompatible types: '\",\n ParsedNameToString(*target), \"' and '\", ParsedNameToString(other),\n \"'\");\n } else if (override_conflicts) {\n target->type = other.type;\n } else {\n target->has_id = false;\n target->has_type = false;\n return absl::OkStatus();\n }\n } else {\n target->has_type = other.has_type;\n target->type = other.type;\n }\n }\n if (other.has_id) {\n if (target->has_id && target->id != other.id) {\n if (!allow_soft_placement) {\n return errors::InvalidArgument(\n \"Cannot merge devices with incompatible ids: '\",\n ParsedNameToString(*target), \"' and '\", ParsedNameToString(other),\n \"'\");\n } else if (override_conflicts) {\n target->id = other.id;\n } else {\n target->has_id = false;\n return absl::OkStatus();\n }\n } else {\n target->has_id = other.has_id;\n target->id = other.id;\n }\n }\n return absl::OkStatus();\n}\n} \nabsl::Status DeviceNameUtils::MergeDevNames(ParsedName* target,\n const ParsedName& other,\n bool allow_soft_placement) {\n return MergeDevNamesImpl(target, other, allow_soft_placement,\n false);\n}\nabsl::Status DeviceNameUtils::MergeOverrideDevNames(ParsedName* target,\n const ParsedName& other) {\n return MergeDevNamesImpl(target, other, true,\n true);\n}\nvoid DeviceNameUtils::MergeUnsetDevNames(ParsedName* target,\n const ParsedName& other) {\n if (other.has_job && !target->has_job) {\n target->has_job = other.has_job;\n target->job = other.job;\n }\n if (other.has_replica && !target->has_replica) {\n target->has_replica = other.has_replica;\n target->replica = other.replica;\n }\n if (other.has_task && !target->has_task) {\n target->has_task = other.has_task;\n target->task = other.task;\n }\n if (other.has_type && !target->has_type) {\n target->has_type = other.has_type;\n target->type = other.type;\n }\n if (other.has_id && !target->has_id) {\n target->has_id = other.has_id;\n target->id = other.id;\n }\n}\nbool DeviceNameUtils::IsSameAddressSpace(const ParsedName& a,\n const ParsedName& b) {\n return (a.has_job && b.has_job && (a.job == b.job)) &&\n (a.has_replica && b.has_replica && (a.replica == b.replica)) &&\n (a.has_task && b.has_task && (a.task == b.task));\n}\nbool DeviceNameUtils::IsSameAddressSpace(absl::string_view src,\n absl::string_view dst) {\n ParsedName x;\n ParsedName y;\n return ParseFullName(src, &x) && ParseFullName(dst, &y) &&\n IsSameAddressSpace(x, y);\n}\nbool DeviceNameUtils::IsDifferentAddressSpace(const ParsedName& a,\n const ParsedName& b) {\n return (a.has_job && b.has_job && (a.job != b.job)) ||\n (a.has_replica && b.has_replica && (a.replica != b.replica)) ||\n (a.has_task && b.has_task && (a.task != b.task));\n}\nconst DeviceNameUtils::ParsedName DeviceNameUtils::AddressSpace(\n const ParsedName& name) {\n ParsedName 
address_space;\n  address_space.has_job = name.has_job;\n  address_space.has_replica = name.has_replica;\n  address_space.has_task = name.has_task;\n  address_space.job = name.job;\n  address_space.replica = name.replica;\n  address_space.task = name.task;\n  return address_space;\n}\nstring DeviceNameUtils::LocalName(absl::string_view type, int id) {\n  return strings::StrCat(\"/device:\", type, \":\", id);\n}\nnamespace {\nstring LegacyLocalName(absl::string_view type, int id) {\n  return strings::StrCat(type, \":\", id);\n}\n} \nstring DeviceNameUtils::LocalName(absl::string_view fullname) {\n  ParsedName x;\n  CHECK(ParseFullName(fullname, &x)) << fullname;\n  return LocalName(x.type, x.id);\n}\nbool DeviceNameUtils::ParseLocalName(absl::string_view name, ParsedName* p) {\n  if (!ConsumeDeviceType(&name, &p->type)) {\n    return false;\n  }\n  p->has_type = true;\n  if (!absl::ConsumePrefix(&name, \":\")) {\n    return false;\n  }\n  if (!ConsumeNumber(&name, &p->id)) {\n    return false;\n  }\n  p->has_id = true;\n  return name.empty();\n}\nbool DeviceNameUtils::SplitDeviceName(absl::string_view name, string* task,\n                                      string* device) {\n  ParsedName pn;\n  if (ParseFullName(name, &pn) && pn.has_type && pn.has_id) {\n    task->clear();\n    task->reserve(\n        (pn.has_job ? (5 + pn.job.size()) : 0) +\n        (pn.has_replica ? (9 + 4 ) : 0) +\n        (pn.has_task ? (6 + 4 ) : 0));\n    if (pn.has_job) {\n      strings::StrAppend(task, \"/job:\", pn.job);\n    }\n    if (pn.has_replica) {\n      strings::StrAppend(task, \"/replica:\", pn.replica);\n    }\n    if (pn.has_task) {\n      strings::StrAppend(task, \"/task:\", pn.task);\n    }\n    device->clear();\n    strings::StrAppend(device, pn.type, \":\", pn.id);\n    return true;\n  }\n  return false;\n}\nbool DeviceNameUtils::GetTaskName(const ParsedName& pn, string* task) {\n  if (pn.has_job && pn.has_replica && pn.has_task) {\n    task->clear();\n    task->reserve((5 + pn.job.size()) +\n                  (9 + 4 ) +\n                  (6 + 4 ));\n    strings::StrAppend(task, \"/job:\", pn.job);\n    strings::StrAppend(task, \"/replica:\", pn.replica);\n    strings::StrAppend(task, \"/task:\", pn.task);\n    return true;\n  }\n  return false;\n}\nstd::vector<string> DeviceNameUtils::GetNamesForDeviceMappings(\n    const ParsedName& pn) {\n  if (pn.has_job && pn.has_replica && pn.has_task && pn.has_type && pn.has_id) {\n    return {\n        DeviceNameUtils::FullName(pn.job, pn.replica, pn.task, pn.type, pn.id),\n        LegacyName(pn.job, pn.replica, pn.task, pn.type, pn.id)};\n  } else {\n    return {};\n  }\n}\nstd::vector<string> DeviceNameUtils::GetLocalNamesForDeviceMappings(\n    const ParsedName& pn) {\n  if (pn.has_type && pn.has_id) {\n    return {DeviceNameUtils::LocalName(pn.type, pn.id),\n            LegacyLocalName(pn.type, pn.id)};\n  } else {\n    return {};\n  }\n}\n absl::Status DeviceNameUtils::DeviceNameToCpuDeviceName(\n    const string& device_name, string* host_device_name) {\n  DeviceNameUtils::ParsedName device;\n  if (!DeviceNameUtils::ParseFullName(device_name, &device)) {\n    return errors::Internal(\"Could not parse device name \", device_name);\n  }\n  device.type = \"CPU\";\n  device.has_type = true;\n  device.id = 0;\n  device.has_id = true;\n  *host_device_name = DeviceNameUtils::ParsedNameToString(device);\n  return absl::OkStatus();\n}\nstd::ostream& operator<<(std::ostream& os,\n                         const DeviceNameUtils::ParsedName& x) {\n  os << DeviceNameUtils::ParsedNameToString(x);\n  return os;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/tsl/util/device_name_utils.h\"\n#include <vector>\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/strcat.h\"\n#include 
\"tsl/platform/test.h\"\n#include \"tsl/platform/test_benchmark.h\"\nnamespace tsl {\nnamespace {\nbool RoundTripParsedName(const string& original, const string& expected) {\n DeviceNameUtils::ParsedName p;\n if (!DeviceNameUtils::ParseFullName(original, &p)) {\n return false;\n }\n string round_tripped = DeviceNameUtils::ParsedNameToString(p);\n return (round_tripped == expected);\n}\nenum NamePart { kJob = 0x01, kReplica = 0x02, kTask = 0x04, kDevice = 0x08 };\nbool RoundTripPartialName(int parts_to_test, const std::vector& parts,\n bool explicitDevice) {\n string original, expected;\n if (parts_to_test & kJob) {\n strings::StrAppend(&original, \"/job:\", parts[0]);\n strings::StrAppend(&expected, \"/job:\", parts[0]);\n }\n if (parts_to_test & kReplica) {\n strings::StrAppend(&original, \"/replica:\", parts[1]);\n strings::StrAppend(&expected, \"/replica:\", parts[1]);\n }\n if (parts_to_test & kTask) {\n strings::StrAppend(&original, \"/task:\", parts[2]);\n strings::StrAppend(&expected, \"/task:\", parts[2]);\n }\n if (parts_to_test & kDevice) {\n if (explicitDevice) {\n strings::StrAppend(&original, \"/device:\", parts[3]);\n strings::StrAppend(&expected, \"/device:\", parts[3]);\n } else {\n strings::StrAppend(&original, \"/\", parts[3]);\n strings::StrAppend(&expected,\n \"/device:\", absl::AsciiStrToUpper(parts[3]));\n }\n }\n return RoundTripParsedName(original, expected);\n}\n} \nTEST(DeviceNameUtilsTest, Basic) {\n EXPECT_EQ(DeviceNameUtils::FullName(\"hello\", 1, 2, \"CPU\", 3),\n \"/job:hello/replica:1/task:2/device:CPU:3\");\n {\n DeviceNameUtils::ParsedName p;\n EXPECT_FALSE(DeviceNameUtils::ParseFullName(\"foobar\", &p));\n EXPECT_FALSE(DeviceNameUtils::ParseFullName(\n \"/job:123/replica:1/task:2/device:GPU:3\", &p));\n EXPECT_FALSE(\n DeviceNameUtils::ParseFullName(\"/job:123/replica:1/task:2/gpu:\", &p));\n EXPECT_FALSE(DeviceNameUtils::ParseFullName(\n \"/job:123/replica:1/task:2/device:gpu:\", &p));\n EXPECT_FALSE(DeviceNameUtils::ParseFullName(\n \"/job:foo/replica:-1/task:2/device:GPU:3\", &p));\n EXPECT_FALSE(DeviceNameUtils::ParseFullName(\n \"/job:foo/replica:1/task:-2/device:GPU:3\", &p));\n EXPECT_FALSE(\n DeviceNameUtils::ParseFullName(\"/job:foo/replica:1/task:2/bar:3\", &p));\n EXPECT_FALSE(DeviceNameUtils::ParseFullName(\n \"/job:foo/replica:1/task:2/device:GPU:3/extra\", &p));\n EXPECT_TRUE(DeviceNameUtils::ParseFullName(\n \"/job:foo/replica:1/task:2/device:GPU:3\", &p));\n EXPECT_TRUE(p.has_job);\n EXPECT_TRUE(p.has_replica);\n EXPECT_TRUE(p.has_task);\n EXPECT_TRUE(p.has_type);\n EXPECT_TRUE(p.has_id);\n EXPECT_EQ(p.job, \"foo\");\n EXPECT_EQ(p.replica, 1);\n EXPECT_EQ(p.task, 2);\n EXPECT_EQ(p.type, \"GPU\");\n EXPECT_EQ(p.id, 3);\n }\n {\n DeviceNameUtils::ParsedName p;\n EXPECT_TRUE(DeviceNameUtils::ParseFullName(\n \"/job:foo_bar/replica:1/task:2/device:GPU:3\", &p));\n EXPECT_TRUE(DeviceNameUtils::ParseFullOrLocalName(\n \"/job:foo_bar/replica:1/task:2/device:GPU:3\", &p));\n EXPECT_TRUE(p.has_job);\n EXPECT_TRUE(p.has_replica);\n EXPECT_TRUE(p.has_task);\n EXPECT_TRUE(p.has_type);\n EXPECT_TRUE(p.has_id);\n EXPECT_EQ(p.job, \"foo_bar\");\n EXPECT_EQ(p.replica, 1);\n EXPECT_EQ(p.task, 2);\n EXPECT_EQ(p.type, \"GPU\");\n EXPECT_EQ(p.id, 3);\n }\n {\n DeviceNameUtils::ParsedName p;\n EXPECT_TRUE(DeviceNameUtils::ParseFullName(\n \"/job:foo_bar/replica:1/task:2/device:GPU:3\", &p));\n EXPECT_TRUE(p.has_job);\n EXPECT_TRUE(p.has_replica);\n EXPECT_TRUE(p.has_task);\n EXPECT_TRUE(p.has_type);\n EXPECT_TRUE(p.has_id);\n EXPECT_EQ(p.job, \"foo_bar\");\n 
EXPECT_EQ(p.replica, 1);\n EXPECT_EQ(p.task, 2);\n EXPECT_EQ(p.type, \"GPU\");\n EXPECT_EQ(p.id, 3);\n }\n {\n DeviceNameUtils::ParsedName p;\n EXPECT_TRUE(DeviceNameUtils::ParseFullName(\"/job:*/replica:4/gpu:*\", &p));\n EXPECT_FALSE(p.has_job);\n EXPECT_TRUE(p.has_replica);\n EXPECT_FALSE(p.has_task);\n EXPECT_TRUE(p.has_type);\n EXPECT_FALSE(p.has_id);\n EXPECT_EQ(p.replica, 4);\n EXPECT_EQ(p.type, \"GPU\");\n }\n {\n DeviceNameUtils::ParsedName p;\n EXPECT_TRUE(\n DeviceNameUtils::ParseFullName(\"/job:*/replica:4/device:GPU:*\", &p));\n EXPECT_FALSE(p.has_job);\n EXPECT_TRUE(p.has_replica);\n EXPECT_FALSE(p.has_task);\n EXPECT_TRUE(p.has_type);\n EXPECT_FALSE(p.has_id);\n EXPECT_EQ(p.replica, 4);\n EXPECT_EQ(p.type, \"GPU\");\n }\n {\n DeviceNameUtils::ParsedName p;\n EXPECT_TRUE(\n DeviceNameUtils::ParseFullName(\"/job:*/device:GPU/replica:4\", &p));\n EXPECT_FALSE(p.has_job);\n EXPECT_TRUE(p.has_replica);\n EXPECT_FALSE(p.has_task);\n EXPECT_TRUE(p.has_type);\n EXPECT_FALSE(p.has_id);\n EXPECT_EQ(p.replica, 4);\n EXPECT_EQ(p.type, \"GPU\");\n }\n {\n DeviceNameUtils::ParsedName p;\n EXPECT_TRUE(DeviceNameUtils::ParseFullName(\n \"/job:*/replica:4/device:myspecialdevice:13\", &p));\n EXPECT_FALSE(p.has_job);\n EXPECT_TRUE(p.has_replica);\n EXPECT_FALSE(p.has_task);\n EXPECT_TRUE(p.has_type);\n EXPECT_TRUE(p.has_id);\n EXPECT_EQ(p.replica, 4);\n EXPECT_EQ(p.type, \"myspecialdevice\");\n EXPECT_EQ(p.id, 13);\n }\n {\n DeviceNameUtils::ParsedName p;\n EXPECT_TRUE(DeviceNameUtils::ParseFullName(\"/\", &p));\n EXPECT_FALSE(p.has_job);\n EXPECT_FALSE(p.has_replica);\n EXPECT_FALSE(p.has_task);\n EXPECT_FALSE(p.has_type);\n EXPECT_FALSE(p.has_id);\n }\n {\n DeviceNameUtils::ParsedName p;\n EXPECT_TRUE(\n DeviceNameUtils::ParseFullName(\"/job:*/replica:4/device:GPU:5\", &p));\n EXPECT_FALSE(p.has_job);\n EXPECT_TRUE(p.has_replica);\n EXPECT_FALSE(p.has_task);\n EXPECT_TRUE(p.has_type);\n EXPECT_TRUE(p.has_id);\n EXPECT_EQ(p.replica, 4);\n EXPECT_EQ(p.type, \"GPU\");\n EXPECT_EQ(p.id, 5);\n }\n { \n DeviceNameUtils::ParsedName p;\n EXPECT_TRUE(DeviceNameUtils::ParseFullName(\"/gpu:*/job:*/replica:4\", &p));\n EXPECT_FALSE(p.has_job);\n EXPECT_TRUE(p.has_replica);\n EXPECT_FALSE(p.has_task);\n EXPECT_TRUE(p.has_type);\n EXPECT_FALSE(p.has_id);\n EXPECT_EQ(p.replica, 4);\n EXPECT_EQ(p.type, \"GPU\");\n }\n EXPECT_TRUE(DeviceNameUtils::IsSameAddressSpace(\n \"/job:foo/replica:1/task:2/cpu:3\",\n \"/job:foo/replica:1/task:2/device:GPU:4\"));\n EXPECT_FALSE(DeviceNameUtils::IsSameAddressSpace(\n \"/job:foo/replica:1/task:2/cpu:3\",\n \"/job:foo/replica:1/task:3/device:GPU:4\"));\n EXPECT_FALSE(DeviceNameUtils::IsSameAddressSpace(\n \"/job:foo/replica:1/task:2/cpu:3\",\n \"/job:foo/replica:10/task:2/device:GPU:4\"));\n EXPECT_FALSE(DeviceNameUtils::IsSameAddressSpace(\n \"/job:foo/replica:1/task:2/cpu:3\",\n \"/job:bar/replica:1/task:2/device:GPU:4\"));\n EXPECT_EQ(DeviceNameUtils::LocalName(\"CPU\", 1), \"/device:CPU:1\");\n EXPECT_EQ(DeviceNameUtils::LocalName(\"GPU\", 2), \"/device:GPU:2\");\n EXPECT_EQ(DeviceNameUtils::LocalName(\"MySpecialDevice\", 13),\n \"/device:MySpecialDevice:13\");\n EXPECT_EQ(\n DeviceNameUtils::LocalName(\"/job:foo/replica:1/task:2/device:CPU:3\"),\n \"/device:CPU:3\");\n EXPECT_EQ(DeviceNameUtils::LocalName(\"/job:foo/replica:1/task:2/cpu:3\"),\n \"/device:CPU:3\");\n EXPECT_EQ(\n DeviceNameUtils::LocalName(\"/job:foo/replica:1/task:2/device:abc:73\"),\n \"/device:abc:73\");\n {\n DeviceNameUtils::ParsedName p;\n EXPECT_TRUE(DeviceNameUtils::ParseLocalName(\"CPU:10\", 
&p));\n EXPECT_TRUE(DeviceNameUtils::ParseFullOrLocalName(\"CPU:10\", &p));\n EXPECT_EQ(p.type, \"CPU\");\n EXPECT_EQ(p.id, 10);\n EXPECT_FALSE(DeviceNameUtils::ParseLocalName(\"cpu:abc\", &p));\n EXPECT_FALSE(DeviceNameUtils::ParseLocalName(\"abc:\", &p));\n EXPECT_FALSE(DeviceNameUtils::ParseLocalName(\"abc\", &p));\n EXPECT_FALSE(DeviceNameUtils::ParseLocalName(\"myspecialdevice\", &p));\n EXPECT_FALSE(DeviceNameUtils::ParseFullOrLocalName(\"myspecialdevice\", &p));\n }\n {\n for (int i = 0; i < 0x10; ++i) {\n EXPECT_TRUE(RoundTripPartialName(i, {\"foo\", \"3\", \"2\", \"CPU:3\"},\n false));\n EXPECT_TRUE(RoundTripPartialName(i, {\"foo\", \"3\", \"2\", \"GPU:3\"},\n false));\n EXPECT_TRUE(RoundTripPartialName(i, {\"foo\", \"3\", \"2\", \"cpu:3\"},\n false));\n EXPECT_TRUE(RoundTripPartialName(i, {\"foo\", \"3\", \"2\", \"gpu:3\"},\n false));\n EXPECT_TRUE(RoundTripPartialName(i, {\"foo\", \"3\", \"2\", \"CPU:3\"},\n true));\n EXPECT_TRUE(RoundTripPartialName(i, {\"foo\", \"3\", \"2\", \"GPU:3\"},\n true));\n EXPECT_TRUE(RoundTripPartialName(i, {\"foo\", \"3\", \"2\", \"cpu:3\"},\n true));\n EXPECT_TRUE(RoundTripPartialName(i, {\"foo\", \"3\", \"2\", \"gpu:3\"},\n true));\n EXPECT_TRUE(RoundTripPartialName(i, {\"foo\", \"3\", \"2\", \"someDevice:3\"},\n true));\n }\n }\n {\n DeviceNameUtils::ParsedName x, y;\n DeviceNameUtils::ParseFullName(\"/job:work/replica:1/task:3/device:GPU:*\",\n &x);\n DeviceNameUtils::ParseFullName(\"/device:CPU:*\", &y);\n EXPECT_FALSE(DeviceNameUtils::AreCompatibleDevNames(x, y));\n }\n {\n DeviceNameUtils::ParsedName x, y;\n DeviceNameUtils::ParseFullName(\"/job:work/replica:1/task:3\", &x);\n DeviceNameUtils::ParseFullName(\"/device:CPU:*\", &y);\n EXPECT_TRUE(DeviceNameUtils::AreCompatibleDevNames(x, y));\n }\n}\nstatic bool IsCSHelper(absl::string_view pattern, absl::string_view actual) {\n DeviceNameUtils::ParsedName p, a;\n EXPECT_TRUE(DeviceNameUtils::ParseFullName(pattern, &p));\n EXPECT_TRUE(DeviceNameUtils::ParseFullName(actual, &a));\n return DeviceNameUtils::IsCompleteSpecification(p, a);\n}\nTEST(DeviceNameUtilsTest, IsCompleteSpecification) {\n EXPECT_TRUE(IsCSHelper(\"/job:*\", \"/job:work/replica:1/task:2/device:GPU:3\"));\n EXPECT_TRUE(IsCSHelper(\"/job:*/replica:*\",\n \"/job:work/replica:1/task:2/device:GPU:3\"));\n EXPECT_TRUE(\n IsCSHelper(\"/job:*/task:*\", \"/job:work/replica:1/task:2/device:GPU:3\"));\n EXPECT_TRUE(IsCSHelper(\"/job:*/replica:*/task:*\",\n \"/job:work/replica:1/task:2/device:GPU:3\"));\n EXPECT_TRUE(IsCSHelper(\"/job:*/replica:*/gpu:*\",\n \"/job:work/replica:1/task:2/device:GPU:3\"));\n EXPECT_FALSE(\n IsCSHelper(\"/cpu:*\", \"/job:worker/replica:1/task:2/device:GPU:3\"));\n EXPECT_FALSE(\n IsCSHelper(\"/device:GPU:2\", \"/job:worker/replica:1/task:2/device:GPU:1\"));\n EXPECT_TRUE(\n IsCSHelper(\"/gpu:*\", \"/job:worker/replica:1/task:2/device:GPU:3\"));\n}\nstatic bool IsSpecHelper(absl::string_view pattern, absl::string_view actual) {\n DeviceNameUtils::ParsedName p, a;\n EXPECT_TRUE(DeviceNameUtils::ParseFullName(pattern, &p));\n EXPECT_TRUE(DeviceNameUtils::ParseFullName(actual, &a));\n return DeviceNameUtils::IsSpecification(p, a);\n}\nTEST(DeviceNameUtilsTest, IsSpecification) {\n EXPECT_TRUE(\n IsSpecHelper(\"/job:*\", \"/job:work/replica:1/task:2/device:GPU:3\"));\n EXPECT_TRUE(IsSpecHelper(\"/job:*\", \"/job:work/replica:1/device:GPU:3\"));\n EXPECT_TRUE(IsSpecHelper(\"/job:*\", \"/job:work/replica:1\"));\n EXPECT_TRUE(IsSpecHelper(\"/job:*\", \"/replica:1\"));\n EXPECT_TRUE(IsSpecHelper(\"/job:*\", 
\"/job:work\"));\n EXPECT_TRUE(IsSpecHelper(\"/job:*/replica:*\",\n \"/job:work/replica:1/task:2/device:GPU:3\"));\n EXPECT_TRUE(IsSpecHelper(\"/job:work/replica:1/gpu:*\",\n \"/job:work/replica:1/task:2/device:GPU:3\"));\n EXPECT_TRUE(IsSpecHelper(\"/job:work/replica:1/device:GPU:3\",\n \"/job:work/replica:1/task:2/device:GPU:3\"));\n EXPECT_TRUE(IsSpecHelper(\"/job:work/replica:1/task:2\",\n \"/job:work/replica:1/task:2/device:GPU:3\"));\n EXPECT_TRUE(IsSpecHelper(\"/job:work/replica:*/task:2\",\n \"/job:work/replica:1/task:2/device:GPU:3\"));\n EXPECT_TRUE(IsSpecHelper(\"/task:*\", \"/job:*/replica:1/task:2/device:GPU:3\"));\n EXPECT_TRUE(IsSpecHelper(\"/task:2\", \"/job:*/replica:1/task:2/device:GPU:3\"));\n EXPECT_TRUE(IsSpecHelper(\"/cpu:*\", \"/job:*/replica:1/task:2/cpu:1\"));\n EXPECT_TRUE(IsSpecHelper(\"/cpu:0\", \"/cpu:0\"));\n EXPECT_TRUE(\n IsSpecHelper(\"/gpu:*\", \"/job:worker/replica:1/task:2/device:GPU:3\"));\n EXPECT_FALSE(\n IsSpecHelper(\"/job:worker/replica:1/task:2/device:GPU:3\", \"/gpu:*\"));\n EXPECT_FALSE(IsSpecHelper(\"/cpu:*\", \"/job:*/replica:1/task:2\"));\n EXPECT_FALSE(IsSpecHelper(\"/cpu:*\", \"/job:*/replica:1/task:2/device:GPU:1\"));\n EXPECT_FALSE(\n IsSpecHelper(\"/cpu:*\", \"/job:worker/replica:1/task:2/device:GPU:3\"));\n EXPECT_FALSE(IsSpecHelper(\"/device:GPU:2\",\n \"/job:worker/replica:1/task:2/device:GPU:1\"));\n EXPECT_FALSE(IsSpecHelper(\"/job:work/replica:*/task:0\",\n \"/job:work/replica:1/task:2/device:GPU:3\"));\n EXPECT_FALSE(IsSpecHelper(\"/job:work/replica:0/task:2\",\n \"/job:work/replica:*/task:2/device:GPU:3\"));\n}\nTEST(DeviceNameUtilsTest, SplitDeviceName) {\n string task;\n string device;\n EXPECT_TRUE(DeviceNameUtils::SplitDeviceName(\n \"/job:foo/replica:1/task:2/cpu:1\", &task, &device));\n EXPECT_EQ(\"/job:foo/replica:1/task:2\", task);\n EXPECT_EQ(\"CPU:1\", device);\n EXPECT_TRUE(DeviceNameUtils::SplitDeviceName(\n \"/job:foo/cpu:1/task:2/replica:1\", &task, &device));\n EXPECT_EQ(\"/job:foo/replica:1/task:2\", task);\n EXPECT_EQ(\"CPU:1\", device);\n EXPECT_TRUE(\n DeviceNameUtils::SplitDeviceName(\"/device:GPU:3\", &task, &device));\n EXPECT_EQ(\"\", task);\n EXPECT_EQ(\"GPU:3\", device);\n EXPECT_FALSE(DeviceNameUtils::SplitDeviceName(\"gpu:3\", &task, &device));\n EXPECT_FALSE(DeviceNameUtils::SplitDeviceName(\"/job:foo/task:2/replica:1\",\n &task, &device));\n EXPECT_TRUE(DeviceNameUtils::SplitDeviceName(\"/device:myspecialdevice:3\",\n &task, &device));\n EXPECT_EQ(\"\", task);\n EXPECT_EQ(\"myspecialdevice:3\", device);\n}\nstatic DeviceNameUtils::ParsedName Name(const string& str) {\n DeviceNameUtils::ParsedName ret;\n CHECK(DeviceNameUtils::ParseFullName(str, &ret)) << \"Invalid name: \" << str;\n return ret;\n}\nstatic void MergeDevNamesHelperImpl(const string& name_a, const string& name_b,\n const string& expected_merge_name,\n bool allow_soft_placement) {\n DeviceNameUtils::ParsedName target_a = Name(name_a);\n TF_EXPECT_OK(DeviceNameUtils::MergeDevNames(&target_a, Name(name_b),\n allow_soft_placement));\n DeviceNameUtils::ParsedName target_b = Name(name_b);\n TF_EXPECT_OK(DeviceNameUtils::MergeDevNames(&target_b, Name(name_a),\n allow_soft_placement));\n EXPECT_EQ(target_a, target_b);\n EXPECT_EQ(target_a, Name(expected_merge_name));\n EXPECT_EQ(target_b, Name(expected_merge_name));\n}\nstatic void MergeDevNamesHelper(const string& name_a, const string& name_b,\n const string& expected_merge_name) {\n MergeDevNamesHelperImpl(name_a, name_b, expected_merge_name, false);\n}\nstatic void 
MergeDevNamesHelperAllowSoftPlacement(\n const string& name_a, const string& name_b,\n const string& expected_merge_name) {\n MergeDevNamesHelperImpl(name_a, name_b, expected_merge_name, true);\n}\nstatic void MergeDevNamesError(const string& name_a, const string& name_b,\n const string& expected_error_substr) {\n DeviceNameUtils::ParsedName target_a = Name(name_a);\n absl::Status s = DeviceNameUtils::MergeDevNames(&target_a, Name(name_b));\n EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);\n EXPECT_TRUE(absl::StrContains(s.message(), expected_error_substr)) << s;\n}\nstatic void MergeOverrideHelper(const string& target, const string& name,\n const string& expected_merge_name) {\n DeviceNameUtils::ParsedName parsed_target = Name(target);\n TF_EXPECT_OK(\n DeviceNameUtils::MergeOverrideDevNames(&parsed_target, Name(name)));\n DeviceNameUtils::ParsedName parsed_expected = Name(expected_merge_name);\n EXPECT_EQ(parsed_target, parsed_expected)\n << \"parsed_target: \" << DeviceNameUtils::ParsedNameToString(parsed_target)\n << \" expected_name: \"\n << DeviceNameUtils::ParsedNameToString(parsed_expected);\n}\nstatic void MergeUnsetDevNamesHelper(const string& name_a, const string& name_b,\n const string& expected_merge_name_ab,\n const string& expected_merge_name_ba) {\n DeviceNameUtils::ParsedName target_a = Name(name_a);\n DeviceNameUtils::MergeUnsetDevNames(&target_a, Name(name_b));\n EXPECT_EQ(target_a, Name(expected_merge_name_ab));\n DeviceNameUtils::ParsedName target_b = Name(name_b);\n DeviceNameUtils::MergeUnsetDevNames(&target_b, Name(name_a));\n EXPECT_EQ(target_b, Name(expected_merge_name_ba));\n}\nTEST(DeviceNameUtilsTest, MergeDevNames) {\n MergeDevNamesHelper(\"\", \"\", \"\");\n MergeDevNamesHelper(\"/job:foo/replica:1/task:2/cpu:1\",\n \"/job:foo/replica:1/task:2/cpu:1\",\n \"/job:foo/replica:1/task:2/cpu:1\");\n MergeDevNamesHelper(\"\", \"/job:foo\", \"/job:foo\");\n MergeDevNamesHelper(\"\", \"/replica:2\", \"/replica:2\");\n MergeDevNamesHelper(\"\", \"/task:7\", \"/task:7\");\n MergeDevNamesHelper(\"\", \"/device:GPU:1\", \"/device:GPU:1\");\n MergeDevNamesHelper(\"/job:foo\", \"/task:7\", \"/job:foo/task:7\");\n MergeDevNamesHelper(\"/job:foo\", \"/device:GPU:1\", \"/job:foo/device:GPU:1\");\n MergeDevNamesHelper(\"/job:foo/replica:0\", \"/replica:0/task:1\",\n \"/job:foo/replica:0/task:1\");\n MergeDevNamesHelper(\"\", \"/gpu:*\", \"/gpu:*\");\n MergeDevNamesHelper(\"/gpu:*\", \"/gpu:*\", \"/gpu:*\");\n MergeDevNamesHelper(\"/device:GPU:1\", \"/gpu:*\", \"/device:GPU:1\");\n MergeDevNamesError(\"/job:foo\", \"/job:bar\", \"incompatible jobs\");\n MergeDevNamesError(\"/replica:0\", \"/replica:1\", \"incompatible replicas\");\n MergeDevNamesError(\"/task:0\", \"/task:1\", \"incompatible tasks\");\n MergeDevNamesError(\"/gpu:*\", \"/cpu:*\", \"incompatible types\");\n MergeDevNamesError(\"/device:GPU:0\", \"/device:GPU:1\", \"incompatible ids\");\n}\nTEST(DeviceNameUtilsTest, MergeDevNamesAllowSoftPlacement) {\n MergeDevNamesHelperAllowSoftPlacement(\"/gpu:*\", \"/cpu:1\", \"\");\n MergeDevNamesHelperAllowSoftPlacement(\"/cpu:*\", \"/device:GPU:1\", \"\");\n MergeDevNamesHelperAllowSoftPlacement(\"/device:GPU:1\", \"/device:GPU:2\",\n \"/device:GPU:*\");\n}\nTEST(DeviceNameUtilsTest, MergeOverrideDevNames) {\n MergeOverrideHelper(\"\", \"\", \"\");\n MergeOverrideHelper(\"/job:foo/replica:1/task:2/cpu:1\",\n \"/job:foo/replica:1/task:2/cpu:1\",\n \"/job:foo/replica:1/task:2/cpu:1\");\n MergeOverrideHelper(\"\", \"/job:foo\", \"/job:foo\");\n MergeOverrideHelper(\"\", 
\"/replica:2\", \"/replica:2\");\n MergeOverrideHelper(\"\", \"/task:7\", \"/task:7\");\n MergeOverrideHelper(\"\", \"/device:GPU:1\", \"/device:GPU:1\");\n MergeOverrideHelper(\"/job:foo\", \"/task:7\", \"/job:foo/task:7\");\n MergeOverrideHelper(\"/job:foo\", \"/device:GPU:1\", \"/job:foo/device:GPU:1\");\n MergeOverrideHelper(\"/job:foo/replica:0\", \"/replica:0/task:1\",\n \"/job:foo/replica:0/task:1\");\n MergeOverrideHelper(\"\", \"/gpu:*\", \"/gpu:*\");\n MergeOverrideHelper(\"/gpu:*\", \"/gpu:*\", \"/gpu:*\");\n MergeOverrideHelper(\"/device:GPU:1\", \"/gpu:*\", \"/device:GPU:1\");\n MergeOverrideHelper(\"/gpu:0\", \"/cpu:1\", \"/cpu:1\");\n MergeOverrideHelper(\"/gpu:*\", \"/cpu:1\", \"/cpu:1\");\n MergeOverrideHelper(\"/cpu:*\", \"/device:GPU:1\", \"/gpu:1\");\n MergeOverrideHelper(\"/device:GPU:1\", \"/device:GPU:2\", \"/device:GPU:2\");\n MergeOverrideHelper(\"/job:foo/CPU:*\", \"/device:GPU:1\", \"/job:foo/GPU:1\");\n MergeOverrideHelper(\"/cpu:*\", \"/job:foo/device:GPU:1\", \"/job:foo/GPU:1\");\n MergeOverrideHelper(\"/task:0/cpu:*\", \"/device:GPU:1\", \"/task:0/GPU:1\");\n MergeOverrideHelper(\"/cpu:*\", \"/task:0/device:GPU:1\", \"/task:0/GPU:1\");\n}\nTEST(DeviceNameUtilsTest, MergeUnsetDevNames) {\n MergeUnsetDevNamesHelper(\"\", \"\", \"\", \"\");\n MergeUnsetDevNamesHelper(\n \"/job:foo/replica:1/task:2/cpu:1\", \"/job:foo/replica:1/task:2/cpu:1\",\n \"/job:foo/replica:1/task:2/cpu:1\", \"/job:foo/replica:1/task:2/cpu:1\");\n MergeUnsetDevNamesHelper(\"\", \"/job:foo\", \"/job:foo\", \"/job:foo\");\n MergeUnsetDevNamesHelper(\"\", \"/replica:2\", \"/replica:2\", \"/replica:2\");\n MergeUnsetDevNamesHelper(\"\", \"/task:7\", \"/task:7\", \"/task:7\");\n MergeUnsetDevNamesHelper(\"\", \"/device:GPU:1\", \"/device:GPU:1\",\n \"/device:GPU:1\");\n MergeUnsetDevNamesHelper(\"/job:foo\", \"/task:7\", \"/job:foo/task:7\",\n \"/job:foo/task:7\");\n MergeUnsetDevNamesHelper(\"/job:foo\", \"/device:GPU:1\", \"/job:foo/device:GPU:1\",\n \"/job:foo/device:GPU:1\");\n MergeUnsetDevNamesHelper(\"/job:foo/replica:0\", \"/replica:0/task:1\",\n \"/job:foo/replica:0/task:1\",\n \"/job:foo/replica:0/task:1\");\n MergeUnsetDevNamesHelper(\"\", \"/gpu:*\", \"/gpu:*\", \"/gpu:*\");\n MergeUnsetDevNamesHelper(\"/gpu:*\", \"/gpu:*\", \"/gpu:*\", \"/gpu:*\");\n MergeUnsetDevNamesHelper(\"/device:GPU:1\", \"/gpu:*\", \"/device:GPU:1\",\n \"/device:GPU:1\");\n MergeUnsetDevNamesHelper(\"/job:foo\", \"/job:bar\", \"/job:foo\", \"/job:bar\");\n MergeUnsetDevNamesHelper(\"/replica:0\", \"/replica:1\", \"/replica:0\",\n \"/replica:1\");\n MergeUnsetDevNamesHelper(\"/task:0\", \"/task:1\", \"/task:0\", \"/task:1\");\n MergeUnsetDevNamesHelper(\"/gpu:*\", \"/cpu:*\", \"/gpu:*\", \"/cpu:*\");\n MergeUnsetDevNamesHelper(\"/device:GPU:0\", \"/device:GPU:1\", \"/device:GPU:0\",\n \"/device:GPU:1\");\n MergeUnsetDevNamesHelper(\"/job:foo/device:GPU\", \"/job:bar\",\n \"/job:foo/device:GPU\", \"/job:bar/device:GPU\");\n}\nTEST(DeviceNameUtilsTest, GetNamesForDeviceMappings) {\n DeviceNameUtils::ParsedName p =\n Name(\"/job:foo/replica:10/task:0/device:GPU:1\");\n EXPECT_EQ(absl::StrJoin(DeviceNameUtils::GetNamesForDeviceMappings(p), \",\"),\n \"/job:foo/replica:10/task:0/device:GPU:1,\"\n \"/job:foo/replica:10/task:0/gpu:1\");\n p.has_task = false;\n EXPECT_EQ(absl::StrJoin(DeviceNameUtils::GetNamesForDeviceMappings(p), \",\"),\n \"\");\n}\nTEST(DeviceNameUtilsTest, CanonicalizeDeviceName) {\n string canonical_name;\n {\n string basename = \"/job:foo/replica:10/task:0/device:CPU:0\";\n 
TF_EXPECT_OK(DeviceNameUtils::CanonicalizeDeviceName(\n \"/job:foo/replica:10/task:0/device:CPU:1\", basename, &canonical_name));\n EXPECT_EQ(\"/job:foo/replica:10/task:0/device:CPU:1\", canonical_name);\n TF_EXPECT_OK(DeviceNameUtils::CanonicalizeDeviceName(\n \"/job:foo/task:0/replica:10/device:CPU:1\", basename, &canonical_name));\n EXPECT_EQ(\"/job:foo/replica:10/task:0/device:CPU:1\", canonical_name);\n TF_EXPECT_OK(DeviceNameUtils::CanonicalizeDeviceName(\n \"/job:foo/task:0/replica:10/cpu:1\", basename, &canonical_name));\n EXPECT_EQ(\"/job:foo/replica:10/task:0/device:CPU:1\", canonical_name);\n TF_EXPECT_OK(DeviceNameUtils::CanonicalizeDeviceName(\"CPU:0\", basename,\n &canonical_name));\n EXPECT_EQ(\"/job:foo/replica:10/task:0/device:CPU:0\", canonical_name);\n absl::Status s = DeviceNameUtils::CanonicalizeDeviceName(\n \"/job:foo/task:0/replica/cpu:1\", basename, &canonical_name);\n EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);\n EXPECT_EQ(\"\", canonical_name);\n }\n {\n string fullname = \"/device:CPU:0\";\n absl::Status s = DeviceNameUtils::CanonicalizeDeviceName(\n fullname, \"/device:CPU:0\", &canonical_name);\n EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);\n EXPECT_EQ(\"\", canonical_name);\n s = DeviceNameUtils::CanonicalizeDeviceName(\n fullname, \"/job:foo/task:0/replica/cpu:1\", &canonical_name);\n EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);\n EXPECT_EQ(\"\", canonical_name);\n }\n}\nTEST(DeviceNameUtilsTest, CompareFullNames) {\n EXPECT_FALSE(DeviceNameUtils::CompareFullNames(\n \"/job:foo/replica:0/task:0/cpu:0\", \"/job:foo/replica:0/task:0/cpu:0\"));\n EXPECT_FALSE(DeviceNameUtils::CompareFullNames(\n \"/job:foo/replica:0/task:0/device:CPU:1\",\n \"/job:foo/replica:0/task:0/device:CPU:0\"));\n EXPECT_FALSE(DeviceNameUtils::CompareFullNames(\n \"/job:foo/replica:0/task:1/device:CPU:0\",\n \"/job:foo/replica:0/task:0/device:CPU:0\"));\n EXPECT_FALSE(DeviceNameUtils::CompareFullNames(\n \"/job:foo/replica:1/task:0/device:CPU:0\",\n \"/job:foo/replica:0/task:0/device:CPU:0\"));\n EXPECT_FALSE(DeviceNameUtils::CompareFullNames(\n \"/job:goo/replica:0/task:0/device:CPU:0\",\n \"/job:foo/replica:0/task:0/device:CPU:0\"));\n EXPECT_FALSE(DeviceNameUtils::CompareFullNames(\n \"/job:foo/replica:0/task:0/device:GPU:0\",\n \"/job:foo/replica:0/task:0/device:CPU:0\"));\n EXPECT_TRUE(DeviceNameUtils::CompareFullNames(\n \"/job:foo/replica:0/task:0/device:CPU:0\",\n \"/job:foo/replica:0/task:0/device:CPU:1\"));\n EXPECT_TRUE(DeviceNameUtils::CompareFullNames(\n \"/job:foo/replica:0/task:0/device:CPU:0\",\n \"/job:foo/replica:0/task:1/device:CPU:0\"));\n EXPECT_TRUE(DeviceNameUtils::CompareFullNames(\n \"/job:foo/replica:0/task:0/device:CPU:0\",\n \"/job:foo/replica:1/task:0/device:CPU:0\"));\n EXPECT_TRUE(DeviceNameUtils::CompareFullNames(\n \"/job:foo/replica:0/task:0/device:CPU:0\",\n \"/job:goo/replica:0/task:0/device:CPU:0\"));\n EXPECT_TRUE(DeviceNameUtils::CompareFullNames(\n \"/job:foo/replica:0/task:0/device:CPU:0\",\n \"/job:foo/replica:0/task:0/device:GPU:0\"));\n EXPECT_FALSE(\n DeviceNameUtils::CompareFullNames(\"/device:CPU:1\", \"unparseablename\"));\n EXPECT_TRUE(\n DeviceNameUtils::CompareFullNames(\"unparseablename\", \"/device:CPU:1\"));\n EXPECT_TRUE(DeviceNameUtils::CompareFullNames(\n \"/replica:0/task:0/device:CPU:1\",\n \"/job:foo/replica:0/task:0/device:CPU:0\"));\n EXPECT_FALSE(DeviceNameUtils::CompareFullNames(\n \"/job:foo/replica:0/task:0/device:CPU:0\",\n \"/replica:0/task:0/device:CPU:0\"));\n EXPECT_TRUE(DeviceNameUtils::CompareFullNames(\n 
\"/replica:0/task:0/device:CPU:0\", \"/replica:0/task:0/device:CPU:1\"));\n EXPECT_TRUE(DeviceNameUtils::CompareFullNames(\"/task:0/device:CPU:0\",\n \"/task:0/device:CPU:1\"));\n EXPECT_TRUE(\n DeviceNameUtils::CompareFullNames(\"/device:CPU:0\", \"/device:CPU:1\"));\n}\nstatic void BM_ParseFullName(::testing::benchmark::State& state) {\n DeviceNameUtils::ParsedName p;\n for (auto s : state) {\n DeviceNameUtils::ParseFullName(\"/job:worker/replica:3/task:0/cpu:0\", &p);\n }\n}\nBENCHMARK(BM_ParseFullName);\n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/util/device_name_utils.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/util/device_name_utils_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":254,"cells":{"ID":{"kind":"string","value":"2cd5293f-5da0-4198-a703-16efdd40591a"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"collective_permute_motion"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/spmd/collective_permute_motion.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/spmd/collective_permute_motion_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/spmd/collective_permute_motion.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/comparison_util.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/service/while_loop_analysis.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nabsl::flat_hash_set FindLoopConsts(HloComputation* body) {\n HloInstruction* root = body->root_instruction();\n CHECK_EQ(root->opcode(), HloOpcode::kTuple);\n absl::flat_hash_set loop_consts;\n for (int64_t i = 0; i < root->operand_count(); ++i) {\n HloInstruction* output = root->mutable_operand(i);\n while (output->opcode() == HloOpcode::kReshape ||\n output->opcode() == HloOpcode::kCopy) {\n output = output->mutable_operand(0);\n }\n if (output->opcode() == HloOpcode::kGetTupleElement &&\n output->tuple_index() == i &&\n output->operand(0) == body->parameter_instruction(0)) {\n loop_consts.insert(output);\n }\n }\n for (HloInstruction* inst : body->MakeInstructionPostOrder()) {\n if (inst->IsConstant() || inst->opcode() == HloOpcode::kIota ||\n inst->opcode() == HloOpcode::kReplicaId ||\n inst->opcode() == HloOpcode::kPartitionId) {\n loop_consts.insert(inst);\n continue;\n }\n if (!inst->IsElementwise() && inst->opcode() != HloOpcode::kBroadcast &&\n inst->opcode() != HloOpcode::kReduce &&\n inst->opcode() != HloOpcode::kReshape &&\n inst->opcode() != HloOpcode::kDynamicSlice &&\n inst->opcode() != HloOpcode::kTranspose) {\n continue;\n }\n if (inst->HasSideEffectNoRecurse()) {\n continue;\n }\n if (absl::c_all_of(inst->operands(), [&](const HloInstruction* operand) {\n return 
loop_consts.contains(operand);\n        })) {\n      loop_consts.insert(inst);\n    }\n  }\n  return loop_consts;\n}\nconstexpr int64_t kMaxMovableClusterSize = 8;\nstruct MovableCluster {\n  int64_t root_tuple_index;\n  std::vector<HloInstruction*> reverse_order_instructions;\n  HloInstruction* collective_permute = nullptr;\n};\nstd::optional<MovableCluster> FindMovableClusterAtBodyRoot(\n    HloComputation* body, int64_t root_tuple_index,\n    const absl::flat_hash_set<HloInstruction*>& loop_consts) {\n  HloInstruction* root = body->root_instruction();\n  CHECK_EQ(root->opcode(), HloOpcode::kTuple);\n  MovableCluster cluster;\n  cluster.root_tuple_index = root_tuple_index;\n  std::deque<HloInstruction*> queue;\n  queue.push_back(root->mutable_operand(root_tuple_index));\n  while (!queue.empty()) {\n    HloInstruction* visiting = queue.front();\n    queue.pop_front();\n    if (cluster.reverse_order_instructions.size() >= kMaxMovableClusterSize) {\n      VLOG(2) << \"Cannot move: too many instructions to move\";\n      return std::nullopt;\n    }\n    if (visiting->user_count() > 1) {\n      VLOG(2) << \"Cannot move: \" << visiting->name() << \" used multiple times\";\n      return std::nullopt;\n    }\n    cluster.reverse_order_instructions.push_back(visiting);\n    if (visiting->opcode() == HloOpcode::kCollectivePermute) {\n      if (cluster.collective_permute != nullptr) {\n        VLOG(2) << \"Cannot move: \" << visiting->name()\n                << \" multiple collective permutes\";\n        return std::nullopt;\n      }\n      cluster.collective_permute = visiting;\n      continue;\n    }\n    if (!visiting->IsElementwise() || visiting->HasSideEffectNoRecurse()) {\n      VLOG(2) << \"Cannot move: \" << visiting->name() << \" unsupported op\";\n      return std::nullopt;\n    }\n    for (HloInstruction* operand : visiting->mutable_operands()) {\n      if (!loop_consts.contains(operand)) {\n        queue.push_back(operand);\n      }\n    }\n  }\n  if (cluster.collective_permute == nullptr) {\n    return std::nullopt;\n  }\n  return cluster;\n}\nabsl::flat_hash_set<int64_t> FindIndicesUnusedAfterLoop(HloInstruction* loop) {\n  absl::flat_hash_set<int64_t> indices;\n  int64_t count = loop->shape().tuple_shapes_size();\n  for (int64_t i = 0; i < count; ++i) {\n    indices.insert(i);\n  }\n  for (HloInstruction* user : loop->users()) {\n    if (user->opcode() != HloOpcode::kGetTupleElement) {\n      indices.clear();\n      break;\n    }\n    indices.erase(user->tuple_index());\n  }\n  return indices;\n}\nabsl::StatusOr<bool> MoveCollectivePermutes(HloComputation* computation,\n                                            HloInstruction* loop) {\n  HloComputation* body = loop->while_body();\n  HloInstruction* root = body->root_instruction();\n  if (root->opcode() != HloOpcode::kTuple ||\n      loop->operand(0)->opcode() != HloOpcode::kTuple) {\n    return false;\n  }\n  auto maybe_induction_var_idx = GetLoopInductionVarTupleIdx(loop);\n  if (!maybe_induction_var_idx.has_value()) {\n    VLOG(2) << \"Skip \" << loop->name() << \", no induction var\";\n    return false;\n  }\n  absl::flat_hash_map<const HloInstruction*, int64_t> output_appear_counts;\n  for (const HloInstruction* operand : root->operands()) {\n    auto res = output_appear_counts.emplace(operand, 1);\n    if (!res.second) {\n      res.first->second++;\n    }\n  }\n  absl::flat_hash_set<int64_t> unused_indices_after_loop =\n      FindIndicesUnusedAfterLoop(loop);\n  const absl::flat_hash_set<HloInstruction*> loop_consts = FindLoopConsts(body);\n  int64_t induction_var_idx = *maybe_induction_var_idx;\n  std::vector<HloInstruction*> input_gtes(root->operand_count(), nullptr);\n  absl::flat_hash_set<int64_t> multi_use_indices;\n  for (HloInstruction* user : body->parameter_instruction(0)->users()) {\n    if (user->opcode() != HloOpcode::kGetTupleElement) {\n      VLOG(2) << \"Skip \" << loop->name() << \", non-GTE input use\";\n      return false;\n    }\n    if (multi_use_indices.contains(user->tuple_index())) 
{\n      continue;\n    }\n    if (input_gtes[user->tuple_index()] != nullptr) {\n      multi_use_indices.insert(user->tuple_index());\n      input_gtes[user->tuple_index()] = nullptr;\n    } else {\n      input_gtes[user->tuple_index()] = user;\n    }\n  }\n  HloInstruction* ind_var = input_gtes[induction_var_idx];\n  if (ind_var == nullptr || ind_var->shape().rank() > 0) {\n    VLOG(2) << \"Skip \" << loop->name() << \", non-scalar induction var\";\n    return false;\n  }\n  if (root->operand(induction_var_idx)->opcode() != HloOpcode::kAdd &&\n      root->operand(induction_var_idx)->opcode() != HloOpcode::kSubtract) {\n    VLOG(2) << \"Skip \" << loop->name() << \", non-add/sub induction var\";\n    return false;\n  }\n  if (root->operand(induction_var_idx)->operand(0) == ind_var) {\n    if (!root->operand(induction_var_idx)->operand(1)->IsConstant()) {\n      VLOG(2) << \"Skip \" << loop->name() << \", non-add/sub const induction var\";\n      return false;\n    }\n  } else if (root->operand(induction_var_idx)->operand(1) == ind_var) {\n    if (!root->operand(induction_var_idx)->operand(0)->IsConstant()) {\n      VLOG(2) << \"Skip \" << loop->name() << \", non-add/sub const induction var\";\n      return false;\n    }\n  } else {\n    return false;\n  }\n  HloInstruction* ind_var_orig =\n      loop->mutable_operand(0)->mutable_operand(induction_var_idx);\n  if (!ind_var_orig->IsConstant()) {\n    VLOG(2) << \"Skip \" << loop->name()\n            << \", non-constant initial induction var\";\n    return false;\n  }\n  bool changed = false;\n  std::vector movable_outputs;\n  for (int64_t i = 0; i < root->operand_count(); ++i) {\n    if (output_appear_counts[root->operand(i)] > 1) {\n      VLOG(2) << \"Skip \" << loop->name() << \" index \" << i\n              << \" appears multiple times in output.\";\n      continue;\n    }\n    if (!unused_indices_after_loop.contains(i)) {\n      VLOG(2) << \"Skip \" << loop->name() << \" index \" << i\n              << \" used after loop.\";\n      continue;\n    }\n    auto cluster = FindMovableClusterAtBodyRoot(body, i, loop_consts);\n    if (!cluster.has_value()) {\n      VLOG(2) << \"Skip \" << loop->name() << \" index \" << i\n              << \" did not find a movable cluster.\";\n      continue;\n    }\n    HloInstruction* input = input_gtes[cluster->root_tuple_index];\n    HloInstruction* cp = cluster->collective_permute;\n    if (input == nullptr || cp->operand(0) == input) {\n      VLOG(2) << \"Skip \" << loop->name() << \" index \" << i\n              << \" collective-permute already at top.\";\n      continue;\n    }\n    const std::vector<HloInstruction*> original_input_users = input->users();\n    absl::flat_hash_map<const HloInstruction*, HloInstruction*> replacement;\n    replacement[cp->operand(0)] = input;\n    for (auto it = cluster->reverse_order_instructions.rbegin();\n         it != cluster->reverse_order_instructions.rend(); ++it) {\n      HloInstruction* inst = *it;\n      std::vector<HloInstruction*> new_operands;\n      for (HloInstruction* operand : inst->mutable_operands()) {\n        auto rit = replacement.find(operand);\n        if (rit != replacement.end()) {\n          new_operands.push_back(rit->second);\n        } else {\n          new_operands.push_back(operand);\n        }\n      }\n      HloInstruction* clone = body->AddInstruction(\n          inst->CloneWithNewOperands(inst->shape(), new_operands));\n      replacement[inst] = clone;\n    }\n    HloInstruction* new_input =\n        replacement[cluster->reverse_order_instructions[0]];\n    if (ind_var_orig->parent() != body) {\n      ind_var_orig = body->AddInstruction(ind_var_orig->Clone());\n    }\n    HloInstruction* is_first_iter =\n        body->AddInstruction(HloInstruction::CreateBroadcast(\n            ShapeUtil::ChangeElementType(new_input->shape(), PRED),\n            body->AddInstruction(HloInstruction::CreateCompare(\n                ShapeUtil::MakeScalarShape(PRED), ind_var, ind_var_orig,\n                Comparison::Direction::kEq)),\n            {}));\n    new_input = 
body->AddInstruction(\n        HloInstruction::CreateTernary(new_input->shape(), HloOpcode::kSelect,\n                                      is_first_iter, input, new_input));\n    for (HloInstruction* user : original_input_users) {\n      TF_RETURN_IF_ERROR(input->ReplaceUseWith(user, new_input));\n    }\n    TF_RETURN_IF_ERROR(root->ReplaceOperandWith(cluster->root_tuple_index,\n                                                cp->mutable_operand(0)));\n    TF_RETURN_IF_ERROR(body->RemoveInstructionAndUnusedOperands(\n        cluster->reverse_order_instructions[0]));\n    VLOG(2) << \"Moved \" << loop->name() << \" index \" << i;\n    changed = true;\n  }\n  return changed;\n}\nabsl::StatusOr<bool> CollectivePermuteMotion::Run(\n    HloModule* module,\n    const absl::flat_hash_set<absl::string_view>& execution_threads) {\n  bool changed = false;\n  for (HloComputation* computation :\n       module->MakeNonfusionComputations(execution_threads)) {\n    for (HloInstruction* instr : computation->MakeInstructionPostOrder()) {\n      if (instr->opcode() == HloOpcode::kWhile) {\n        TF_ASSIGN_OR_RETURN(bool moved,\n                            MoveCollectivePermutes(computation, instr));\n        changed |= moved;\n      }\n    }\n  }\n  return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/spmd/collective_permute_motion.h\"\n#include \n#include \n#include \"absl/log/log.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/xla_data.pb.h\"\nnamespace xla {\nnamespace {\nusing CollectivePermuteMotionTest = HloTestBase;\nnamespace op = xla::testing::opcode_matchers;\nTEST_F(CollectivePermuteMotionTest, SimpleMove) {\n  absl::string_view hlo_string = R\"(\n  HloModule test\n  body {\n    loop_var = (s32[], f32[4,4]) parameter(0)\n    constant.1 = s32[] constant(1)\n    gte0 = s32[] get-tuple-element(loop_var), index=0\n    add = s32[] add(gte0, constant.1)\n    gte1 = f32[4,4] get-tuple-element(loop_var), index=1\n    mul = f32[4,4] multiply(gte1, gte1)\n    cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}\n    ROOT tuple = (s32[], f32[4,4]) tuple(add, cp)\n  }\n  cond {\n    loop_var = (s32[], f32[4,4]) parameter(0)\n    gte.cond = s32[] get-tuple-element(loop_var), index=0\n    constant.3 = s32[] constant(5)\n    ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT\n  }\n  ENTRY main {\n    constant.2 = s32[] constant(0)\n    param = f32[4,4] parameter(0)\n    tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)\n    while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body\n    ROOT result = s32[] get-tuple-element(while), index=0\n  }\n)\";\n  auto module = ParseAndReturnVerifiedModule(hlo_string).value();\n  CollectivePermuteMotion pass;\n  ASSERT_TRUE(pass.Run(&*module).value());\n  VLOG(1) << module->ToString();\n  const HloInstruction* loop = FindInstruction(module.get(), \"while\");\n  const HloInstruction* output =\n      loop->while_body()->root_instruction()->operand(1);\n  auto input =\n      AllOf(op::Shape(\"f32[4,4]\"), op::GetTupleElement(op::Parameter(0)));\n  auto cp = op::CollectivePermute(input);\n  auto select = op::Select(op::Broadcast(op::Compare()), input, cp);\n  EXPECT_THAT(output, op::Multiply(select, select));\n}\nTEST_F(CollectivePermuteMotionTest, NoCollectivePermute) {\n  absl::string_view hlo_string = R\"(\n  HloModule test\n  body {\n    loop_var = (s32[], f32[], f32[]) parameter(0)\n    constant.1 = s32[] constant(1)\n    gte0 = s32[] get-tuple-element(loop_var), index=0\n    add = s32[] add(gte0, constant.1)\n    gte1 = f32[] get-tuple-element(loop_var), index=1\n    constant.4 = f32[] constant(4.0)\n    ROOT tuple = (s32[], f32[], f32[]) tuple(add, 
constant.4, gte1)\n }\n cond {\n loop_var = (s32[], f32[], f32[]) parameter(0)\n gte.cond = s32[] get-tuple-element(loop_var), index=0\n constant.3 = s32[] constant(5)\n ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT\n }\n ENTRY main {\n constant.2 = s32[] constant(0)\n param = f32[] parameter(0)\n param.1 = f32[] parameter(1)\n tuple.1 = (s32[], f32[], f32[]) tuple(constant.2, param, param.1)\n while = (s32[], f32[], f32[]) while(tuple.1), condition=cond, body=body\n ROOT result = s32[] get-tuple-element(while), index=0\n }\n)\";\n auto module = ParseAndReturnVerifiedModule(hlo_string).value();\n CollectivePermuteMotion pass;\n ASSERT_FALSE(pass.Run(&*module).value());\n}\nTEST_F(CollectivePermuteMotionTest, MoveWithElementwise) {\n absl::string_view hlo_string = R\"(\n HloModule test\n body {\n loop_var = (s32[], f32[4,4]) parameter(0)\n constant.1 = s32[] constant(1)\n gte0 = s32[] get-tuple-element(loop_var), index=0\n add = s32[] add(gte0, constant.1)\n gte1 = f32[4,4] get-tuple-element(loop_var), index=1\n mul = f32[4,4] multiply(gte1, gte1)\n cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}\n constant.4 = f32[] constant(1)\n broadcast = f32[4,4] broadcast(constant.4), dimensions={}\n add1 = f32[4,4] add(cp, broadcast)\n ROOT tuple = (s32[], f32[4,4]) tuple(add, add1)\n }\n cond {\n loop_var = (s32[], f32[4,4]) parameter(0)\n gte.cond = s32[] get-tuple-element(loop_var), index=0\n constant.3 = s32[] constant(5)\n ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT\n }\n ENTRY main {\n constant.2 = s32[] constant(0)\n param = f32[4,4] parameter(0)\n tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)\n while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body\n ROOT result = s32[] get-tuple-element(while), index=0\n }\n)\";\n auto module = ParseAndReturnVerifiedModule(hlo_string).value();\n CollectivePermuteMotion pass;\n ASSERT_TRUE(pass.Run(&*module).value());\n VLOG(1) << module->ToString();\n const HloInstruction* loop = FindInstruction(module.get(), \"while\");\n const HloInstruction* output =\n loop->while_body()->root_instruction()->operand(1);\n auto input =\n AllOf(op::Shape(\"f32[4,4]\"), op::GetTupleElement(op::Parameter(0)));\n auto moved =\n op::Add(op::CollectivePermute(input), op::Broadcast(op::Constant()));\n auto select = op::Select(op::Broadcast(op::Compare()), input, moved);\n EXPECT_THAT(output, op::Multiply(select, select));\n}\nTEST_F(CollectivePermuteMotionTest, DoNotMoveWithNonConstElementwise) {\n absl::string_view hlo_string = R\"(\n HloModule test\n body {\n loop_var = (s32[], f32[4,4]) parameter(0)\n constant.1 = s32[] constant(1)\n gte0 = s32[] get-tuple-element(loop_var), index=0\n add = s32[] add(gte0, constant.1)\n gte1 = f32[4,4] get-tuple-element(loop_var), index=1\n mul = f32[4,4] multiply(gte1, gte1)\n cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}\n constant.4 = f32[] constant(1)\n nonconst = f32[4,4] custom-call(), custom_call_target=\"unknown\"\n add1 = f32[4,4] add(cp, nonconst)\n ROOT tuple = (s32[], f32[4,4]) tuple(add, add1)\n }\n cond {\n loop_var = (s32[], f32[4,4]) parameter(0)\n gte.cond = s32[] get-tuple-element(loop_var), index=0\n constant.3 = s32[] constant(5)\n ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT\n }\n ENTRY main {\n constant.2 = s32[] constant(0)\n param = f32[4,4] parameter(0)\n tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)\n while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body\n ROOT result = s32[] 
get-tuple-element(while), index=0\n }\n)\";\n auto module = ParseAndReturnVerifiedModule(hlo_string).value();\n CollectivePermuteMotion pass;\n ASSERT_FALSE(pass.Run(&*module).value());\n}\nTEST_F(CollectivePermuteMotionTest, DoNotMoveIfOutputUsed) {\n absl::string_view hlo_string = R\"(\n HloModule test\n body {\n loop_var = (s32[], f32[4,4]) parameter(0)\n constant.1 = s32[] constant(1)\n gte0 = s32[] get-tuple-element(loop_var), index=0\n add = s32[] add(gte0, constant.1)\n gte1 = f32[4,4] get-tuple-element(loop_var), index=1\n mul = f32[4,4] multiply(gte1, gte1)\n cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}\n ROOT tuple = (s32[], f32[4,4]) tuple(add, cp)\n }\n cond {\n loop_var = (s32[], f32[4,4]) parameter(0)\n gte.cond = s32[] get-tuple-element(loop_var), index=0\n constant.3 = s32[] constant(5)\n ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT\n }\n ENTRY main {\n constant.2 = s32[] constant(0)\n param = f32[4,4] parameter(0)\n tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)\n while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body\n ROOT result = f32[4,4] get-tuple-element(while), index=1\n }\n)\";\n auto module = ParseAndReturnVerifiedModule(hlo_string).value();\n CollectivePermuteMotion pass;\n ASSERT_FALSE(pass.Run(&*module).value());\n}\nTEST_F(CollectivePermuteMotionTest, DoNotMoveIfIndictionVarUnknown) {\n absl::string_view hlo_string = R\"(\n HloModule test\n body {\n loop_var = (s32[], f32[4,4]) parameter(0)\n constant.1 = s32[] constant(1)\n gte0 = s32[] get-tuple-element(loop_var), index=0\n custom = s32[] custom-call(gte0, constant.1), custom_call_target=\"unknown\"\n gte1 = f32[4,4] get-tuple-element(loop_var), index=1\n mul = f32[4,4] multiply(gte1, gte1)\n cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}\n ROOT tuple = (s32[], f32[4,4]) tuple(custom, cp)\n }\n cond {\n loop_var = (s32[], f32[4,4]) parameter(0)\n gte.cond = s32[] get-tuple-element(loop_var), index=0\n constant.3 = s32[] constant(5)\n ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT\n }\n ENTRY main {\n constant.2 = s32[] constant(0)\n param = f32[4,4] parameter(0)\n tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)\n while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body\n ROOT result = s32[] get-tuple-element(while), index=0\n }\n)\";\n auto module = ParseAndReturnVerifiedModule(hlo_string).value();\n CollectivePermuteMotion pass;\n ASSERT_FALSE(pass.Run(&*module).value());\n}\nTEST_F(CollectivePermuteMotionTest, DoNotMoveIfMultiOutput) {\n absl::string_view hlo_string = R\"(\n HloModule test\n body {\n loop_var = (s32[], f32[4,4], f32[4,4]) parameter(0)\n constant.1 = s32[] constant(1)\n gte0 = s32[] get-tuple-element(loop_var), index=0\n add = s32[] add(gte0, constant.1)\n gte1 = f32[4,4] get-tuple-element(loop_var), index=1\n mul = f32[4,4] multiply(gte1, gte1)\n cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}\n ROOT tuple = (s32[], f32[4,4], f32[4,4]) tuple(add, cp, cp)\n }\n cond {\n loop_var = (s32[], f32[4,4], f32[4,4]) parameter(0)\n gte.cond = s32[] get-tuple-element(loop_var), index=0\n constant.3 = s32[] constant(5)\n ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT\n }\n ENTRY main {\n constant.2 = s32[] constant(0)\n param = f32[4,4] parameter(0)\n tuple.1 = (s32[], f32[4,4], f32[4,4]) tuple(constant.2, param, param)\n while = (s32[], f32[4,4], f32[4,4]) while(tuple.1),\n condition=cond, body=body\n ROOT result = s32[] 
get-tuple-element(while), index=0\n }\n)\";\n auto module = ParseAndReturnVerifiedModule(hlo_string).value();\n CollectivePermuteMotion pass;\n ASSERT_FALSE(pass.Run(&*module).value());\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/collective_permute_motion.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/collective_permute_motion_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":255,"cells":{"ID":{"kind":"string","value":"ed71d17b-19eb-4298-88f0-ac7a9381d1f5"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"abseil/abseil-cpp"},"File Name":{"kind":"string","value":"str_split"},"File Path in Repository":{"kind":"string","value":"absl/strings/str_split.cc"},"File Path for Unit Test":{"kind":"string","value":"absl/strings/str_split_test.cc"},"Code":{"kind":"string","value":"#include \"absl/strings/str_split.h\"\n#include \n#include \n#include \n#include \n#include \"absl/base/config.h\"\n#include \"absl/base/internal/raw_logging.h\"\n#include \"absl/strings/string_view.h\"\nnamespace absl {\nABSL_NAMESPACE_BEGIN\nnamespace {\ntemplate \nabsl::string_view GenericFind(absl::string_view text,\n absl::string_view delimiter, size_t pos,\n FindPolicy find_policy) {\n if (delimiter.empty() && text.length() > 0) {\n return absl::string_view(text.data() + pos + 1, 0);\n }\n size_t found_pos = absl::string_view::npos;\n absl::string_view found(text.data() + text.size(),\n 0); \n found_pos = find_policy.Find(text, delimiter, pos);\n if (found_pos != absl::string_view::npos) {\n found = absl::string_view(text.data() + found_pos,\n find_policy.Length(delimiter));\n }\n return found;\n}\nstruct LiteralPolicy {\n static size_t Find(absl::string_view text, absl::string_view delimiter,\n size_t pos) {\n return text.find(delimiter, pos);\n }\n static size_t Length(absl::string_view delimiter) {\n return delimiter.length();\n }\n};\nstruct AnyOfPolicy {\n static size_t Find(absl::string_view text, absl::string_view delimiter,\n size_t pos) {\n return text.find_first_of(delimiter, pos);\n }\n static size_t Length(absl::string_view ) { return 1; }\n};\n} \nByString::ByString(absl::string_view sp) : delimiter_(sp) {}\nabsl::string_view ByString::Find(absl::string_view text, size_t pos) const {\n if (delimiter_.length() == 1) {\n size_t found_pos = text.find(delimiter_[0], pos);\n if (found_pos == absl::string_view::npos)\n return absl::string_view(text.data() + text.size(), 0);\n return text.substr(found_pos, 1);\n }\n return GenericFind(text, delimiter_, pos, LiteralPolicy());\n}\nabsl::string_view ByAsciiWhitespace::Find(absl::string_view text,\n size_t pos) const {\n return GenericFind(text, \" \\t\\v\\f\\r\\n\", pos, AnyOfPolicy());\n}\nabsl::string_view ByChar::Find(absl::string_view text, size_t pos) const {\n size_t found_pos = text.find(c_, pos);\n if (found_pos == absl::string_view::npos)\n return absl::string_view(text.data() + text.size(), 0);\n return text.substr(found_pos, 1);\n}\nByAnyChar::ByAnyChar(absl::string_view sp) : delimiters_(sp) {}\nabsl::string_view ByAnyChar::Find(absl::string_view text, size_t pos) const {\n return GenericFind(text, delimiters_, pos, AnyOfPolicy());\n}\nByLength::ByLength(ptrdiff_t length) : length_(length) {\n ABSL_RAW_CHECK(length > 0, 
\"\");\n}\nabsl::string_view ByLength::Find(absl::string_view text, size_t pos) const {\n pos = std::min(pos, text.size()); \n absl::string_view substr = text.substr(pos);\n if (substr.length() <= static_cast(length_))\n return absl::string_view(text.data() + text.size(), 0);\n return absl::string_view(substr.data() + length_, 0);\n}\nABSL_NAMESPACE_END\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"absl/strings/str_split.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"absl/base/macros.h\"\n#include \"absl/container/btree_map.h\"\n#include \"absl/container/btree_set.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/node_hash_map.h\"\n#include \"absl/strings/string_view.h\"\nnamespace {\nusing ::testing::ElementsAre;\nusing ::testing::IsEmpty;\nusing ::testing::Pair;\nusing ::testing::UnorderedElementsAre;\nTEST(Split, TraitsTest) {\n static_assert(!absl::strings_internal::SplitterIsConvertibleTo::value,\n \"\");\n static_assert(\n !absl::strings_internal::SplitterIsConvertibleTo::value, \"\");\n static_assert(absl::strings_internal::SplitterIsConvertibleTo<\n std::vector>::value,\n \"\");\n static_assert(\n !absl::strings_internal::SplitterIsConvertibleTo>::value,\n \"\");\n static_assert(absl::strings_internal::SplitterIsConvertibleTo<\n std::vector>::value,\n \"\");\n static_assert(absl::strings_internal::SplitterIsConvertibleTo<\n std::map>::value,\n \"\");\n static_assert(absl::strings_internal::SplitterIsConvertibleTo<\n std::map>::value,\n \"\");\n static_assert(!absl::strings_internal::SplitterIsConvertibleTo<\n std::map>::value,\n \"\");\n static_assert(!absl::strings_internal::SplitterIsConvertibleTo<\n std::map>::value,\n \"\");\n}\nTEST(Split, APIExamples) {\n {\n std::vector v = absl::StrSplit(\"a,b,c\", \",\"); \n EXPECT_THAT(v, ElementsAre(\"a\", \"b\", \"c\"));\n using absl::ByString;\n v = absl::StrSplit(\"a,b,c\", ByString(\",\"));\n EXPECT_THAT(v, ElementsAre(\"a\", \"b\", \"c\"));\n EXPECT_THAT(absl::StrSplit(\"a,b,c\", ByString(\",\")),\n ElementsAre(\"a\", \"b\", \"c\"));\n }\n {\n std::vector v = absl::StrSplit(\"a,b,c\", ',');\n EXPECT_THAT(v, ElementsAre(\"a\", \"b\", \"c\"));\n using absl::ByChar;\n v = absl::StrSplit(\"a,b,c\", ByChar(','));\n EXPECT_THAT(v, ElementsAre(\"a\", \"b\", \"c\"));\n }\n {\n const std::vector v = absl::StrSplit(\"a=>b=>c\", \"=>\");\n EXPECT_THAT(v, ElementsAre(\"a\", \"b\", \"c\"));\n }\n {\n std::vector v = absl::StrSplit(\"a,b,c\", ',');\n EXPECT_THAT(v, ElementsAre(\"a\", \"b\", \"c\"));\n }\n {\n std::vector v = absl::StrSplit(\",a,b,c,\", ',');\n EXPECT_THAT(v, ElementsAre(\"\", \"a\", \"b\", \"c\", \"\"));\n }\n {\n std::vector v = absl::StrSplit(\"abc\", ',');\n EXPECT_THAT(v, ElementsAre(\"abc\"));\n }\n {\n std::vector v = absl::StrSplit(\"abc\", \"\");\n EXPECT_THAT(v, ElementsAre(\"a\", \"b\", \"c\"));\n }\n {\n std::string embedded_nulls(\"a\\0b\\0c\", 5);\n std::string null_delim(\"\\0\", 1);\n std::vector v = absl::StrSplit(embedded_nulls, null_delim);\n EXPECT_THAT(v, ElementsAre(\"a\", \"b\", \"c\"));\n }\n {\n std::pair p = absl::StrSplit(\"a,b,c\", ',');\n EXPECT_EQ(\"a\", p.first);\n EXPECT_EQ(\"b\", p.second);\n }\n {\n std::set v = absl::StrSplit(\"a,b,c,a,b,c,a,b,c\", ',');\n EXPECT_THAT(v, ElementsAre(\"a\", \"b\", \"c\"));\n }\n {\n char a[] = \",\";\n char* d = a + 0;\n std::vector v = 
absl::StrSplit(\"a,b,c\", d);\n EXPECT_THAT(v, ElementsAre(\"a\", \"b\", \"c\"));\n }\n {\n using absl::ByAnyChar;\n std::vector v = absl::StrSplit(\"a,b;c\", ByAnyChar(\",;\"));\n EXPECT_THAT(v, ElementsAre(\"a\", \"b\", \"c\"));\n }\n {\n using absl::SkipWhitespace;\n std::vector v =\n absl::StrSplit(\" a , ,,b,\", ',', SkipWhitespace());\n EXPECT_THAT(v, ElementsAre(\" a \", \"b\"));\n }\n {\n using absl::ByLength;\n std::vector v = absl::StrSplit(\"abcdefg\", ByLength(3));\n EXPECT_THAT(v, ElementsAre(\"abc\", \"def\", \"g\"));\n }\n {\n std::vector v1 = absl::StrSplit(\"a,b,c\", ',');\n EXPECT_THAT(v1, ElementsAre(\"a\", \"b\", \"c\"));\n std::vector v2(absl::StrSplit(\"a,b,c\", ','));\n EXPECT_THAT(v2, ElementsAre(\"a\", \"b\", \"c\"));\n auto v3 = std::vector(absl::StrSplit(\"a,b,c\", ','));\n EXPECT_THAT(v3, ElementsAre(\"a\", \"b\", \"c\"));\n v3 = absl::StrSplit(\"a,b,c\", ',');\n EXPECT_THAT(v3, ElementsAre(\"a\", \"b\", \"c\"));\n }\n {\n std::map m = absl::StrSplit(\"a,1,b,2,a,3\", ',');\n EXPECT_EQ(2, m.size());\n EXPECT_EQ(\"3\", m[\"a\"]);\n EXPECT_EQ(\"2\", m[\"b\"]);\n }\n {\n std::multimap m =\n absl::StrSplit(\"a,1,b,2,a,3\", ',');\n EXPECT_EQ(3, m.size());\n auto it = m.find(\"a\");\n EXPECT_EQ(\"1\", it->second);\n ++it;\n EXPECT_EQ(\"3\", it->second);\n it = m.find(\"b\");\n EXPECT_EQ(\"2\", it->second);\n }\n {\n std::string s = \"x,x,x,x,x,x,x\";\n for (absl::string_view sp : absl::StrSplit(s, ',')) {\n EXPECT_EQ(\"x\", sp);\n }\n }\n {\n using absl::SkipWhitespace;\n std::string s = \" ,x,,x,,x,x,x,,\";\n for (absl::string_view sp : absl::StrSplit(s, ',', SkipWhitespace())) {\n EXPECT_EQ(\"x\", sp);\n }\n }\n {\n std::map m;\n for (absl::string_view sp : absl::StrSplit(\"a=b=c,d=e,f=,g\", ',')) {\n m.insert(absl::StrSplit(sp, absl::MaxSplits('=', 1)));\n }\n EXPECT_EQ(\"b=c\", m.find(\"a\")->second);\n EXPECT_EQ(\"e\", m.find(\"d\")->second);\n EXPECT_EQ(\"\", m.find(\"f\")->second);\n EXPECT_EQ(\"\", m.find(\"g\")->second);\n }\n}\nTEST(SplitIterator, Basics) {\n auto splitter = absl::StrSplit(\"a,b\", ',');\n auto it = splitter.begin();\n auto end = splitter.end();\n EXPECT_NE(it, end);\n EXPECT_EQ(\"a\", *it); \n ++it; \n EXPECT_NE(it, end);\n EXPECT_EQ(\"b\",\n std::string(it->data(), it->size())); \n it++; \n EXPECT_EQ(it, end);\n}\nclass Skip {\n public:\n explicit Skip(const std::string& s) : s_(s) {}\n bool operator()(absl::string_view sp) { return sp != s_; }\n private:\n std::string s_;\n};\nTEST(SplitIterator, Predicate) {\n auto splitter = absl::StrSplit(\"a,b,c\", ',', Skip(\"b\"));\n auto it = splitter.begin();\n auto end = splitter.end();\n EXPECT_NE(it, end);\n EXPECT_EQ(\"a\", *it); \n ++it; \n EXPECT_NE(it, end);\n EXPECT_EQ(\"c\",\n std::string(it->data(), it->size())); \n it++; \n EXPECT_EQ(it, end);\n}\nTEST(SplitIterator, EdgeCases) {\n struct {\n std::string in;\n std::vector expect;\n } specs[] = {\n {\"\", {\"\"}},\n {\"foo\", {\"foo\"}},\n {\",\", {\"\", \"\"}},\n {\",foo\", {\"\", \"foo\"}},\n {\"foo,\", {\"foo\", \"\"}},\n {\",foo,\", {\"\", \"foo\", \"\"}},\n {\"foo,bar\", {\"foo\", \"bar\"}},\n };\n for (const auto& spec : specs) {\n SCOPED_TRACE(spec.in);\n auto splitter = absl::StrSplit(spec.in, ',');\n auto it = splitter.begin();\n auto end = splitter.end();\n for (const auto& expected : spec.expect) {\n EXPECT_NE(it, end);\n EXPECT_EQ(expected, *it++);\n }\n EXPECT_EQ(it, end);\n }\n}\nTEST(Splitter, Const) {\n const auto splitter = absl::StrSplit(\"a,b,c\", ',');\n EXPECT_THAT(splitter, ElementsAre(\"a\", \"b\", 
\"c\"));\n}\nTEST(Split, EmptyAndNull) {\n EXPECT_THAT(absl::StrSplit(absl::string_view(\"\"), '-'), ElementsAre(\"\"));\n EXPECT_THAT(absl::StrSplit(absl::string_view(), '-'), ElementsAre());\n}\nTEST(SplitIterator, EqualityAsEndCondition) {\n auto splitter = absl::StrSplit(\"a,b,c\", ',');\n auto it = splitter.begin();\n auto it2 = it;\n ++it2;\n ++it2;\n EXPECT_EQ(\"c\", *it2);\n std::vector v;\n for (; it != it2; ++it) {\n v.push_back(*it);\n }\n EXPECT_THAT(v, ElementsAre(\"a\", \"b\"));\n}\nTEST(Splitter, RangeIterators) {\n auto splitter = absl::StrSplit(\"a,b,c\", ',');\n std::vector output;\n for (absl::string_view p : splitter) {\n output.push_back(p);\n }\n EXPECT_THAT(output, ElementsAre(\"a\", \"b\", \"c\"));\n}\ntemplate \nvoid TestConversionOperator(const Splitter& splitter) {\n ContainerType output = splitter;\n EXPECT_THAT(output, UnorderedElementsAre(\"a\", \"b\", \"c\", \"d\"));\n}\ntemplate \nvoid TestMapConversionOperator(const Splitter& splitter) {\n MapType m = splitter;\n EXPECT_THAT(m, UnorderedElementsAre(Pair(\"a\", \"b\"), Pair(\"c\", \"d\")));\n}\ntemplate \nvoid TestPairConversionOperator(const Splitter& splitter) {\n std::pair p = splitter;\n EXPECT_EQ(p, (std::pair(\"a\", \"b\")));\n}\nTEST(Splitter, ConversionOperator) {\n auto splitter = absl::StrSplit(\"a,b,c,d\", ',');\n TestConversionOperator>(splitter);\n TestConversionOperator>(splitter);\n TestConversionOperator>(splitter);\n TestConversionOperator>(splitter);\n TestConversionOperator>(splitter);\n TestConversionOperator>(splitter);\n TestConversionOperator>(splitter);\n TestConversionOperator>(splitter);\n TestConversionOperator>(splitter);\n TestConversionOperator>(splitter);\n TestConversionOperator>(splitter);\n TestConversionOperator>(splitter);\n TestConversionOperator>(splitter);\n TestConversionOperator>(splitter);\n TestConversionOperator>(splitter);\n TestMapConversionOperator>(\n splitter);\n TestMapConversionOperator>(splitter);\n TestMapConversionOperator>(splitter);\n TestMapConversionOperator>(splitter);\n TestMapConversionOperator<\n std::multimap>(splitter);\n TestMapConversionOperator>(\n splitter);\n TestMapConversionOperator>(\n splitter);\n TestMapConversionOperator>(splitter);\n TestMapConversionOperator<\n absl::btree_map>(splitter);\n TestMapConversionOperator>(\n splitter);\n TestMapConversionOperator>(\n splitter);\n TestMapConversionOperator>(\n splitter);\n TestMapConversionOperator<\n absl::btree_multimap>(splitter);\n TestMapConversionOperator<\n absl::btree_multimap>(splitter);\n TestMapConversionOperator<\n absl::btree_multimap>(splitter);\n TestMapConversionOperator>(\n splitter);\n TestMapConversionOperator>(\n splitter);\n TestMapConversionOperator<\n absl::node_hash_map>(splitter);\n TestMapConversionOperator<\n absl::node_hash_map>(splitter);\n TestMapConversionOperator<\n absl::node_hash_map>(splitter);\n TestMapConversionOperator<\n absl::flat_hash_map>(splitter);\n TestMapConversionOperator<\n absl::flat_hash_map>(splitter);\n TestMapConversionOperator<\n absl::flat_hash_map>(splitter);\n TestPairConversionOperator(splitter);\n TestPairConversionOperator(splitter);\n TestPairConversionOperator(splitter);\n TestPairConversionOperator(splitter);\n}\nTEST(Splitter, ToPair) {\n {\n std::pair p = absl::StrSplit(\"\", ',');\n EXPECT_EQ(\"\", p.first);\n EXPECT_EQ(\"\", p.second);\n }\n {\n std::pair p = absl::StrSplit(\"a\", ',');\n EXPECT_EQ(\"a\", p.first);\n EXPECT_EQ(\"\", p.second);\n }\n {\n std::pair p = absl::StrSplit(\",b\", ',');\n EXPECT_EQ(\"\", 
p.first);\n EXPECT_EQ(\"b\", p.second);\n }\n {\n std::pair p = absl::StrSplit(\"a,b\", ',');\n EXPECT_EQ(\"a\", p.first);\n EXPECT_EQ(\"b\", p.second);\n }\n {\n std::pair p = absl::StrSplit(\"a,b,c\", ',');\n EXPECT_EQ(\"a\", p.first);\n EXPECT_EQ(\"b\", p.second);\n }\n}\nTEST(Splitter, Predicates) {\n static const char kTestChars[] = \",a, ,b,\";\n using absl::AllowEmpty;\n using absl::SkipEmpty;\n using absl::SkipWhitespace;\n {\n auto splitter = absl::StrSplit(kTestChars, ',');\n std::vector v = splitter;\n EXPECT_THAT(v, ElementsAre(\"\", \"a\", \" \", \"b\", \"\"));\n }\n {\n auto splitter = absl::StrSplit(kTestChars, ',', AllowEmpty());\n std::vector v_allowempty = splitter;\n EXPECT_THAT(v_allowempty, ElementsAre(\"\", \"a\", \" \", \"b\", \"\"));\n auto splitter_nopredicate = absl::StrSplit(kTestChars, ',');\n std::vector v_nopredicate = splitter_nopredicate;\n EXPECT_EQ(v_allowempty, v_nopredicate);\n }\n {\n auto splitter = absl::StrSplit(kTestChars, ',', SkipEmpty());\n std::vector v = splitter;\n EXPECT_THAT(v, ElementsAre(\"a\", \" \", \"b\"));\n }\n {\n auto splitter = absl::StrSplit(kTestChars, ',', SkipWhitespace());\n std::vector v = splitter;\n EXPECT_THAT(v, ElementsAre(\"a\", \"b\"));\n }\n}\nTEST(Split, Basics) {\n {\n absl::StrSplit(\"a,b,c\", ',');\n }\n {\n std::vector v = absl::StrSplit(\"a,b,c\", ',');\n EXPECT_THAT(v, ElementsAre(\"a\", \"b\", \"c\"));\n }\n {\n std::vector v = absl::StrSplit(\"a,b,c\", ',');\n EXPECT_THAT(v, ElementsAre(\"a\", \"b\", \"c\"));\n }\n {\n std::vector v;\n v = absl::StrSplit(\"a,b,c\", ',');\n EXPECT_THAT(v, ElementsAre(\"a\", \"b\", \"c\"));\n std::map m;\n m = absl::StrSplit(\"a,b,c\", ',');\n EXPECT_EQ(2, m.size());\n std::unordered_map hm;\n hm = absl::StrSplit(\"a,b,c\", ',');\n EXPECT_EQ(2, hm.size());\n }\n}\nabsl::string_view ReturnStringView() { return \"Hello World\"; }\nconst char* ReturnConstCharP() { return \"Hello World\"; }\nchar* ReturnCharP() { return const_cast(\"Hello World\"); }\nTEST(Split, AcceptsCertainTemporaries) {\n std::vector v;\n v = absl::StrSplit(ReturnStringView(), ' ');\n EXPECT_THAT(v, ElementsAre(\"Hello\", \"World\"));\n v = absl::StrSplit(ReturnConstCharP(), ' ');\n EXPECT_THAT(v, ElementsAre(\"Hello\", \"World\"));\n v = absl::StrSplit(ReturnCharP(), ' ');\n EXPECT_THAT(v, ElementsAre(\"Hello\", \"World\"));\n}\nTEST(Split, Temporary) {\n const char input[] = \"a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u\";\n EXPECT_LT(sizeof(std::string), ABSL_ARRAYSIZE(input))\n << \"Input should be larger than fits on the stack.\";\n auto splitter = absl::StrSplit(std::string(input), ',');\n std::string expected = \"a\";\n for (absl::string_view letter : splitter) {\n EXPECT_EQ(expected, letter);\n ++expected[0];\n }\n EXPECT_EQ(\"v\", expected);\n auto std_splitter = absl::StrSplit(std::string(input), ',');\n expected = \"a\";\n for (absl::string_view letter : std_splitter) {\n EXPECT_EQ(expected, letter);\n ++expected[0];\n }\n EXPECT_EQ(\"v\", expected);\n}\ntemplate \nstatic std::unique_ptr CopyToHeap(const T& value) {\n return std::unique_ptr(new T(value));\n}\nTEST(Split, LvalueCaptureIsCopyable) {\n std::string input = \"a,b\";\n auto heap_splitter = CopyToHeap(absl::StrSplit(input, ','));\n auto stack_splitter = *heap_splitter;\n heap_splitter.reset();\n std::vector result = stack_splitter;\n EXPECT_THAT(result, testing::ElementsAre(\"a\", \"b\"));\n}\nTEST(Split, TemporaryCaptureIsCopyable) {\n auto heap_splitter = CopyToHeap(absl::StrSplit(std::string(\"a,b\"), ','));\n auto stack_splitter = 
*heap_splitter;\n heap_splitter.reset();\n std::vector<std::string> result = stack_splitter;\n EXPECT_THAT(result, testing::ElementsAre(\"a\", \"b\"));\n}\nTEST(Split, SplitterIsCopyableAndMoveable) {\n auto a = absl::StrSplit(\"foo\", '-');\n auto b = a; \n auto c = std::move(a); \n b = c; \n c = std::move(b); \n EXPECT_THAT(c, ElementsAre(\"foo\"));\n}\nTEST(Split, StringDelimiter) {\n {\n std::vector<absl::string_view> v = absl::StrSplit(\"a,b\", ',');\n EXPECT_THAT(v, ElementsAre(\"a\", \"b\"));\n }\n {\n std::vector<absl::string_view> v = absl::StrSplit(\"a,b\", std::string(\",\"));\n EXPECT_THAT(v, ElementsAre(\"a\", \"b\"));\n }\n {\n std::vector<absl::string_view> v =\n absl::StrSplit(\"a,b\", absl::string_view(\",\"));\n EXPECT_THAT(v, ElementsAre(\"a\", \"b\"));\n }\n}\n#if !defined(__cpp_char8_t)\n#if defined(__clang__)\n#pragma clang diagnostic push\n#pragma clang diagnostic ignored \"-Wc++2a-compat\"\n#endif\nTEST(Split, UTF8) {\n std::string utf8_string = u8\"\\u03BA\\u1F79\\u03C3\\u03BC\\u03B5\";\n {\n std::string to_split = \"a,\" + utf8_string;\n std::vector<absl::string_view> v = absl::StrSplit(to_split, ',');\n EXPECT_THAT(v, ElementsAre(\"a\", utf8_string));\n }\n {\n std::string to_split = \"a,\" + utf8_string + \",b\";\n std::string unicode_delimiter = \",\" + utf8_string + \",\";\n std::vector<absl::string_view> v =\n absl::StrSplit(to_split, unicode_delimiter);\n EXPECT_THAT(v, ElementsAre(\"a\", \"b\"));\n }\n {\n std::vector<absl::string_view> v =\n absl::StrSplit(u8\"Foo h\\u00E4llo th\\u4E1Ere\", absl::ByAnyChar(\" \\t\"));\n EXPECT_THAT(v, ElementsAre(\"Foo\", u8\"h\\u00E4llo\", u8\"th\\u4E1Ere\"));\n }\n}\n#if defined(__clang__)\n#pragma clang diagnostic pop\n#endif\n#endif \nTEST(Split, EmptyStringDelimiter) {\n {\n std::vector<std::string> v = absl::StrSplit(\"\", \"\");\n EXPECT_THAT(v, ElementsAre(\"\"));\n }\n {\n std::vector<std::string> v = absl::StrSplit(\"a\", \"\");\n EXPECT_THAT(v, ElementsAre(\"a\"));\n }\n {\n std::vector<std::string> v = absl::StrSplit(\"ab\", \"\");\n EXPECT_THAT(v, ElementsAre(\"a\", \"b\"));\n }\n {\n std::vector<std::string> v = absl::StrSplit(\"a b\", \"\");\n EXPECT_THAT(v, ElementsAre(\"a\", \" \", \"b\"));\n }\n}\nTEST(Split, SubstrDelimiter) {\n std::vector<absl::string_view> results;\n absl::string_view delim(\"//\");\n results = absl::StrSplit(\"\", delim);\n EXPECT_THAT(results, ElementsAre(\"\"));\n results = absl::StrSplit(\"//\", delim);\n EXPECT_THAT(results, ElementsAre(\"\", \"\"));\n results = absl::StrSplit(\"ab\", delim);\n EXPECT_THAT(results, ElementsAre(\"ab\"));\n results = absl::StrSplit(\"ab//\", delim);\n EXPECT_THAT(results, ElementsAre(\"ab\", \"\"));\n results = absl::StrSplit(\"ab/\", delim);\n EXPECT_THAT(results, ElementsAre(\"ab/\"));\n results = absl::StrSplit(\"a/b\", delim);\n EXPECT_THAT(results, ElementsAre(\"a/b\"));\n results = absl::StrSplit(\"a//b\", delim);\n EXPECT_THAT(results, ElementsAre(\"a\", \"b\"));\n results = absl::StrSplit(\"a///b\", delim);\n EXPECT_THAT(results, ElementsAre(\"a\", \"/b\"));\n results = absl::StrSplit(\"a////b\", delim);\n EXPECT_THAT(results, ElementsAre(\"a\", \"\", \"b\"));\n}\nTEST(Split, EmptyResults) {\n std::vector<absl::string_view> results;\n results = absl::StrSplit(\"\", '#');\n EXPECT_THAT(results, ElementsAre(\"\"));\n results = absl::StrSplit(\"#\", '#');\n EXPECT_THAT(results, ElementsAre(\"\", \"\"));\n results = absl::StrSplit(\"#cd\", '#');\n EXPECT_THAT(results, ElementsAre(\"\", \"cd\"));\n results = absl::StrSplit(\"ab#cd#\", '#');\n EXPECT_THAT(results, ElementsAre(\"ab\", \"cd\", \"\"));\n results = absl::StrSplit(\"ab##cd\", '#');\n EXPECT_THAT(results, ElementsAre(\"ab\", \"\", \"cd\"));\n results = absl::StrSplit(\"ab##\", '#');\n EXPECT_THAT(results, ElementsAre(\"ab\", \"\", \"\"));\n results = 
absl::StrSplit(\"ab#ab#\", '#');\n EXPECT_THAT(results, ElementsAre(\"ab\", \"ab\", \"\"));\n results = absl::StrSplit(\"aaaa\", 'a');\n EXPECT_THAT(results, ElementsAre(\"\", \"\", \"\", \"\", \"\"));\n results = absl::StrSplit(\"\", '#', absl::SkipEmpty());\n EXPECT_THAT(results, ElementsAre());\n}\ntemplate \nstatic bool IsFoundAtStartingPos(absl::string_view text, Delimiter d,\n size_t starting_pos, int expected_pos) {\n absl::string_view found = d.Find(text, starting_pos);\n return found.data() != text.data() + text.size() &&\n expected_pos == found.data() - text.data();\n}\ntemplate \nstatic bool IsFoundAt(absl::string_view text, Delimiter d, int expected_pos) {\n const std::string leading_text = \",x,y,z,\";\n return IsFoundAtStartingPos(text, d, 0, expected_pos) &&\n IsFoundAtStartingPos(leading_text + std::string(text), d,\n leading_text.length(),\n expected_pos + leading_text.length());\n}\ntemplate \nvoid TestComma(Delimiter d) {\n EXPECT_TRUE(IsFoundAt(\",\", d, 0));\n EXPECT_TRUE(IsFoundAt(\"a,\", d, 1));\n EXPECT_TRUE(IsFoundAt(\",b\", d, 0));\n EXPECT_TRUE(IsFoundAt(\"a,b\", d, 1));\n EXPECT_TRUE(IsFoundAt(\"a,b,\", d, 1));\n EXPECT_TRUE(IsFoundAt(\"a,b,c\", d, 1));\n EXPECT_FALSE(IsFoundAt(\"\", d, -1));\n EXPECT_FALSE(IsFoundAt(\" \", d, -1));\n EXPECT_FALSE(IsFoundAt(\"a\", d, -1));\n EXPECT_FALSE(IsFoundAt(\"a b c\", d, -1));\n EXPECT_FALSE(IsFoundAt(\"a;b;c\", d, -1));\n EXPECT_FALSE(IsFoundAt(\";\", d, -1));\n}\nTEST(Delimiter, ByString) {\n using absl::ByString;\n TestComma(ByString(\",\"));\n ByString comma_string(\",\");\n TestComma(comma_string);\n absl::string_view abc(\"abc\");\n EXPECT_EQ(0, abc.find(\"\")); \n ByString empty(\"\");\n EXPECT_FALSE(IsFoundAt(\"\", empty, 0));\n EXPECT_FALSE(IsFoundAt(\"a\", empty, 0));\n EXPECT_TRUE(IsFoundAt(\"ab\", empty, 1));\n EXPECT_TRUE(IsFoundAt(\"abc\", empty, 1));\n}\nTEST(Split, ByChar) {\n using absl::ByChar;\n TestComma(ByChar(','));\n ByChar comma_char(',');\n TestComma(comma_char);\n}\nTEST(Delimiter, ByAnyChar) {\n using absl::ByAnyChar;\n ByAnyChar one_delim(\",\");\n EXPECT_TRUE(IsFoundAt(\",\", one_delim, 0));\n EXPECT_TRUE(IsFoundAt(\"a,\", one_delim, 1));\n EXPECT_TRUE(IsFoundAt(\"a,b\", one_delim, 1));\n EXPECT_TRUE(IsFoundAt(\",b\", one_delim, 0));\n EXPECT_FALSE(IsFoundAt(\"\", one_delim, -1));\n EXPECT_FALSE(IsFoundAt(\" \", one_delim, -1));\n EXPECT_FALSE(IsFoundAt(\"a\", one_delim, -1));\n EXPECT_FALSE(IsFoundAt(\"a;b;c\", one_delim, -1));\n EXPECT_FALSE(IsFoundAt(\";\", one_delim, -1));\n ByAnyChar two_delims(\",;\");\n EXPECT_TRUE(IsFoundAt(\",\", two_delims, 0));\n EXPECT_TRUE(IsFoundAt(\";\", two_delims, 0));\n EXPECT_TRUE(IsFoundAt(\",;\", two_delims, 0));\n EXPECT_TRUE(IsFoundAt(\";,\", two_delims, 0));\n EXPECT_TRUE(IsFoundAt(\",;b\", two_delims, 0));\n EXPECT_TRUE(IsFoundAt(\";,b\", two_delims, 0));\n EXPECT_TRUE(IsFoundAt(\"a;,\", two_delims, 1));\n EXPECT_TRUE(IsFoundAt(\"a,;\", two_delims, 1));\n EXPECT_TRUE(IsFoundAt(\"a;,b\", two_delims, 1));\n EXPECT_TRUE(IsFoundAt(\"a,;b\", two_delims, 1));\n EXPECT_FALSE(IsFoundAt(\"\", two_delims, -1));\n EXPECT_FALSE(IsFoundAt(\" \", two_delims, -1));\n EXPECT_FALSE(IsFoundAt(\"a\", two_delims, -1));\n EXPECT_FALSE(IsFoundAt(\"a=b=c\", two_delims, -1));\n EXPECT_FALSE(IsFoundAt(\"=\", two_delims, -1));\n ByAnyChar empty(\"\");\n EXPECT_FALSE(IsFoundAt(\"\", empty, 0));\n EXPECT_FALSE(IsFoundAt(\"a\", empty, 0));\n EXPECT_TRUE(IsFoundAt(\"ab\", empty, 1));\n EXPECT_TRUE(IsFoundAt(\"abc\", empty, 1));\n}\nTEST(Split, ByAsciiWhitespace) {\n using 
absl::ByAsciiWhitespace;\n using absl::SkipEmpty;\n std::vector results;\n results = absl::StrSplit(\"aaaa\\n\", ByAsciiWhitespace());\n EXPECT_THAT(results, ElementsAre(\"aaaa\", \"\"));\n results = absl::StrSplit(\"aaaa\\n\", ByAsciiWhitespace(), SkipEmpty());\n EXPECT_THAT(results, ElementsAre(\"aaaa\"));\n results = absl::StrSplit(\" \", ByAsciiWhitespace());\n EXPECT_THAT(results, ElementsAre(\"\", \"\"));\n results = absl::StrSplit(\" \", ByAsciiWhitespace(), SkipEmpty());\n EXPECT_THAT(results, IsEmpty());\n results = absl::StrSplit(\"a\", ByAsciiWhitespace());\n EXPECT_THAT(results, ElementsAre(\"a\"));\n results = absl::StrSplit(\"\", ByAsciiWhitespace());\n EXPECT_THAT(results, ElementsAre(\"\"));\n results = absl::StrSplit(\"\", ByAsciiWhitespace(), SkipEmpty());\n EXPECT_THAT(results, IsEmpty());\n results = absl::StrSplit(\"a b\\tc\\n d\\n\", ByAsciiWhitespace());\n EXPECT_THAT(results, ElementsAre(\"a\", \"b\", \"c\", \"\", \"\", \"d\", \"\"));\n results = absl::StrSplit(\"a b\\tc\\n d \\n\", ByAsciiWhitespace(), SkipEmpty());\n EXPECT_THAT(results, ElementsAre(\"a\", \"b\", \"c\", \"d\"));\n results = absl::StrSplit(\"a\\t\\n\\v\\f\\r b\", ByAsciiWhitespace(), SkipEmpty());\n EXPECT_THAT(results, ElementsAre(\"a\", \"b\"));\n}\nTEST(Delimiter, ByLength) {\n using absl::ByLength;\n ByLength four_char_delim(4);\n EXPECT_TRUE(IsFoundAt(\"abcde\", four_char_delim, 4));\n EXPECT_TRUE(IsFoundAt(\"abcdefghijklmnopqrstuvwxyz\", four_char_delim, 4));\n EXPECT_TRUE(IsFoundAt(\"a b,c\\nd\", four_char_delim, 4));\n EXPECT_FALSE(IsFoundAt(\"\", four_char_delim, 0));\n EXPECT_FALSE(IsFoundAt(\"a\", four_char_delim, 0));\n EXPECT_FALSE(IsFoundAt(\"ab\", four_char_delim, 0));\n EXPECT_FALSE(IsFoundAt(\"abc\", four_char_delim, 0));\n EXPECT_FALSE(IsFoundAt(\"abcd\", four_char_delim, 0));\n}\nTEST(Split, WorksWithLargeStrings) {\n#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \\\n defined(ABSL_HAVE_MEMORY_SANITIZER) || defined(ABSL_HAVE_THREAD_SANITIZER)\n constexpr size_t kSize = (uint32_t{1} << 26) + 1; \n#else\n constexpr size_t kSize = (uint32_t{1} << 31) + 1; \n#endif\n if (sizeof(size_t) > 4) {\n std::string s(kSize, 'x');\n s.back() = '-';\n std::vector v = absl::StrSplit(s, '-');\n EXPECT_EQ(2, v.size());\n EXPECT_EQ('x', v[0][0]);\n EXPECT_EQ('x', v[0][1]);\n EXPECT_EQ('x', v[0][3]);\n EXPECT_EQ(\"\", v[1]);\n }\n}\nTEST(SplitInternalTest, TypeTraits) {\n EXPECT_FALSE(absl::strings_internal::HasMappedType::value);\n EXPECT_TRUE(\n (absl::strings_internal::HasMappedType>::value));\n EXPECT_FALSE(absl::strings_internal::HasValueType::value);\n EXPECT_TRUE(\n (absl::strings_internal::HasValueType>::value));\n EXPECT_FALSE(absl::strings_internal::HasConstIterator::value);\n EXPECT_TRUE(\n (absl::strings_internal::HasConstIterator>::value));\n EXPECT_FALSE(absl::strings_internal::IsInitializerList::value);\n EXPECT_TRUE((absl::strings_internal::IsInitializerList<\n std::initializer_list>::value));\n}\n} "},"Code Url":{"kind":"string","value":"https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/str_split.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/str_split_test.cc"},"Commit Hash":{"kind":"string","value":"03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4"}}},{"rowIdx":256,"cells":{"ID":{"kind":"string","value":"22062a52-3ab1-495e-b0d6-3265e90401e0"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"abseil/abseil-cpp"},"File 
Name":{"kind":"string","value":"memutil"},"File Path in Repository":{"kind":"string","value":"absl/strings/internal/memutil.cc"},"File Path for Unit Test":{"kind":"string","value":"absl/strings/internal/memutil_test.cc"},"Code":{"kind":"string","value":"#include \"absl/strings/internal/memutil.h\"\n#include \n#include \"absl/strings/ascii.h\"\nnamespace absl {\nABSL_NAMESPACE_BEGIN\nnamespace strings_internal {\nint memcasecmp(const char* s1, const char* s2, size_t len) {\n const unsigned char* us1 = reinterpret_cast(s1);\n const unsigned char* us2 = reinterpret_cast(s2);\n for (size_t i = 0; i < len; i++) {\n unsigned char c1 = us1[i];\n unsigned char c2 = us2[i];\n if (c1 != c2) {\n c1 = c1 >= 'A' && c1 <= 'Z' ? c1 - 'A' + 'a' : c1;\n c2 = c2 >= 'A' && c2 <= 'Z' ? c2 - 'A' + 'a' : c2;\n const int diff = int{c1} - int{c2};\n if (diff != 0) return diff;\n }\n }\n return 0;\n}\n} \nABSL_NAMESPACE_END\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"absl/strings/internal/memutil.h\"\n#include \n#include \"gtest/gtest.h\"\nnamespace {\nTEST(MemUtil, memcasecmp) {\n const char a[] = \"hello there\";\n EXPECT_EQ(absl::strings_internal::memcasecmp(a, \"heLLO there\",\n sizeof(\"hello there\") - 1),\n 0);\n EXPECT_EQ(absl::strings_internal::memcasecmp(a, \"heLLO therf\",\n sizeof(\"hello there\") - 1),\n -1);\n EXPECT_EQ(absl::strings_internal::memcasecmp(a, \"heLLO therf\",\n sizeof(\"hello there\") - 2),\n 0);\n EXPECT_EQ(absl::strings_internal::memcasecmp(a, \"whatever\", 0), 0);\n}\n} "},"Code Url":{"kind":"string","value":"https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/memutil.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/memutil_test.cc"},"Commit Hash":{"kind":"string","value":"03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4"}}},{"rowIdx":257,"cells":{"ID":{"kind":"string","value":"fcc9cb62-4002-4ced-aa88-1c3431b4b183"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"unbounded_thread_pool"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/data/unbounded_thread_pool.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/data/unbounded_thread_pool_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/data/unbounded_thread_pool.h\"\n#include \n#include \n#include \n#include \"absl/memory/memory.h\"\n#include \"tensorflow/core/framework/dataset.h\"\n#include \"tensorflow/core/lib/core/notification.h\"\n#include \"tensorflow/core/platform/env.h\"\n#include \"tensorflow/core/platform/resource.h\"\n#include \"tensorflow/core/platform/unbounded_work_queue.h\"\nnamespace tensorflow {\nnamespace data {\nclass UnboundedThreadPool::LogicalThreadWrapper : public Thread {\n public:\n explicit LogicalThreadWrapper(std::shared_ptr done)\n : done_(std::move(done)) {}\n ~LogicalThreadWrapper() override {\n done_->WaitForNotification();\n }\n private:\n std::shared_ptr done_;\n};\nclass UnboundedThreadPool::LogicalThreadFactory : public ThreadFactory {\n public:\n explicit LogicalThreadFactory(UnboundedThreadPool* pool) : pool_(pool) {}\n std::unique_ptr StartThread(const string& name,\n std::function fn) override {\n auto done = std::make_shared();\n pool_->ScheduleOnWorkQueue(std::move(fn), done);\n return std::make_unique(std::move(done));\n }\n private:\n UnboundedThreadPool* 
const pool_; \n};\nstd::shared_ptr UnboundedThreadPool::get_thread_factory() {\n return std::make_shared(this);\n}\nvoid UnboundedThreadPool::Schedule(std::function fn) {\n auto tagged_fn = [fn = std::move(fn)]() {\n tensorflow::ResourceTagger tag(kTFDataResourceTag, \"ThreadPool\");\n fn();\n };\n ScheduleOnWorkQueue(std::move(tagged_fn), nullptr);\n}\nint UnboundedThreadPool::NumThreads() const { return -1; }\nint UnboundedThreadPool::CurrentThreadId() const { return -1; }\nnamespace {\nvoid WorkQueueFunc(const std::function& fn,\n std::shared_ptr done) {\n fn();\n if (done) {\n done->Notify();\n }\n}\n} \nvoid UnboundedThreadPool::ScheduleOnWorkQueue(\n std::function fn, std::shared_ptr done) {\n unbounded_work_queue_.Schedule(\n std::bind(&WorkQueueFunc, std::move(fn), std::move(done)));\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/data/unbounded_thread_pool.h\"\n#include \n#include \n#include \n#include \"tensorflow/core/lib/random/random.h\"\n#include \"tensorflow/core/platform/blocking_counter.h\"\n#include \"tensorflow/core/platform/test.h\"\nnamespace tensorflow {\nnamespace data {\nnamespace {\nTEST(UnboundedThreadPool, ConcurrentThreadCreation) {\n UnboundedThreadPool pool(Env::Default(), \"test\");\n auto thread_factory = pool.get_thread_factory();\n std::vector> threads;\n const int kNumThreadsToCreate = 10;\n std::atomic i(0);\n for (int j = 0; j < kNumThreadsToCreate; ++j) {\n threads.push_back(thread_factory->StartThread(\"\", [=, &i,\n &thread_factory]() {\n std::vector> nested_threads;\n for (int k = 0; k < kNumThreadsToCreate; ++k) {\n nested_threads.push_back(\n thread_factory->StartThread(\"\", [&i]() { ++i; }));\n }\n nested_threads.clear();\n }));\n }\n threads.clear();\n EXPECT_EQ(i, kNumThreadsToCreate * kNumThreadsToCreate);\n}\nTEST(UnboundedThreadPool, MultipleBlockingThreads) {\n UnboundedThreadPool pool(Env::Default(), \"test\");\n auto thread_factory = pool.get_thread_factory();\n std::vector> threads;\n std::vector round_sizes = {5, 10, 15, 20};\n for (const int round_size : round_sizes) {\n Notification n;\n BlockingCounter bc(round_size);\n for (int j = 0; j < round_size; ++j) {\n threads.push_back(thread_factory->StartThread(\"\", [&bc, &n]() {\n bc.DecrementCount();\n n.WaitForNotification();\n }));\n }\n bc.Wait();\n n.Notify();\n threads.clear();\n }\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/unbounded_thread_pool.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/unbounded_thread_pool_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":258,"cells":{"ID":{"kind":"string","value":"3bb57b45-0742-480e-8a24-200b67ed49fa"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"hlo_element_type_converter"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/hlo_element_type_converter.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/hlo_element_type_converter_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/hlo_element_type_converter.h\"\n#include \n#include \n#include \n#include \n#include \"xla/hlo/evaluator/hlo_evaluator.h\"\n#include 
\"xla/hlo/ir/dfs_hlo_visitor_with_default.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_query.h\"\n#include \"xla/layout_util.h\"\n#include \"xla/literal.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/types.h\"\n#include \"tsl/platform/errors.h\"\nnamespace xla {\nnamespace {\nHloInstruction* ToElementType(HloInstruction* hlo, PrimitiveType type) {\n if (hlo->shape().element_type() != type) {\n Shape shape = ShapeUtil::ChangeElementType(hlo->shape(), type);\n hlo = hlo->parent()->AddInstruction(\n HloInstruction::CreateConvert(shape, hlo));\n }\n CHECK_EQ(hlo->shape().element_type(), type);\n return hlo;\n}\nbool HasOperandType(HloInstruction* hlo, PrimitiveType type) {\n for (HloInstruction* operand : hlo->operands()) {\n if (operand->shape().element_type() == type) {\n return true;\n }\n }\n return false;\n}\nShape GetConvertedTupleShape(const Shape& shape, PrimitiveType from_type,\n PrimitiveType to_type) {\n std::vector new_tuple_subshapes;\n const int64_t n = ShapeUtil::TupleElementCount(shape);\n new_tuple_subshapes.reserve(n);\n for (int64_t i = 0; i < n; ++i) {\n Shape subshape = ShapeUtil::GetTupleElementShape(shape, i);\n CHECK(!subshape.IsTuple());\n if (subshape.element_type() == from_type) {\n subshape = ShapeUtil::ChangeElementType(subshape, to_type);\n }\n new_tuple_subshapes.push_back(subshape);\n }\n return ShapeUtil::MakeTupleShape(new_tuple_subshapes);\n}\nHloInstruction* ConvertTupleElements(HloInstruction* hlo,\n const Shape& to_shape) {\n const Shape& shape = hlo->shape();\n HloComputation* computation = hlo->parent();\n std::vector tuple_elements;\n for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {\n const Shape& ele_shape = ShapeUtil::GetTupleElementShape(shape, i);\n HloInstruction* element = computation->AddInstruction(\n HloInstruction::CreateGetTupleElement(ele_shape, hlo, i));\n const Shape& to_ele_shape = ShapeUtil::GetTupleElementShape(to_shape, i);\n CHECK(!ele_shape.IsTuple());\n if (ele_shape.element_type() != to_ele_shape.element_type()) {\n element = computation->AddInstruction(\n HloInstruction::CreateConvert(to_ele_shape, element));\n }\n tuple_elements.push_back(element);\n }\n return computation->AddInstruction(\n HloInstruction::CreateTuple(tuple_elements));\n}\n} \nHloElementTypeConverter::HloElementTypeConverter(\n PrimitiveType eliminate_type, PrimitiveType replace_with_type)\n : eliminate_type_(eliminate_type), replace_with_type_(replace_with_type) {}\nabsl::StatusOr HloElementTypeConverter::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n XLA_VLOG_LINES(\n 3, \"HloElementTypeConverter::Run(), before:\\n\" + module->ToString());\n if (eliminate_type_ == replace_with_type_) {\n return false;\n }\n HloCloneContext context(module);\n bool changed = false;\n for (auto* computation : module->computations(execution_threads)) {\n for (auto* hlo : computation->MakeInstructionPostOrder()) {\n const auto opcode = hlo->opcode();\n if (opcode == HloOpcode::kParameter || opcode == HloOpcode::kConstant ||\n opcode == HloOpcode::kTuple || opcode == HloOpcode::kConvert ||\n opcode == HloOpcode::kBitcastConvert ||\n opcode == HloOpcode::kGetTupleElement ||\n opcode == HloOpcode::kInfeed || opcode == HloOpcode::kOutfeed) {\n continue;\n }\n if (opcode == HloOpcode::kCustomCall) {\n continue;\n }\n if (opcode == HloOpcode::kWhile || opcode == HloOpcode::kCall ||\n opcode == HloOpcode::kAllReduce ||\n 
opcode == HloOpcode::kReduceScatter ||\n opcode == HloOpcode::kAllReduceStart ||\n opcode == HloOpcode::kFusion || opcode == HloOpcode::kMap ||\n opcode == HloOpcode::kReduce || opcode == HloOpcode::kReduceWindow ||\n opcode == HloOpcode::kScatter ||\n opcode == HloOpcode::kSelectAndScatter ||\n opcode == HloOpcode::kSort || opcode == HloOpcode::kConditional) {\n continue;\n }\n TF_RET_CHECK(hlo->called_computations().empty()) << hlo->ToString();\n bool nullary = hlo->operands().empty();\n bool wrong_element_type = hlo->shape().element_type() == eliminate_type_;\n bool should_eliminate_type = (nullary && wrong_element_type) ||\n HasOperandType(hlo, eliminate_type_);\n if (!should_eliminate_type) {\n TF_RET_CHECK(hlo->shape().element_type() != eliminate_type_);\n continue;\n }\n std::vector new_operands;\n const auto& operands = hlo->operands();\n new_operands.reserve(operands.size());\n for (HloInstruction* operand : operands) {\n if (operand->shape().element_type() == eliminate_type_) {\n operand = ToElementType(operand, replace_with_type_);\n }\n new_operands.push_back(operand);\n }\n HloInstruction* new_hlo;\n if (hlo->shape().element_type() == eliminate_type_) {\n Shape shape =\n ShapeUtil::ChangeElementType(hlo->shape(), replace_with_type_);\n new_hlo = computation->AddInstruction(\n hlo->CloneWithNewOperands(shape, new_operands, &context));\n TF_RETURN_IF_ERROR(new_hlo->CopyAllControlDepsFrom(hlo));\n new_hlo = ToElementType(new_hlo, eliminate_type_);\n } else if (hlo->shape().IsTuple()) {\n Shape old_shape = hlo->shape();\n Shape new_shape = GetConvertedTupleShape(hlo->shape(), eliminate_type_,\n replace_with_type_);\n new_hlo = computation->AddInstruction(\n hlo->CloneWithNewOperands(new_shape, new_operands, &context));\n TF_RETURN_IF_ERROR(new_hlo->CopyAllControlDepsFrom(hlo));\n new_hlo = ConvertTupleElements(new_hlo, old_shape);\n } else {\n new_hlo = computation->AddInstruction(\n hlo->CloneWithNewOperands(hlo->shape(), new_operands, &context));\n TF_RETURN_IF_ERROR(new_hlo->CopyAllControlDepsFrom(hlo));\n }\n TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(new_hlo));\n TF_RETURN_IF_ERROR(hlo->DropAllControlDeps());\n TF_RETURN_IF_ERROR(computation->RemoveInstruction(hlo));\n changed = true;\n }\n }\n XLA_VLOG_LINES(\n 2, \"HloElementTypeConverter::Run(), after:\\n\" + module->ToString());\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/hlo_element_type_converter.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/tests/hlo_test_base.h\"\nnamespace xla {\nnamespace {\nnamespace op = xla::testing::opcode_matchers;\nusing ::testing::Contains;\nusing ::testing::ElementsAre;\nusing ::testing::Eq;\nusing ::testing::Not;\nusing ::testing::ResultOf;\nusing HloElementTypeConverterTest = HloTestBase;\nTEST_F(HloElementTypeConverterTest, CustomCallsNotConverted) {\n const std::string& hlo_string = R\"(\n HloModule custom_call\n ENTRY CustomCall {\n constant = bf16[1]{0} constant({12345})\n ROOT custom-call = bf16[1,2,3]{0,2,1} custom-call(constant),\n custom_call_target=\"foo\"\n }\n )\";\n auto module = ParseAndReturnVerifiedModule(hlo_string).value();\n HloElementTypeConverter type_converter(BF16, F32);\n TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));\n EXPECT_FALSE(converted);\n}\nTEST_F(HloElementTypeConverterTest, InfeedsOutfeedsNotConverted) {\n const std::string& hlo_string = R\"(\n HloModule InfeedOutfeed\n ENTRY RoundTrip16MiBR1.v2 {\n token0 = token[] after-all()\n infeed = (bf16[4]{0}, 
token[]) infeed(token0)\n ROOT infeed.data = bf16[4]{0} get-tuple-element(infeed), index=0\n outfeed = token[] outfeed(infeed.data, token0)\n }\n )\";\n auto module = ParseAndReturnVerifiedModule(hlo_string).value();\n HloElementTypeConverter type_converter(BF16, F32);\n TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));\n EXPECT_FALSE(converted);\n}\nTEST_F(HloElementTypeConverterTest, OperationsInNestedTuplesConverted) {\n const std::string& hlo_string = R\"(\n HloModule NestedTuples\n ENTRY NestedTuples.v5 {\n constant.2 = f32[2]{0} constant({1, 2})\n constant.3 = bf16[2]{0} constant({42, 42})\n add = bf16[2]{0} add(constant.2, constant.3)\n tuple = (f32[2]{0}, bf16[2]{0}) tuple(constant.2, add)\n constant.5 = bf16[2]{0} constant({22, 44})\n ROOT tuple.1 = ((f32[2]{0}, bf16[2]{0}), bf16[2]{0}) tuple(tuple, constant.5)\n }\n )\";\n auto module = ParseAndReturnVerifiedModule(hlo_string).value();\n HloElementTypeConverter type_converter(BF16, F32);\n TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));\n EXPECT_TRUE(converted);\n const HloInstruction* bf16_op =\n module->entry_computation()->root_instruction()->operand(0)->operand(1);\n EXPECT_THAT(bf16_op, op::Convert(op::Add(op::Constant(), op::Convert())));\n}\nTEST_F(HloElementTypeConverterTest, BatchNormGradBF16Converted) {\n const std::string& hlo_string = R\"(\n HloModule BatchNormGrad\n ENTRY BatchNormGrad.v6 {\n constant.4 = bf16[2,2,2,1]{3,2,1,0} constant({ { \n { {0}, {0} }, { {0}, {0} } }, { { {0},\n {0} }, { {0}, {0} } } })\n constant.5 = bf16[2]{0} constant({1, 1})\n constant.6 = bf16[2]{0} constant({0, 0})\n constant.7 = bf16[2]{0} constant({1, 1})\n constant.8 = bf16[2,2,2,1]{3,2,1,0} constant({ { \n { {1}, {2} }, { {3}, {4} } }, { { \n {5}, {6} }, { {7}, {8} } } })\n ROOT batch-norm-grad = (bf16[2,2,2,1]{3,2,1,0}, bf16[2]{0}, bf16[2]{0})\n batch-norm-grad(constant.4, constant.5, constant.6, constant.7,\n constant.8), epsilon=0, feature_index=2\n }\n )\";\n auto module = ParseAndReturnVerifiedModule(hlo_string).value();\n HloElementTypeConverter type_converter(BF16, F32);\n TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));\n EXPECT_TRUE(converted);\n const HloInstruction* tuple_instr =\n module->entry_computation()->root_instruction();\n ::testing::Matcher batch_norm =\n op::BatchNormGrad();\n EXPECT_THAT(tuple_instr,\n op::Tuple(op::Convert(op::GetTupleElement(batch_norm, 0)),\n op::Convert(op::GetTupleElement(batch_norm, 1)),\n op::Convert(op::GetTupleElement(batch_norm, 2))));\n}\nTEST_F(HloElementTypeConverterTest, RngIsRemoved) {\n const std::string& hlo_string = R\"(\nHloModule RngIsRemoved\nENTRY main {\n constant.3 = bf16[] constant(0)\n constant.4 = bf16[] constant(1)\n ROOT rng = bf16[1,1000,20]{2,1,0} rng(constant.3, constant.4), distribution=rng_uniform\n}\n )\";\n auto module = ParseAndReturnVerifiedModule(hlo_string).value();\n HloElementTypeConverter type_converter(BF16, F32);\n TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));\n EXPECT_TRUE(converted);\n HloPredicate is_bf16_rng = [](const HloInstruction* inst) {\n return inst->shape().element_type() == BF16 &&\n inst->opcode() == HloOpcode::kRng;\n };\n EXPECT_THAT(module->entry_computation()->instructions(),\n Not(Contains(ResultOf(is_bf16_rng, Eq(true)))));\n}\nTEST_F(HloElementTypeConverterTest, RngCtrlDep) {\n const std::string& hlo_string = R\"(\nHloModule RngIsRemoved\nENTRY main {\n constant.3 = bf16[] constant(0)\n constant.4 = bf16[] constant(1)\n rng0 = 
bf16[1,2000,20]{2,1,0} rng(constant.3, constant.4), distribution=rng_uniform\n ROOT rng1 = bf16[1,1000,20]{2,1,0} rng(constant.3, constant.4), control-predecessors={%rng0}, distribution=rng_uniform\n}\n )\";\n auto module = ParseAndReturnVerifiedModule(hlo_string).value();\n HloElementTypeConverter type_converter(BF16, F32);\n TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));\n EXPECT_TRUE(converted);\n HloInstruction *rng0, *rng1;\n for (auto* inst : module->entry_computation()->instructions()) {\n if (inst->opcode() == HloOpcode::kRng) {\n const Shape& shape = inst->shape();\n ASSERT_EQ(shape.dimensions_size(), 3);\n ASSERT_TRUE(shape.dimensions(1) == 2000 || shape.dimensions(1) == 1000);\n if (shape.dimensions(1) == 2000) {\n rng0 = inst;\n } else {\n rng1 = inst;\n }\n }\n }\n EXPECT_THAT(rng0->control_successors(), ElementsAre(rng1));\n EXPECT_THAT(rng1->control_predecessors(), ElementsAre(rng0));\n}\nTEST_F(HloElementTypeConverterTest, BitcastConvertIsUnmodified) {\n const std::string& hlo_string = R\"(\n HloModule test\n ENTRY test {\n p = bf16[] parameter(0)\n ROOT c = u16[] bitcast-convert(p)\n })\";\n auto module = ParseAndReturnVerifiedModule(hlo_string).value();\n HloElementTypeConverter converter(BF16, F32);\n TF_ASSERT_OK_AND_ASSIGN(bool converted, RunHloPass(&converter, module.get()));\n EXPECT_FALSE(converted);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_element_type_converter.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_element_type_converter_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":259,"cells":{"ID":{"kind":"string","value":"59953132-5105-4aa4-93e8-9e703673d57a"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"flat_map_dataset_op"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/kernels/data/flat_map_dataset_op.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/kernels/data/flat_map_dataset_op_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/kernels/data/flat_map_dataset_op.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"tensorflow/core/common_runtime/function.h\"\n#include \"tensorflow/core/common_runtime/graph_constructor.h\"\n#include \"tensorflow/core/common_runtime/graph_runner.h\"\n#include \"tensorflow/core/common_runtime/input_colocation_exemption_registry.h\"\n#include \"tensorflow/core/data/captured_function.h\"\n#include \"tensorflow/core/data/dataset_utils.h\"\n#include \"tensorflow/core/data/flat_map_utils.h\"\n#include \"tensorflow/core/data/name_utils.h\"\n#include \"tensorflow/core/data/serialization_utils.h\"\n#include \"tensorflow/core/framework/dataset.h\"\n#include \"tensorflow/core/framework/dataset_options.pb.h\"\n#include \"tensorflow/core/framework/graph.pb.h\"\n#include \"tensorflow/core/framework/partial_tensor_shape.h\"\n#include \"tensorflow/core/framework/tensor.h\"\n#include \"tensorflow/core/lib/random/random.h\"\n#include \"tsl/platform/logging.h\"\n#include \"tsl/platform/statusor.h\"\n#include 
\"tsl/platform/thread_annotations.h\"\nnamespace tensorflow {\nnamespace data {\n constexpr const char* const FlatMapDatasetOp::kDatasetType;\n constexpr const char* const FlatMapDatasetOp::kInputDataset;\n constexpr const char* const FlatMapDatasetOp::kOtherArguments;\n constexpr const char* const FlatMapDatasetOp::kFunc;\n constexpr const char* const FlatMapDatasetOp::kTarguments;\n constexpr const char* const FlatMapDatasetOp::kOutputTypes;\n constexpr const char* const FlatMapDatasetOp::kOutputShapes;\nconstexpr int64_t kMaxRandomIndexingCardinality = 100;\nconstexpr char kCycleLength[] = \"cycle_length\";\nconstexpr char kElementIndex[] = \"element_index\";\nconstexpr char kInputsSize[] = \"inputs_size\";\nconstexpr char kInputs[] = \"inputs\";\nconstexpr char kCurrentElementIteratorUninitialized[] =\n \"current_element_iterator_uninitialized\";\nconstexpr char kExhausted[] = \"exhausted\";\nclass FlatMapDatasetOp::Dataset : public DatasetBase {\n public:\n Dataset(OpKernelContext* ctx, const DatasetBase* input,\n std::unique_ptr captured_func,\n const DataTypeVector& output_types,\n const std::vector& output_shapes)\n : DatasetBase(DatasetContext(ctx)),\n input_(input),\n captured_func_(std::move(captured_func)),\n output_types_(output_types),\n output_shapes_(output_shapes),\n random_access_handler_(ctx, input, *captured_func_) {\n input_->Ref();\n random_indexing_compatible_ = input_->RandomIndexingCompatible();\n if (random_indexing_compatible_.ok() &&\n input_->Cardinality() > kMaxRandomIndexingCardinality) {\n random_indexing_compatible_ = absl::FailedPreconditionError(\n absl::StrCat(\"The cardinality of the input to \", type_string(),\n \" is too large to support global shuffling. It is \",\n input_->Cardinality(), \", which is greater than \",\n kMaxRandomIndexingCardinality));\n }\n }\n ~Dataset() override { input_->Unref(); }\n std::unique_ptr MakeIteratorInternal(\n const string& prefix) const override {\n return std::make_unique(Iterator::Params{\n this, name_utils::IteratorPrefix(kDatasetType, prefix)});\n }\n const DataTypeVector& output_dtypes() const override { return output_types_; }\n const std::vector& output_shapes() const override {\n return output_shapes_;\n }\n string DebugString() const override {\n return name_utils::DatasetDebugString(kDatasetType);\n }\n int64_t CardinalityInternal(CardinalityOptions options) const override {\n if (options.compute_level() <\n CardinalityOptions::CARDINALITY_COMPUTE_MODERATE) {\n return kUnknownCardinality;\n }\n absl::StatusOr cardinality = random_access_handler_.Cardinality();\n if (!cardinality.ok()) {\n LOG(ERROR) << \"Unable to compute cardinality for dataset \"\n << DebugString() << \" due to error: \" << cardinality.status();\n return kUnknownCardinality;\n }\n return *cardinality;\n }\n Status InputDatasets(std::vector* inputs) const override {\n inputs->push_back(input_);\n return absl::OkStatus();\n }\n Status CheckExternalState() const override {\n TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());\n return input_->CheckExternalState();\n }\n absl::Status RandomIndexingCompatible() const override {\n return absl::UnimplementedError(\n \"Please consider applying maps on each dataset, concatenating them \"\n \"into \"\n \"one dataset and apply global shuffle dataset op onto the \"\n \"dataset to achieve the same result as flat map with global \"\n \"shuffling.\");\n }\n protected:\n Status AsGraphDefInternal(SerializationContext* ctx,\n DatasetGraphDefBuilder* b,\n Node** output) const override {\n Node* 
input_graph_node = nullptr;\n TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));\n std::vector other_arguments;\n DataTypeVector other_arguments_types;\n TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,\n &other_arguments_types));\n AttrValue f;\n b->BuildAttrValue(captured_func_->func(), &f);\n AttrValue other_arguments_types_attr;\n b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);\n TF_RETURN_IF_ERROR(b->AddDataset(\n this, {std::make_pair(0, input_graph_node)}, \n {std::make_pair(1, other_arguments)}, \n {std::make_pair(kFunc, f),\n std::make_pair(kTarguments, other_arguments_types_attr)}, \n output));\n return absl::OkStatus();\n }\n private:\n class Iterator : public DatasetIterator {\n public:\n explicit Iterator(const Params& params)\n : DatasetIterator(params) {}\n bool SymbolicCheckpointCompatible() const override { return true; }\n Status Initialize(IteratorContext* ctx) override {\n mutex_lock l(mu_);\n input_ckpt_ = std::make_unique(ctx->id_registry());\n TF_RETURN_IF_ERROR(\n dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_));\n return dataset()->captured_func_->Instantiate(\n ctx, &instantiated_captured_func_);\n }\n Status GetNextInternal(IteratorContext* ctx,\n std::vector* out_tensors,\n bool* end_of_sequence) override {\n if (ctx->index_mapper()) {\n return Get(ctx, out_tensors, end_of_sequence);\n }\n mutex_lock l(mu_);\n do {\n if (!input_impl_) {\n *end_of_sequence = true;\n return absl::OkStatus();\n }\n if (current_element_iterator_) {\n bool end_of_element;\n auto nested_ctx = MakeNestedIteratorContext(ctx);\n TF_RETURN_IF_ERROR(current_element_iterator_->GetNext(\n &nested_ctx, out_tensors, &end_of_element));\n ctx->MergeCheckpoint(nested_ctx.checkpoint());\n if (!end_of_element) {\n *end_of_sequence = false;\n return absl::OkStatus();\n }\n ctx->MergeCheckpoint(input_ckpt_.get());\n ctx->PurgeCheckpoint(current_element_iterator_->prefix());\n current_element_iterator_.reset();\n }\n inputs_.clear();\n auto input_ctx = std::make_unique(*ctx);\n TF_RETURN_IF_ERROR(\n input_impl_->GetNext(input_ctx.get(), &inputs_, end_of_sequence));\n input_ckpt_->Merge(input_ctx->checkpoint());\n if (*end_of_sequence) {\n input_impl_.reset();\n return absl::OkStatus();\n }\n TF_RETURN_IF_ERROR(\n BuildCurrentElementIteratorLocked(ctx, true));\n } while (true);\n }\n Status SkipInternal(IteratorContext* ctx, int num_to_skip,\n bool* end_of_sequence, int* num_skipped) override {\n mutex_lock l(mu_);\n *num_skipped = 0;\n while (*num_skipped < num_to_skip) {\n if (!input_impl_) {\n *end_of_sequence = true;\n return absl::OkStatus();\n }\n if (current_element_iterator_) {\n bool end_of_element;\n auto nested_ctx = MakeNestedIteratorContext(ctx);\n int last_num_skipped;\n TF_RETURN_IF_ERROR(current_element_iterator_->Skip(\n &nested_ctx, num_to_skip - *num_skipped, &end_of_element,\n &last_num_skipped));\n *num_skipped += last_num_skipped;\n ctx->MergeCheckpoint(nested_ctx.checkpoint());\n if (!end_of_element) {\n if (*num_skipped != num_to_skip) {\n return absl::InternalError(absl::StrFormat(\n \"Expected `num_skipped` and `num_to_skip` to be the same. 
Got\"\n \" %d(num_skipped) and %d(num_to_skip)\",\n *num_skipped, num_to_skip));\n }\n continue;\n }\n ctx->MergeCheckpoint(input_ckpt_.get());\n ctx->PurgeCheckpoint(current_element_iterator_->prefix());\n current_element_iterator_.reset();\n }\n inputs_.clear();\n auto input_ctx = std::make_unique(*ctx);\n TF_RETURN_IF_ERROR(\n input_impl_->GetNext(input_ctx.get(), &inputs_, end_of_sequence));\n input_ckpt_->Merge(input_ctx->checkpoint());\n if (*end_of_sequence) {\n input_impl_.reset();\n *end_of_sequence = true;\n return absl::OkStatus();\n }\n TF_RETURN_IF_ERROR(\n BuildCurrentElementIteratorLocked(ctx, false));\n }\n *end_of_sequence = false;\n return absl::OkStatus();\n }\n absl::Status Get(IteratorContext* ctx, std::vector* out_tensors,\n bool* end_of_sequence) TF_LOCKS_EXCLUDED(mu_) {\n mutex_lock l(mu_);\n TF_ASSIGN_OR_RETURN(size_t parent_index,\n ctx->index_mapper()(element_count_));\n FlatMapRandomAccessHandler& random_access =\n dataset()->random_access_handler_;\n absl::StatusOr dataset_index =\n random_access.GetDatasetIndex(parent_index);\n if (absl::IsOutOfRange(dataset_index.status())) {\n *end_of_sequence = true;\n return absl::OkStatus();\n }\n TF_RETURN_IF_ERROR(dataset_index.status());\n if (dataset_iterators_.empty()) {\n TF_ASSIGN_OR_RETURN(\n dataset_iterators_,\n random_access.MakeInputIterators(ctx, this, prefix()));\n next_positions_.resize(dataset_iterators_.size(), 0);\n input_element_counts_.resize(dataset_iterators_.size(), 0);\n }\n IteratorContext::Params params(ctx);\n params.index_mapper =\n GetFlatMapIndexMapper(ctx->index_mapper(), *dataset_index);\n IteratorContext global_shuffle_ctx(std::move(params));\n TF_RETURN_IF_ERROR(dataset_iterators_[*dataset_index]->GetNext(\n &global_shuffle_ctx, out_tensors, end_of_sequence));\n ctx->MergeCheckpoint(global_shuffle_ctx.checkpoint());\n ++element_count_;\n ++input_element_counts_[*dataset_index];\n return absl::OkStatus();\n }\n IndexMapperFn GetFlatMapIndexMapper(IndexMapperFn parent_index_mapper,\n size_t input_dataset_index)\n TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {\n absl::StatusOr cardinality =\n dataset()->random_access_handler_.Cardinality();\n return [this, parent_index_mapper = std::move(parent_index_mapper),\n input_dataset_index, cardinality = std::move(cardinality)](\n size_t element_position) -> absl::StatusOr {\n if (!cardinality.ok() || *cardinality < 0) {\n return absl::FailedPreconditionError(\n \"Global shuffling requires finite cardinalities.\");\n }\n FlatMapRandomAccessHandler& random_access =\n dataset()->random_access_handler_;\n while (next_positions_[input_dataset_index] < *cardinality) {\n size_t index = next_positions_[input_dataset_index];\n if (parent_index_mapper != nullptr) {\n TF_ASSIGN_OR_RETURN(index, parent_index_mapper(index));\n }\n ++next_positions_[input_dataset_index];\n TF_ASSIGN_OR_RETURN(int64_t shuffled_dataset_index,\n random_access.GetDatasetIndex(index));\n if (input_dataset_index == shuffled_dataset_index) {\n if (input_dataset_index > 0) {\n TF_ASSIGN_OR_RETURN(\n int64_t cumulative_cardinality,\n random_access.CumulativeCardinality(input_dataset_index - 1));\n index -= cumulative_cardinality;\n }\n return index;\n }\n }\n return *cardinality;\n };\n }\n protected:\n std::shared_ptr CreateNode(\n IteratorContext* ctx, model::Node::Args args) const override {\n return model::MakeInterleaveManyNode(\n std::move(args),\n {model::MakeNonTunableParameter(kCycleLength, 1)});\n }\n Status SaveInternal(SerializationContext* ctx,\n IteratorStateWriter* writer) override\n 
TF_LOCKS_EXCLUDED(mu_) {\n TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(\n dataset()->captured_func_->CheckExternalState()));\n mutex_lock l(mu_);\n TF_RETURN_IF_ERROR(writer->WriteScalar(\n prefix(), kExhausted, static_cast(!input_impl_)));\n if (input_impl_) {\n TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));\n TF_RETURN_IF_ERROR(\n writer->WriteScalar(prefix(), kElementIndex, element_index_));\n TF_RETURN_IF_ERROR(writer->WriteScalar(\n prefix(), kCurrentElementIteratorUninitialized,\n static_cast(!current_element_iterator_)));\n if (current_element_iterator_ && !ctx->symbolic_checkpoint()) {\n TF_RETURN_IF_ERROR(\n writer->WriteScalar(prefix(), kInputsSize, inputs_.size()));\n for (int i = 0; i < inputs_.size(); i++) {\n TF_RETURN_IF_ERROR(writer->WriteTensor(\n prefix(), strings::StrCat(kInputs, \"[\", i, \"]\"), inputs_[i]));\n }\n TF_RETURN_IF_ERROR(SaveInput(ctx, writer, current_element_iterator_));\n }\n }\n return absl::OkStatus();\n }\n Status RestoreInternal(IteratorContext* ctx,\n IteratorStateReader* reader) override\n TF_LOCKS_EXCLUDED(mu_) {\n if (ctx->restored_element_count().has_value()) {\n return RestoreForGlobalShuffle(ctx, reader);\n }\n mutex_lock l(mu_);\n input_impl_.reset();\n element_index_ = 0;\n current_element_iterator_.reset();\n inputs_.clear();\n int64_t input_exhausted;\n TF_RETURN_IF_ERROR(\n reader->ReadScalar(prefix(), kExhausted, &input_exhausted));\n if (!static_cast(input_exhausted)) {\n TF_RETURN_IF_ERROR(\n dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_));\n TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));\n {\n int64_t temp;\n TF_RETURN_IF_ERROR(\n reader->ReadScalar(prefix(), kElementIndex, &temp));\n element_index_ = temp;\n }\n int64_t current_element_iterator_uninitialized;\n TF_RETURN_IF_ERROR(\n reader->ReadScalar(prefix(), kCurrentElementIteratorUninitialized,\n &current_element_iterator_uninitialized));\n if (!static_cast(current_element_iterator_uninitialized)) {\n TF_RETURN_IF_ERROR(RestoreCurrentElementIterator(ctx, reader));\n }\n }\n return absl::OkStatus();\n }\n Status RestoreForGlobalShuffle(IteratorContext* ctx,\n IteratorStateReader* reader)\n TF_LOCKS_EXCLUDED(mu_) {\n mutex_lock l(mu_);\n element_count_ = *ctx->restored_element_count();\n FlatMapRandomAccessHandler& random_access =\n dataset()->random_access_handler_;\n TF_ASSIGN_OR_RETURN(int64_t cardinality, random_access.Cardinality());\n if (dataset_iterators_.empty()) {\n TF_ASSIGN_OR_RETURN(\n dataset_iterators_,\n random_access.MakeInputIterators(ctx, this, prefix()));\n }\n input_element_counts_.resize(dataset_iterators_.size(), 0);\n next_positions_.resize(dataset_iterators_.size(), 0);\n std::fill(input_element_counts_.begin(), input_element_counts_.end(), 0);\n std::fill(next_positions_.begin(), next_positions_.end(), 0);\n for (size_t count = 0; count < element_count_ && count < cardinality;\n ++count) {\n TF_ASSIGN_OR_RETURN(size_t parent_index, ctx->index_mapper()(count));\n absl::StatusOr dataset_index =\n random_access.GetDatasetIndex(parent_index);\n if (absl::IsOutOfRange(dataset_index.status())) {\n break;\n }\n TF_RETURN_IF_ERROR(dataset_index.status());\n ++input_element_counts_[*dataset_index];\n next_positions_[*dataset_index] = count + 1;\n }\n for (size_t i = 0; i < dataset_iterators_.size(); ++i) {\n IteratorContext::Params params(ctx);\n params.restored_element_count = input_element_counts_[i];\n IteratorContext ctx_copy(std::move(params));\n TF_RETURN_IF_ERROR(\n RestoreInput(&ctx_copy, reader, 
dataset_iterators_[i]));\n ctx->MergeCheckpoint(ctx_copy.checkpoint());\n }\n return absl::OkStatus();\n }\n private:\n Status BuildCurrentElementIteratorLocked(IteratorContext* ctx,\n bool is_get_next)\n TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {\n std::shared_ptr node = is_get_next ? model_node() : nullptr;\n return MakeIteratorFromInputElement(\n ctx, this, inputs_, element_index_++, *instantiated_captured_func_,\n prefix(), &current_element_iterator_, node);\n }\n Status RestoreCurrentElementIterator(IteratorContext* ctx,\n IteratorStateReader* reader)\n TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {\n if (ctx->symbolic_checkpoint()) {\n return RestoreCurrentElementIteratorSymbolic(ctx, reader);\n }\n size_t inputs_size;\n {\n int64_t temp;\n TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kInputsSize, &temp));\n inputs_size = static_cast(temp);\n }\n inputs_.reserve(inputs_size);\n for (int i = 0; i < inputs_size; i++) {\n inputs_.emplace_back();\n TF_RETURN_IF_ERROR(reader->ReadTensor(\n ctx->flr(), prefix(), strings::StrCat(kInputs, \"[\", i, \"]\"),\n &inputs_.back()));\n }\n element_index_--;\n TF_RETURN_IF_ERROR(\n BuildCurrentElementIteratorLocked(ctx, false));\n TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, current_element_iterator_));\n return absl::OkStatus();\n }\n Status RestoreCurrentElementIteratorSymbolic(IteratorContext* ctx,\n IteratorStateReader* reader)\n TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {\n bool end_of_sequence;\n auto input_ctx = std::make_unique(*ctx);\n TF_RETURN_IF_ERROR(\n input_impl_->GetNext(input_ctx.get(), &inputs_, &end_of_sequence));\n if (end_of_sequence) {\n return absl::FailedPreconditionError(\n \"Unexpected end of sequence while symbolically restoring \"\n \"FlatMapDataset. Please verify that the input produces data \"\n \"deterministically.\");\n }\n input_ckpt_->Merge(input_ctx->checkpoint());\n element_index_--;\n TF_RETURN_IF_ERROR(\n BuildCurrentElementIteratorLocked(ctx, false));\n TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, current_element_iterator_));\n return absl::OkStatus();\n }\n mutex mu_;\n size_t element_index_ TF_GUARDED_BY(mu_) = 0;\n std::unique_ptr input_ckpt_ TF_GUARDED_BY(mu_);\n std::vector inputs_ TF_GUARDED_BY(mu_);\n std::unique_ptr instantiated_captured_func_;\n size_t element_count_ TF_GUARDED_BY(mu_) = 0;\n std::vector input_element_counts_ TF_GUARDED_BY(mu_);\n std::vector next_positions_;\n std::vector> dataset_iterators_\n TF_GUARDED_BY(mu_);\n std::unique_ptr input_impl_ TF_GUARDED_BY(mu_);\n std::unique_ptr current_element_iterator_ TF_GUARDED_BY(mu_);\n };\n const DatasetBase* const input_;\n const std::unique_ptr captured_func_;\n const DataTypeVector output_types_;\n const std::vector output_shapes_;\n absl::Status random_indexing_compatible_ = absl::OkStatus();\n mutable FlatMapRandomAccessHandler random_access_handler_;\n};\nFlatMapDatasetOp::FlatMapDatasetOp(OpKernelConstruction* ctx)\n : UnaryDatasetOpKernel(ctx), graph_def_version_(ctx->graph_def_version()) {\n OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kFunc, {},\n &func_metadata_));\n OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));\n OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));\n}\nvoid FlatMapDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,\n DatasetBase** output) {\n std::unique_ptr captured_func;\n OP_REQUIRES_OK(ctx,\n CapturedFunction::Create(ctx, func_metadata_, kOtherArguments,\n &captured_func));\n *output = new Dataset(ctx, input, std::move(captured_func), output_types_,\n output_shapes_);\n}\nnamespace 
{\nREGISTER_KERNEL_BUILDER(Name(\"FlatMapDataset\").Device(DEVICE_CPU),\n FlatMapDatasetOp);\nREGISTER_INPUT_COLOCATION_EXEMPTION(\"FlatMapDataset\");\n} \n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/kernels/data/flat_map_dataset_op.h\"\n#include \"tensorflow/core/data/dataset_test_base.h\"\nnamespace tensorflow {\nnamespace data {\nnamespace {\nconstexpr char kNodeName[] = \"flat_map_dataset\";\nclass FlatMapDatasetParams : public DatasetParams {\n public:\n template \n FlatMapDatasetParams(T input_dataset_params,\n std::vector other_arguments,\n FunctionDefHelper::AttrValueWrapper func,\n std::vector func_lib,\n DataTypeVector type_arguments,\n DataTypeVector output_dtypes,\n std::vector output_shapes,\n string node_name)\n : DatasetParams(std::move(output_dtypes), std::move(output_shapes),\n std::move(node_name)),\n other_arguments_(std::move(other_arguments)),\n func_(std::move(func)),\n func_lib_(std::move(func_lib)),\n type_arguments_(std::move(type_arguments)) {\n input_dataset_params_.push_back(std::make_unique(input_dataset_params));\n iterator_prefix_ =\n name_utils::IteratorPrefix(input_dataset_params.dataset_type(),\n input_dataset_params.iterator_prefix());\n }\n std::vector GetInputTensors() const override {\n return other_arguments_;\n }\n Status GetInputNames(std::vector* input_names) const override {\n input_names->emplace_back(FlatMapDatasetOp::kInputDataset);\n for (int i = 0; i < other_arguments_.size(); ++i) {\n input_names->emplace_back(\n absl::StrCat(FlatMapDatasetOp::kOtherArguments, \"_\", i));\n }\n return absl::OkStatus();\n }\n Status GetAttributes(AttributeVector* attr_vector) const override {\n *attr_vector = {{\"f\", func_},\n {\"Targuments\", type_arguments_},\n {\"output_shapes\", output_shapes_},\n {\"output_types\", output_dtypes_},\n {\"metadata\", \"\"}};\n return absl::OkStatus();\n }\n string dataset_type() const override {\n return FlatMapDatasetOp::kDatasetType;\n }\n std::vector func_lib() const override { return func_lib_; }\n private:\n std::vector other_arguments_;\n FunctionDefHelper::AttrValueWrapper func_;\n std::vector func_lib_;\n DataTypeVector type_arguments_;\n};\nclass FlatMapDatasetOpTest : public DatasetOpsTestBase {};\nFlatMapDatasetParams FlatMapDatasetParams1() {\n auto tensor_slice_dataset_params = TensorSliceDatasetParams(\n {CreateTensor(TensorShape{3, 3, 1},\n {0, 1, 2, 3, 4, 5, 6, 7, 8})},\n \"tensor_slice\");\n auto func = FunctionDefHelper::FunctionRef(\n \"MakeTensorSliceDataset\",\n {{\"Toutput_types\", DataTypeVector({DT_INT64})},\n {\"output_shapes\",\n std::vector({PartialTensorShape({1})})}});\n return FlatMapDatasetParams(\n std::move(tensor_slice_dataset_params),\n {},\n func,\n {test::function::MakeTensorSliceDataset()},\n {},\n {DT_INT64},\n {PartialTensorShape({1})},\n kNodeName);\n}\nFlatMapDatasetParams InvalidFlatMapDatasetParams() {\n auto tensor_slice_dataset_params = TensorSliceDatasetParams(\n {CreateTensor(TensorShape{3, 3, 1},\n {0, 1, 2, 3, 4, 5, 6, 7, 8})},\n \"tensor_slice\");\n auto func = FunctionDefHelper::FunctionRef( \"NonZero\",\n {{\"T\", DT_INT64}});\n return FlatMapDatasetParams(std::move(tensor_slice_dataset_params),\n {},\n func,\n {test::function::NonZero()},\n {},\n {DT_INT64},\n {PartialTensorShape({1})},\n kNodeName);\n}\nstd::vector> GetNextTestCases() {\n return {\n {FlatMapDatasetParams1(),\n CreateTensors(TensorShape({1}),\n {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}})}};\n}\nITERATOR_GET_NEXT_TEST_P(FlatMapDatasetOpTest, 
FlatMapDatasetParams,\n GetNextTestCases())\nstd::vector> SkipTestCases() {\n return {{FlatMapDatasetParams1(),\n 2, 2, true,\n CreateTensors(TensorShape({1}), {{2}})},\n {FlatMapDatasetParams1(),\n 4, 4, true,\n CreateTensors(TensorShape({1}), {{4}})},\n {FlatMapDatasetParams1(),\n 9, 9, false},\n {FlatMapDatasetParams1(),\n 10, 9, false}};\n}\nITERATOR_SKIP_TEST_P(FlatMapDatasetOpTest, FlatMapDatasetParams,\n SkipTestCases())\nTEST_F(FlatMapDatasetOpTest, DatasetNodeName) {\n auto dataset_params = FlatMapDatasetParams1();\n TF_ASSERT_OK(Initialize(dataset_params));\n TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));\n}\nTEST_F(FlatMapDatasetOpTest, DatasetTypeString) {\n auto dataset_params = FlatMapDatasetParams1();\n TF_ASSERT_OK(Initialize(dataset_params));\n TF_ASSERT_OK(CheckDatasetTypeString(\n name_utils::OpName(FlatMapDatasetOp::kDatasetType)));\n}\nTEST_F(FlatMapDatasetOpTest, DatasetOutputDtypes) {\n auto dataset_params = FlatMapDatasetParams1();\n TF_ASSERT_OK(Initialize(dataset_params));\n TF_ASSERT_OK(CheckDatasetOutputDtypes(dataset_params.output_dtypes()));\n}\nTEST_F(FlatMapDatasetOpTest, DatasetOutputShapes) {\n auto dataset_params = FlatMapDatasetParams1();\n TF_ASSERT_OK(Initialize(dataset_params));\n TF_ASSERT_OK(CheckDatasetOutputShapes(dataset_params.output_shapes()));\n}\nTEST_F(FlatMapDatasetOpTest, Cardinality) {\n auto dataset_params = FlatMapDatasetParams1();\n TF_ASSERT_OK(Initialize(dataset_params));\n TF_ASSERT_OK(CheckDatasetCardinality(kUnknownCardinality));\n}\nTEST_F(FlatMapDatasetOpTest, IteratorOutputDtypes) {\n auto dataset_params = FlatMapDatasetParams1();\n TF_ASSERT_OK(Initialize(dataset_params));\n TF_ASSERT_OK(CheckIteratorOutputDtypes(dataset_params.output_dtypes()));\n}\nTEST_F(FlatMapDatasetOpTest, IteratorOutputShapes) {\n auto dataset_params = FlatMapDatasetParams1();\n TF_ASSERT_OK(Initialize(dataset_params));\n TF_ASSERT_OK(CheckIteratorOutputShapes(dataset_params.output_shapes()));\n}\nTEST_F(FlatMapDatasetOpTest, IteratorPrefix) {\n auto dataset_params = FlatMapDatasetParams1();\n TF_ASSERT_OK(Initialize(dataset_params));\n TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(\n FlatMapDatasetOp::kDatasetType, dataset_params.iterator_prefix())));\n}\nstd::vector>\nIteratorSaveAndRestoreTestCases() {\n return {\n {FlatMapDatasetParams1(),\n {0, 4, 11},\n CreateTensors(TensorShape({1}),\n {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}})}};\n}\nITERATOR_SAVE_AND_RESTORE_TEST_P(FlatMapDatasetOpTest, FlatMapDatasetParams,\n IteratorSaveAndRestoreTestCases())\nTEST_F(FlatMapDatasetOpTest, InvalidMapFunc) {\n auto dataset_params = InvalidFlatMapDatasetParams();\n TF_ASSERT_OK(Initialize(dataset_params));\n bool end_of_sequence = false;\n std::vector out_tensors;\n EXPECT_EQ(\n iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)\n .code(),\n absl::StatusCode::kInvalidArgument);\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/flat_map_dataset_op.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/flat_map_dataset_op_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":260,"cells":{"ID":{"kind":"string","value":"2f0470b6-f9db-44fe-a6aa-ed8d231c295a"},"Language":{"kind":"string","value":"cpp"},"Repository 
Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"validate"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/graph/validate.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/graph/validate_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/graph/validate.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/strings/string_view.h\"\n#include \"tensorflow/core/framework/graph_def_util.h\"\n#include \"tensorflow/core/framework/node_def.pb.h\"\n#include \"tensorflow/core/framework/node_def_util.h\"\n#include \"tensorflow/core/framework/op_def_util.h\"\n#include \"tensorflow/core/framework/versions.pb.h\"\n#include \"tensorflow/core/lib/core/errors.h\"\n#include \"tensorflow/core/platform/types.h\"\nnamespace tensorflow {\nnamespace graph {\nStatus ValidateGraphDef(const GraphDef& graph_def,\n const OpRegistryInterface& op_registry) {\n Status s;\n const int version = graph_def.versions().producer();\n for (const NodeDef& node_def : graph_def.node()) {\n const OpDef* op_def;\n TF_RETURN_IF_ERROR(op_registry.LookUpOpDef(node_def.op(), &op_def));\n TF_RETURN_IF_ERROR(ValidateNodeDef(node_def, *op_def));\n TF_RETURN_IF_ERROR(CheckOpDeprecation(*op_def, version));\n }\n return s;\n}\nStatus ValidateGraphDefAgainstOpRegistry(\n const GraphDef& graph_def, const OpRegistryInterface& op_registry) {\n GraphDef copy(graph_def);\n TF_RETURN_IF_ERROR(AddDefaultAttrsToGraphDef(&copy, op_registry, 0));\n return ValidateGraphDef(copy, op_registry);\n}\nStatus ValidateGraphDefAgainstOpList(const GraphDef& graph_def,\n const OpList& op_list) {\n OpListOpRegistry registry(&op_list);\n return ValidateGraphDefAgainstOpRegistry(graph_def, registry);\n}\nvoid GetOpListForValidation(OpList* op_list, const OpRegistry& op_registry) {\n op_registry.Export(false, op_list);\n RemoveDescriptionsFromOpList(op_list);\n}\nStatus ValidateGraphHasNoCycle(const Graph& graph) {\n std::vector ready;\n std::vector pending_count(graph.num_node_ids(), 0);\n for (int i = 0; i < graph.num_node_ids(); ++i) {\n const Node* n = graph.FindNodeId(i);\n if (n == nullptr) continue;\n pending_count[i] = n->in_edges().size();\n if (n->IsMerge()) {\n for (const Edge* e : n->in_edges()) {\n if (!e->IsControlEdge() && e->src()->IsNextIteration()) {\n pending_count[i]--;\n }\n }\n }\n if (pending_count[i] == 0) {\n ready.push_back(n);\n }\n }\n int processed = 0;\n while (!ready.empty()) {\n const Node* node = ready.back();\n ready.pop_back();\n ++processed;\n for (const Edge* out : node->out_edges()) {\n const int output_id = out->dst()->id();\n pending_count[output_id]--;\n if (pending_count[output_id] == 0) {\n ready.push_back(out->dst());\n }\n }\n }\n if (processed < graph.num_nodes()) {\n std::vector nodes_in_cycle;\n for (int i = 0; i < pending_count.size() && nodes_in_cycle.size() < 3;\n ++i) {\n if (pending_count[i] != 0) {\n nodes_in_cycle.push_back(graph.FindNodeId(i)->name());\n }\n }\n return errors::InvalidArgument(\n \"Graph is invalid, contains a cycle with \",\n graph.num_nodes() - processed,\n \" nodes, including: \", absl::StrJoin(nodes_in_cycle, \", \"));\n }\n return absl::OkStatus();\n}\nStatus VerifyNoDuplicateNodeNames(const GraphDef& graph) {\n absl::flat_hash_set nodes;\n for (const auto& node : graph.node()) {\n if (nodes.contains(node.name())) {\n return errors::AlreadyExists(\"Node already exists: \", node.name());\n }\n nodes.insert(node.name());\n }\n return absl::OkStatus();\n}\n} \n} "},"Unit Test - 
(Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/graph/validate.h\"\n#include \n#include \"tensorflow/core/common_runtime/graph_constructor.h\"\n#include \"tensorflow/core/framework/graph.pb.h\"\n#include \"tensorflow/core/framework/graph_def_util.h\"\n#include \"tensorflow/core/framework/op_def_builder.h\"\n#include \"tensorflow/core/graph/graph.h\"\n#include \"tensorflow/core/graph/graph_def_builder.h\"\n#include \"tensorflow/core/graph/subgraph.h\"\n#include \"tensorflow/core/kernels/ops_util.h\"\n#include \"tensorflow/core/lib/core/status.h\"\n#include \"tensorflow/core/lib/core/status_test_util.h\"\n#include \"tensorflow/core/lib/strings/str_util.h\"\n#include \"tensorflow/core/platform/status_matchers.h\"\n#include \"tensorflow/core/platform/test.h\"\nnamespace tensorflow {\nnamespace {\nREGISTER_OP(\"FloatInput\").Output(\"o: float\");\nREGISTER_OP(\"Int32Input\").Output(\"o: int32\");\nTEST(ValidateGraphDefTest, TestValidGraph) {\n const string graph_def_str =\n \"node { name: 'A' op: 'FloatInput' }\"\n \"node { name: 'B' op: 'FloatInput' }\"\n \"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }\"\n \" input: ['A', 'B'] }\";\n GraphDef graph_def;\n auto parser = protobuf::TextFormat::Parser();\n CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;\n TF_ASSERT_OK(graph::ValidateGraphDef(graph_def, *OpRegistry::Global()));\n}\nTEST(ValidateGraphDefTest, GraphWithUnspecifiedDefaultAttr) {\n const string graph_def_str =\n \"node { name: 'A' op: 'FloatInput' }\"\n \"node { name: 'B' op: 'Int32Input' }\"\n \"node { \"\n \" name: 'C' op: 'Sum' \"\n \" attr { key: 'T' value { type: DT_FLOAT } }\"\n \" input: ['A', 'B'] \"\n \"}\";\n GraphDef graph_def;\n auto parser = protobuf::TextFormat::Parser();\n CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;\n Status s = graph::ValidateGraphDef(graph_def, *OpRegistry::Global());\n EXPECT_FALSE(s.ok());\n EXPECT_TRUE(absl::StrContains(s.ToString(), \"NodeDef missing attr\"));\n TF_ASSERT_OK(AddDefaultAttrsToGraphDef(&graph_def, *OpRegistry::Global(), 0));\n TF_ASSERT_OK(graph::ValidateGraphDef(graph_def, *OpRegistry::Global()));\n}\nTEST(ValidateGraphDefTest, GraphWithUnspecifiedRequiredAttr) {\n const string graph_def_str =\n \"node { name: 'A' op: 'FloatInput' }\"\n \"node { \"\n \" name: 'B' op: 'Cast' \"\n \" attr { key: 'SrcT' value { type: DT_FLOAT } }\"\n \" input: ['A'] \"\n \"}\";\n GraphDef graph_def;\n auto parser = protobuf::TextFormat::Parser();\n CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;\n Status s = graph::ValidateGraphDef(graph_def, *OpRegistry::Global());\n EXPECT_FALSE(s.ok());\n EXPECT_TRUE(absl::StrContains(s.ToString(), \"NodeDef missing attr\"));\n TF_ASSERT_OK(AddDefaultAttrsToGraphDef(&graph_def, *OpRegistry::Global(), 0));\n s = graph::ValidateGraphDef(graph_def, *OpRegistry::Global());\n EXPECT_FALSE(s.ok());\n EXPECT_TRUE(absl::StrContains(s.ToString(), \"NodeDef missing attr\"));\n}\nTEST(ValidateGraphDefAgainstOpListTest, GraphWithOpOnlyInOpList) {\n OpRegistrationData op_reg_data;\n TF_ASSERT_OK(OpDefBuilder(\"UniqueSnowflake\").Finalize(&op_reg_data));\n OpList op_list;\n *op_list.add_op() = op_reg_data.op_def;\n const string graph_def_str = \"node { name: 'A' op: 'UniqueSnowflake' }\";\n GraphDef graph_def;\n auto parser = protobuf::TextFormat::Parser();\n CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;\n TF_ASSERT_OK(graph::ValidateGraphDefAgainstOpList(graph_def, 
op_list));\n}\nTEST(ValidateGraphDefAgainstOpListTest, GraphWithGlobalOpNotInOpList) {\n OpRegistrationData op_reg_data;\n TF_ASSERT_OK(OpDefBuilder(\"NotAnywhere\").Finalize(&op_reg_data));\n OpList op_list;\n *op_list.add_op() = op_reg_data.op_def;\n const string graph_def_str = \"node { name: 'A' op: 'FloatInput' }\";\n GraphDef graph_def;\n auto parser = protobuf::TextFormat::Parser();\n CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;\n ASSERT_FALSE(graph::ValidateGraphDefAgainstOpList(graph_def, op_list).ok());\n}\nREGISTER_OP(\"HasDocs\").Doc(\"This is in the summary.\");\nTEST(GetOpListForValidationTest, ShouldStripDocs) {\n bool found_float = false;\n bool found_int32 = false;\n bool found_has_docs = false;\n OpList op_list;\n graph::GetOpListForValidation(&op_list);\n for (const OpDef& op_def : op_list.op()) {\n if (op_def.name() == \"FloatInput\") {\n EXPECT_FALSE(found_float);\n found_float = true;\n }\n if (op_def.name() == \"Int32Input\") {\n EXPECT_FALSE(found_int32);\n found_int32 = true;\n }\n if (op_def.name() == \"HasDocs\") {\n EXPECT_FALSE(found_has_docs);\n found_has_docs = true;\n EXPECT_TRUE(op_def.summary().empty());\n }\n }\n EXPECT_TRUE(found_float);\n EXPECT_TRUE(found_int32);\n EXPECT_TRUE(found_has_docs);\n}\nTEST(VerifyNoDuplicateNodeNames, NoDuplicateNodeNames) {\n const string graph_def_str =\n \"node { name: 'A' op: 'FloatInput' }\"\n \"node { name: 'B' op: 'Int32Input' }\"\n \"node { \"\n \" name: 'C' op: 'Sum' \"\n \" attr { key: 'T' value { type: DT_FLOAT } }\"\n \" input: ['A', 'B'] \"\n \"}\";\n GraphDef graph_def;\n auto parser = protobuf::TextFormat::Parser();\n CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;\n TF_ASSERT_OK(graph::VerifyNoDuplicateNodeNames(graph_def));\n}\nTEST(VerifyNoDuplicateNodeNames, DuplicateNodeNames) {\n const string graph_def_str =\n \"node { name: 'A' op: 'FloatInput' }\"\n \"node { name: 'A' op: 'Int32Input' }\"\n \"node { \"\n \" name: 'C' op: 'Sum' \"\n \" attr { key: 'T' value { type: DT_FLOAT } }\"\n \" input: ['A', 'A'] \"\n \"}\";\n GraphDef graph_def;\n auto parser = protobuf::TextFormat::Parser();\n CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;\n EXPECT_EQ(graph::VerifyNoDuplicateNodeNames(graph_def).code(),\n tensorflow::error::ALREADY_EXISTS);\n}\nTEST(ValidateGraphHasNoCycleTest, NoCyclePasses) {\n const string graph_def_str =\n \"node { name: 'A' op: 'FloatInput' }\"\n \"node { name: 'B' op: 'FloatInput' }\"\n \"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }\"\n \" input: ['A', 'B'] }\";\n GraphDef graph_def;\n auto parser = protobuf::TextFormat::Parser();\n CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;\n Graph graph(OpRegistry::Global());\n GraphConstructorOptions opts;\n TF_ASSERT_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));\n TF_EXPECT_OK(graph::ValidateGraphHasNoCycle(graph));\n}\nTEST(ValidateGraphHasNoCycleTest, NoCycleWithMergePasses) {\n const string graph_def_str =\n R\"EOF(\n node { name: 'A' op: 'FloatInput' }\n node { name: 'merge' op: 'Merge' input: [ 'A:0', 'next:0' ]\n attr { key: \"N\" value: { i: 2 } }\n attr { key: \"T\" value: { type: DT_FLOAT } } }\n node { name: 'B' op: 'Mul'\n attr { key: 'T' value { type: DT_FLOAT } }\n input: [ 'merge:0', 'merge:0' ] }\n node { name: 'next' op: 'NextIteration' input: ['B:0']\n attr { key: \"T\" value: { type: DT_FLOAT } } }\n )EOF\";\n GraphDef graph_def;\n auto parser = protobuf::TextFormat::Parser();\n 
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;\n Graph graph(OpRegistry::Global());\n GraphConstructorOptions opts;\n TF_ASSERT_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));\n TF_EXPECT_OK(graph::ValidateGraphHasNoCycle(graph));\n}\nNode* AddNodeFromNodeDef(Graph& graph, const string& name,\n const string& node_type, int num_inputs) {\n auto builder = NodeDefBuilder(name, node_type);\n for (int i = 0; i < num_inputs; ++i) {\n builder = builder.Input(strings::StrCat(\"node_\", i), i, DT_FLOAT);\n }\n NodeDef node_def;\n TF_CHECK_OK(builder.Finalize(&node_def));\n Status s;\n Node* node = graph.AddNode(node_def, &s);\n TF_CHECK_OK(s);\n return node;\n}\nTEST(ValidateGraphHasNoCycleTest, CycleFails) {\n Graph graph(OpRegistry::Global());\n Node* a = AddNodeFromNodeDef(graph, \"A\", \"FloatInput\", 0);\n Node* c = AddNodeFromNodeDef(graph, \"B\", \"Mul\", 2);\n graph.AddEdge(a, 0, c, 0);\n graph.AddEdge(c, 0, c, 1); \n EXPECT_THAT(\n graph::ValidateGraphHasNoCycle(graph),\n tsl::testing::StatusIs(\n tsl::error::Code::INVALID_ARGUMENT,\n ::testing::ContainsRegex(\"Graph is invalid, contains a cycle\")));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/validate.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/validate_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":261,"cells":{"ID":{"kind":"string","value":"4df1949d-aa72-4d9a-8547-771218cf3f33"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/cel-cpp"},"File Name":{"kind":"string","value":"resolver"},"File Path in Repository":{"kind":"string","value":"eval/compiler/resolver.cc"},"File Path for Unit Test":{"kind":"string","value":"eval/compiler/resolver_test.cc"},"Code":{"kind":"string","value":"#include \"eval/compiler/resolver.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/base/nullability.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/match.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_split.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/strings/strip.h\"\n#include \"absl/types/optional.h\"\n#include \"base/kind.h\"\n#include \"common/memory.h\"\n#include \"common/type.h\"\n#include \"common/value.h\"\n#include \"common/value_manager.h\"\n#include \"internal/status_macros.h\"\n#include \"runtime/function_overload_reference.h\"\n#include \"runtime/function_registry.h\"\n#include \"runtime/type_registry.h\"\nnamespace google::api::expr::runtime {\nusing ::cel::Value;\nResolver::Resolver(\n absl::string_view container, const cel::FunctionRegistry& function_registry,\n const cel::TypeRegistry&, cel::ValueManager& value_factory,\n const absl::flat_hash_map&\n resolveable_enums,\n bool resolve_qualified_type_identifiers)\n : namespace_prefixes_(),\n enum_value_map_(),\n function_registry_(function_registry),\n value_factory_(value_factory),\n resolveable_enums_(resolveable_enums),\n resolve_qualified_type_identifiers_(resolve_qualified_type_identifiers) {\n auto container_elements = absl::StrSplit(container, '.');\n std::string prefix = \"\";\n namespace_prefixes_.push_back(prefix);\n for (const auto& elem : container_elements) {\n if (elem.empty()) {\n continue;\n }\n 
absl::StrAppend(&prefix, elem, \".\");\n namespace_prefixes_.insert(namespace_prefixes_.begin(), prefix);\n }\n for (const auto& prefix : namespace_prefixes_) {\n for (auto iter = resolveable_enums_.begin();\n iter != resolveable_enums_.end(); ++iter) {\n absl::string_view enum_name = iter->first;\n if (!absl::StartsWith(enum_name, prefix)) {\n continue;\n }\n auto remainder = absl::StripPrefix(enum_name, prefix);\n const auto& enum_type = iter->second;\n for (const auto& enumerator : enum_type.enumerators) {\n auto key = absl::StrCat(remainder, !remainder.empty() ? \".\" : \"\",\n enumerator.name);\n enum_value_map_[key] = value_factory.CreateIntValue(enumerator.number);\n }\n }\n }\n}\nstd::vector Resolver::FullyQualifiedNames(absl::string_view name,\n int64_t expr_id) const {\n std::vector names;\n if (absl::StartsWith(name, \".\")) {\n std::string fully_qualified_name = std::string(name.substr(1));\n names.push_back(fully_qualified_name);\n return names;\n }\n for (const auto& prefix : namespace_prefixes_) {\n std::string fully_qualified_name = absl::StrCat(prefix, name);\n names.push_back(fully_qualified_name);\n }\n return names;\n}\nabsl::optional Resolver::FindConstant(absl::string_view name,\n int64_t expr_id) const {\n auto names = FullyQualifiedNames(name, expr_id);\n for (const auto& name : names) {\n auto enum_entry = enum_value_map_.find(name);\n if (enum_entry != enum_value_map_.end()) {\n return enum_entry->second;\n }\n if (resolve_qualified_type_identifiers_ || !absl::StrContains(name, \".\")) {\n auto type_value = value_factory_.FindType(name);\n if (type_value.ok() && type_value->has_value()) {\n return value_factory_.CreateTypeValue(**type_value);\n }\n }\n }\n return absl::nullopt;\n}\nstd::vector Resolver::FindOverloads(\n absl::string_view name, bool receiver_style,\n const std::vector& types, int64_t expr_id) const {\n std::vector funcs;\n auto names = FullyQualifiedNames(name, expr_id);\n for (auto it = names.begin(); it != names.end(); it++) {\n funcs = function_registry_.FindStaticOverloads(*it, receiver_style, types);\n if (!funcs.empty()) {\n return funcs;\n }\n }\n return funcs;\n}\nstd::vector Resolver::FindLazyOverloads(\n absl::string_view name, bool receiver_style,\n const std::vector& types, int64_t expr_id) const {\n std::vector funcs;\n auto names = FullyQualifiedNames(name, expr_id);\n for (const auto& name : names) {\n funcs = function_registry_.FindLazyOverloads(name, receiver_style, types);\n if (!funcs.empty()) {\n return funcs;\n }\n }\n return funcs;\n}\nabsl::StatusOr>>\nResolver::FindType(absl::string_view name, int64_t expr_id) const {\n auto qualified_names = FullyQualifiedNames(name, expr_id);\n for (auto& qualified_name : qualified_names) {\n CEL_ASSIGN_OR_RETURN(auto maybe_type,\n value_factory_.FindType(qualified_name));\n if (maybe_type.has_value()) {\n return std::make_pair(std::move(qualified_name), std::move(*maybe_type));\n }\n }\n return absl::nullopt;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"eval/compiler/resolver.h\"\n#include \n#include \n#include \n#include \"absl/status/status.h\"\n#include \"absl/types/optional.h\"\n#include \"base/type_provider.h\"\n#include \"common/memory.h\"\n#include \"common/type_factory.h\"\n#include \"common/type_manager.h\"\n#include \"common/value.h\"\n#include \"common/value_manager.h\"\n#include \"common/values/legacy_value_manager.h\"\n#include \"eval/public/cel_function.h\"\n#include \"eval/public/cel_function_registry.h\"\n#include 
\"eval/public/cel_type_registry.h\"\n#include \"eval/public/cel_value.h\"\n#include \"eval/public/structs/protobuf_descriptor_type_provider.h\"\n#include \"eval/testutil/test_message.pb.h\"\n#include \"internal/testing.h\"\nnamespace google::api::expr::runtime {\nnamespace {\nusing ::cel::IntValue;\nusing ::cel::TypeFactory;\nusing ::cel::TypeManager;\nusing ::cel::TypeValue;\nusing ::cel::ValueManager;\nusing ::testing::Eq;\nclass FakeFunction : public CelFunction {\n public:\n explicit FakeFunction(const std::string& name)\n : CelFunction(CelFunctionDescriptor{name, false, {}}) {}\n absl::Status Evaluate(absl::Span args, CelValue* result,\n google::protobuf::Arena* arena) const override {\n return absl::OkStatus();\n }\n};\nclass ResolverTest : public testing::Test {\n public:\n ResolverTest()\n : value_factory_(cel::MemoryManagerRef::ReferenceCounting(),\n type_registry_.GetTypeProvider()) {}\n protected:\n CelTypeRegistry type_registry_;\n cel::common_internal::LegacyValueManager value_factory_;\n};\nTEST_F(ResolverTest, TestFullyQualifiedNames) {\n CelFunctionRegistry func_registry;\n Resolver resolver(\"google.api.expr\", func_registry.InternalGetRegistry(),\n type_registry_.InternalGetModernRegistry(), value_factory_,\n type_registry_.resolveable_enums());\n auto names = resolver.FullyQualifiedNames(\"simple_name\");\n std::vector expected_names(\n {\"google.api.expr.simple_name\", \"google.api.simple_name\",\n \"google.simple_name\", \"simple_name\"});\n EXPECT_THAT(names, Eq(expected_names));\n}\nTEST_F(ResolverTest, TestFullyQualifiedNamesPartiallyQualifiedName) {\n CelFunctionRegistry func_registry;\n Resolver resolver(\"google.api.expr\", func_registry.InternalGetRegistry(),\n type_registry_.InternalGetModernRegistry(), value_factory_,\n type_registry_.resolveable_enums());\n auto names = resolver.FullyQualifiedNames(\"expr.simple_name\");\n std::vector expected_names(\n {\"google.api.expr.expr.simple_name\", \"google.api.expr.simple_name\",\n \"google.expr.simple_name\", \"expr.simple_name\"});\n EXPECT_THAT(names, Eq(expected_names));\n}\nTEST_F(ResolverTest, TestFullyQualifiedNamesAbsoluteName) {\n CelFunctionRegistry func_registry;\n Resolver resolver(\"google.api.expr\", func_registry.InternalGetRegistry(),\n type_registry_.InternalGetModernRegistry(), value_factory_,\n type_registry_.resolveable_enums());\n auto names = resolver.FullyQualifiedNames(\".google.api.expr.absolute_name\");\n EXPECT_THAT(names.size(), Eq(1));\n EXPECT_THAT(names[0], Eq(\"google.api.expr.absolute_name\"));\n}\nTEST_F(ResolverTest, TestFindConstantEnum) {\n CelFunctionRegistry func_registry;\n type_registry_.Register(TestMessage::TestEnum_descriptor());\n Resolver resolver(\"google.api.expr.runtime.TestMessage\",\n func_registry.InternalGetRegistry(),\n type_registry_.InternalGetModernRegistry(), value_factory_,\n type_registry_.resolveable_enums());\n auto enum_value = resolver.FindConstant(\"TestEnum.TEST_ENUM_1\", -1);\n ASSERT_TRUE(enum_value);\n ASSERT_TRUE(enum_value->Is());\n EXPECT_THAT(enum_value->GetInt().NativeValue(), Eq(1L));\n enum_value = resolver.FindConstant(\n \".google.api.expr.runtime.TestMessage.TestEnum.TEST_ENUM_2\", -1);\n ASSERT_TRUE(enum_value);\n ASSERT_TRUE(enum_value->Is());\n EXPECT_THAT(enum_value->GetInt().NativeValue(), Eq(2L));\n}\nTEST_F(ResolverTest, TestFindConstantUnqualifiedType) {\n CelFunctionRegistry func_registry;\n Resolver resolver(\"cel\", func_registry.InternalGetRegistry(),\n type_registry_.InternalGetModernRegistry(), value_factory_,\n 
type_registry_.resolveable_enums());\n auto type_value = resolver.FindConstant(\"int\", -1);\n EXPECT_TRUE(type_value);\n EXPECT_TRUE(type_value->Is());\n EXPECT_THAT(type_value->GetType().name(), Eq(\"int\"));\n}\nTEST_F(ResolverTest, TestFindConstantFullyQualifiedType) {\n google::protobuf::LinkMessageReflection();\n CelFunctionRegistry func_registry;\n type_registry_.RegisterTypeProvider(\n std::make_unique(\n google::protobuf::DescriptorPool::generated_pool(),\n google::protobuf::MessageFactory::generated_factory()));\n Resolver resolver(\"cel\", func_registry.InternalGetRegistry(),\n type_registry_.InternalGetModernRegistry(), value_factory_,\n type_registry_.resolveable_enums());\n auto type_value =\n resolver.FindConstant(\".google.api.expr.runtime.TestMessage\", -1);\n ASSERT_TRUE(type_value);\n ASSERT_TRUE(type_value->Is());\n EXPECT_THAT(type_value->GetType().name(),\n Eq(\"google.api.expr.runtime.TestMessage\"));\n}\nTEST_F(ResolverTest, TestFindConstantQualifiedTypeDisabled) {\n CelFunctionRegistry func_registry;\n type_registry_.RegisterTypeProvider(\n std::make_unique(\n google::protobuf::DescriptorPool::generated_pool(),\n google::protobuf::MessageFactory::generated_factory()));\n Resolver resolver(\"\", func_registry.InternalGetRegistry(),\n type_registry_.InternalGetModernRegistry(), value_factory_,\n type_registry_.resolveable_enums(), false);\n auto type_value =\n resolver.FindConstant(\".google.api.expr.runtime.TestMessage\", -1);\n EXPECT_FALSE(type_value);\n}\nTEST_F(ResolverTest, FindTypeBySimpleName) {\n CelFunctionRegistry func_registry;\n Resolver resolver(\"google.api.expr.runtime\",\n func_registry.InternalGetRegistry(),\n type_registry_.InternalGetModernRegistry(), value_factory_,\n type_registry_.resolveable_enums());\n type_registry_.RegisterTypeProvider(\n std::make_unique(\n google::protobuf::DescriptorPool::generated_pool(),\n google::protobuf::MessageFactory::generated_factory()));\n ASSERT_OK_AND_ASSIGN(auto type, resolver.FindType(\"TestMessage\", -1));\n EXPECT_TRUE(type.has_value());\n EXPECT_EQ(type->second.name(), \"google.api.expr.runtime.TestMessage\");\n}\nTEST_F(ResolverTest, FindTypeByQualifiedName) {\n CelFunctionRegistry func_registry;\n type_registry_.RegisterTypeProvider(\n std::make_unique(\n google::protobuf::DescriptorPool::generated_pool(),\n google::protobuf::MessageFactory::generated_factory()));\n Resolver resolver(\"google.api.expr.runtime\",\n func_registry.InternalGetRegistry(),\n type_registry_.InternalGetModernRegistry(), value_factory_,\n type_registry_.resolveable_enums());\n ASSERT_OK_AND_ASSIGN(\n auto type, resolver.FindType(\".google.api.expr.runtime.TestMessage\", -1));\n ASSERT_TRUE(type.has_value());\n EXPECT_EQ(type->second.name(), \"google.api.expr.runtime.TestMessage\");\n}\nTEST_F(ResolverTest, TestFindDescriptorNotFound) {\n CelFunctionRegistry func_registry;\n type_registry_.RegisterTypeProvider(\n std::make_unique(\n google::protobuf::DescriptorPool::generated_pool(),\n google::protobuf::MessageFactory::generated_factory()));\n Resolver resolver(\"google.api.expr.runtime\",\n func_registry.InternalGetRegistry(),\n type_registry_.InternalGetModernRegistry(), value_factory_,\n type_registry_.resolveable_enums());\n ASSERT_OK_AND_ASSIGN(auto type, resolver.FindType(\"UndefinedMessage\", -1));\n EXPECT_FALSE(type.has_value()) << type->second;\n}\nTEST_F(ResolverTest, TestFindOverloads) {\n CelFunctionRegistry func_registry;\n auto status =\n func_registry.Register(std::make_unique(\"fake_func\"));\n 
ASSERT_OK(status);\n status = func_registry.Register(\n std::make_unique(\"cel.fake_ns_func\"));\n ASSERT_OK(status);\n Resolver resolver(\"cel\", func_registry.InternalGetRegistry(),\n type_registry_.InternalGetModernRegistry(), value_factory_,\n type_registry_.resolveable_enums());\n auto overloads =\n resolver.FindOverloads(\"fake_func\", false, ArgumentsMatcher(0));\n EXPECT_THAT(overloads.size(), Eq(1));\n EXPECT_THAT(overloads[0].descriptor.name(), Eq(\"fake_func\"));\n overloads =\n resolver.FindOverloads(\"fake_ns_func\", false, ArgumentsMatcher(0));\n EXPECT_THAT(overloads.size(), Eq(1));\n EXPECT_THAT(overloads[0].descriptor.name(), Eq(\"cel.fake_ns_func\"));\n}\nTEST_F(ResolverTest, TestFindLazyOverloads) {\n CelFunctionRegistry func_registry;\n auto status = func_registry.RegisterLazyFunction(\n CelFunctionDescriptor{\"fake_lazy_func\", false, {}});\n ASSERT_OK(status);\n status = func_registry.RegisterLazyFunction(\n CelFunctionDescriptor{\"cel.fake_lazy_ns_func\", false, {}});\n ASSERT_OK(status);\n Resolver resolver(\"cel\", func_registry.InternalGetRegistry(),\n type_registry_.InternalGetModernRegistry(), value_factory_,\n type_registry_.resolveable_enums());\n auto overloads =\n resolver.FindLazyOverloads(\"fake_lazy_func\", false, ArgumentsMatcher(0));\n EXPECT_THAT(overloads.size(), Eq(1));\n overloads = resolver.FindLazyOverloads(\"fake_lazy_ns_func\", false,\n ArgumentsMatcher(0));\n EXPECT_THAT(overloads.size(), Eq(1));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/compiler/resolver.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/compiler/resolver_test.cc"},"Commit Hash":{"kind":"string","value":"4552db5798fb0853b131b783d8875794334fae7f"}}},{"rowIdx":262,"cells":{"ID":{"kind":"string","value":"9f47e9a6-3306-4f59-a171-c8655f57841a"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"exponential_minus_one"},"File Path in Repository":{"kind":"string","value":"tensorflow/lite/experimental/shlo/ops/exponential_minus_one.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/lite/experimental/shlo/ops/exponential_minus_one_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/lite/experimental/shlo/ops/exponential_minus_one.h\"\n#include \n#include \"absl/status/status.h\"\n#include \"tensorflow/lite/experimental/shlo/bf16.h\"\n#include \"tensorflow/lite/experimental/shlo/dispatch.h\"\n#include \"tensorflow/lite/experimental/shlo/f16.h\"\n#include \"tensorflow/lite/experimental/shlo/ops/unary_elementwise.h\"\n#include \"tensorflow/lite/experimental/shlo/ops/util.h\"\n#include \"tensorflow/lite/experimental/shlo/tensor.h\"\nnamespace shlo_ref {\nstruct ExponentialMinusOne {\n template \n T operator()(T v) const {\n return std::expm1(v);\n }\n};\ntemplate <>\nF16 ExponentialMinusOne::operator()(F16 v) const {\n return F16(operator()(static_cast(v)));\n}\ntemplate <>\nBF16 ExponentialMinusOne::operator()(BF16 v) const {\n return BF16(operator()(static_cast(v)));\n}\nExponentialMinusOneOp Create(ExponentialMinusOneOp::Attributes) { return {}; }\nabsl::Status Prepare(ExponentialMinusOneOp& op, const Tensor& input,\n Tensor& output) {\n SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));\n SHLO_REF_RETURN_ON_ERROR(\n CheckSupportedTypes(CheckCtx(\"exponential_minus_one\"), 
input,\n IsFloatTensor, IsQuantizedPerTensorTensor));\n SHLO_REF_RETURN_ON_ERROR(\n CheckSameBaselineType(CheckCtx(\"exponential_minus_one\"), input, output));\n return absl::OkStatus();\n}\nabsl::Status Evaluate(ExponentialMinusOneOp& op, const Tensor& input,\n Tensor& output) {\n ExponentialMinusOne exponential_minus_one;\n if (input.IsPerTensorQuantized()) {\n DISPATCH_QUANTIZED(\n detail::DequantizeOpQuantizePerTensor,\n input.quantized_per_tensor_element_type().StorageType(),\n input.quantized_per_tensor_element_type().ExpressedType(),\n exponential_minus_one, input, output)\n } else if (IsFloatTensor(input)) {\n DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),\n exponential_minus_one, input, output);\n }\n return absl::FailedPreconditionError(\n \"stablehlo.exponential_minus_one: Unsupported tensor type.\");\n}\n}; "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/lite/experimental/shlo/ops/exponential_minus_one.h\"\n#include \n#include \n#include \n#include \n#include \"tensorflow/lite/experimental/shlo/bf16.h\"\n#include \"tensorflow/lite/experimental/shlo/f16.h\"\n#include \"tensorflow/lite/experimental/shlo/ops/test_util.h\"\n#include \"tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h\"\n#include \"tensorflow/lite/experimental/shlo/quantize.h\"\n#include \"tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h\"\n#include \"tensorflow/lite/experimental/shlo/shape.h\"\n#include \"tensorflow/lite/experimental/shlo/status_matcher.h\"\n#include \"tensorflow/lite/experimental/shlo/tensor.h\"\nusing testing::ElementsAreArray;\nusing testing::NanSensitiveFloatEq;\nusing testing::Pointwise;\nnamespace shlo_ref {\ntemplate <>\nstruct ParamName {\n static std::string Get() { return \"ExponentialMinusOne\"; }\n};\nnamespace {\nstruct ExponentialMinusOne {\n template \n T operator()(T v) const {\n return std::expm1(v);\n }\n} exponential_minus_one_ref;\ntemplate <>\nF16 ExponentialMinusOne::operator()(F16 v) const {\n return F16(operator()(static_cast(v)));\n}\ntemplate <>\nBF16 ExponentialMinusOne::operator()(BF16 v) const {\n return BF16(operator()(static_cast(v)));\n}\nINSTANTIATE_TYPED_TEST_SUITE_P(ExponentialMinusOne,\n UnaryElementwiseOpShapePropagationTest,\n ExponentialMinusOneOp, TestParamNames);\nINSTANTIATE_TYPED_TEST_SUITE_P(\n ExponentialMinusOne, UnaryElementwiseSameBaselineElementTypeConstraintTest,\n UnaryElementwiseConstraint1Types, TestParamNames);\nusing UnsupportedTypes =\n WithOpTypes>;\nINSTANTIATE_TYPED_TEST_SUITE_P(ExponentialMinusOneOp,\n UnaryElementwiseUnsupportedTypeTest,\n UnsupportedTypes, TestParamNames);\ntemplate \nstruct ExponentialMinusOneTest : ::testing::Test {};\nTYPED_TEST_SUITE(ExponentialMinusOneTest, FloatTestTypes, TestParamNames);\nTYPED_TEST(ExponentialMinusOneTest, FloatTensorsWork) {\n using StorageT = typename TypeParam::StorageT;\n const Shape shape({2, 3, 4});\n Vector input_data = RandomBuffer(shape);\n Vector output_data(shape.NumElements());\n Tensor input_tensor{\n .type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},\n .data = input_data.data()};\n Tensor output_tensor{\n .type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},\n .data = output_data.data()};\n Vector expected_data(shape.NumElements());\n absl::c_transform(input_data, expected_data.begin(),\n exponential_minus_one_ref);\n auto op = Create(ExponentialMinusOneOp::Attributes{});\n ASSERT_OK(Prepare(op, input_tensor, output_tensor));\n 
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));\n EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));\n}\ntemplate \nstruct QuantizedExponentialMinusOneTest : ::testing::Test {};\nTYPED_TEST_SUITE(QuantizedExponentialMinusOneTest, QuantizedTestTypes,\n TestParamNames);\nTYPED_TEST(QuantizedExponentialMinusOneTest, PerTensorWorks) {\n using StorageT = typename TypeParam::StorageT;\n using ExpressedT = typename TypeParam::ExpressedT;\n const Shape shape({2, 3, 4});\n Vector input_data = RandomBuffer(shape);\n Vector output_data(shape.NumElements());\n const ExpressedT scale = static_cast(1.5);\n const StorageT zero_point = static_cast(5);\n const QuantizedElementTypePerTensor tensor_type =\n QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,\n TypeParam::kExpressed, scale);\n Tensor input_tensor{\n .type = QuantizedPerTensorTensorType{.shape = shape,\n .element_type = tensor_type},\n .data = input_data.data()};\n Tensor output_tensor{\n .type = QuantizedPerTensorTensorType{.shape = shape,\n .element_type = tensor_type},\n .data = output_data.data()};\n Vector expected_data(shape.NumElements());\n absl::c_transform(\n input_data, expected_data.begin(), [zero_point, scale](auto v) {\n const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);\n const ExpressedT dequantized_res =\n exponential_minus_one_ref(dequantized_input);\n return Quantize(\n dequantized_res, zero_point, static_cast(1.) / scale);\n });\n auto op = Create(ExponentialMinusOneOp::Attributes{});\n ASSERT_OK(Prepare(op, input_tensor, output_tensor));\n ASSERT_OK(Evaluate(op, input_tensor, output_tensor));\n EXPECT_THAT(output_data, ElementsAreArray(expected_data));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/exponential_minus_one.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/exponential_minus_one_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":263,"cells":{"ID":{"kind":"string","value":"535afe38-1169-49f7-af7c-0d9f4af30529"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/quiche"},"File Name":{"kind":"string","value":"chacha20_poly1305_decrypter"},"File Path in Repository":{"kind":"string","value":"quiche/quic/core/crypto/chacha20_poly1305_decrypter.cc"},"File Path for Unit Test":{"kind":"string","value":"quiche/quic/core/crypto/chacha20_poly1305_decrypter_test.cc"},"Code":{"kind":"string","value":"#include \"quiche/quic/core/crypto/chacha20_poly1305_decrypter.h\"\n#include \"openssl/aead.h\"\n#include \"openssl/tls1.h\"\nnamespace quic {\nnamespace {\nconst size_t kKeySize = 32;\nconst size_t kNonceSize = 12;\n} \nChaCha20Poly1305Decrypter::ChaCha20Poly1305Decrypter()\n : ChaChaBaseDecrypter(EVP_aead_chacha20_poly1305, kKeySize, kAuthTagSize,\n kNonceSize,\n false) {\n static_assert(kKeySize <= kMaxKeySize, \"key size too big\");\n static_assert(kNonceSize <= kMaxNonceSize, \"nonce size too big\");\n}\nChaCha20Poly1305Decrypter::~ChaCha20Poly1305Decrypter() {}\nuint32_t ChaCha20Poly1305Decrypter::cipher_id() const {\n return TLS1_CK_CHACHA20_POLY1305_SHA256;\n}\nQuicPacketCount ChaCha20Poly1305Decrypter::GetIntegrityLimit() const {\n static_assert(kMaxIncomingPacketSize < 16384,\n \"This key limit requires limits on 
decryption payload sizes\");\n return 68719476736U;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"quiche/quic/core/crypto/chacha20_poly1305_decrypter.h\"\n#include \n#include \n#include \"absl/strings/escaping.h\"\n#include \"absl/strings/string_view.h\"\n#include \"quiche/quic/core/quic_utils.h\"\n#include \"quiche/quic/platform/api/quic_test.h\"\n#include \"quiche/quic/test_tools/quic_test_utils.h\"\n#include \"quiche/common/test_tools/quiche_test_utils.h\"\nnamespace {\nstruct TestVector {\n const char* key;\n const char* iv;\n const char* fixed;\n const char* aad;\n const char* ct;\n const char* pt; \n};\nconst TestVector test_vectors[] = {\n {\"808182838485868788898a8b8c8d8e8f\"\n \"909192939495969798999a9b9c9d9e9f\",\n \"4041424344454647\",\n \"07000000\",\n \"50515253c0c1c2c3c4c5c6c7\",\n \"d31a8d34648e60db7b86afbc53ef7ec2\"\n \"a4aded51296e08fea9e2b5a736ee62d6\"\n \"3dbea45e8ca9671282fafb69da92728b\"\n \"1a71de0a9e060b2905d6a5b67ecd3b36\"\n \"92ddbd7f2d778b8c9803aee328091b58\"\n \"fab324e4fad675945585808b4831d7bc\"\n \"3ff4def08e4b7a9de576d26586cec64b\"\n \"6116\"\n \"1ae10b594f09e26a7e902ecb\", \n \"4c616469657320616e642047656e746c\"\n \"656d656e206f662074686520636c6173\"\n \"73206f66202739393a20496620492063\"\n \"6f756c64206f6666657220796f75206f\"\n \"6e6c79206f6e652074697020666f7220\"\n \"746865206675747572652c2073756e73\"\n \"637265656e20776f756c642062652069\"\n \"742e\"},\n {\"808182838485868788898a8b8c8d8e8f\"\n \"909192939495969798999a9b9c9d9e9f\",\n \"4041424344454647\",\n \"07000000\",\n \"50515253c0c1c2c3c4c5c6c7\",\n \"d31a8d34648e60db7b86afbc53ef7ec2\"\n \"a4aded51296e08fea9e2b5a736ee62d6\"\n \"3dbea45e8ca9671282fafb69da92728b\"\n \"1a71de0a9e060b2905d6a5b67ecd3b36\"\n \"92ddbd7f2d778b8c9803aee328091b58\"\n \"fab324e4fad675945585808b4831d7bc\"\n \"3ff4def08e4b7a9de576d26586cec64b\"\n \"6116\"\n \"1ae10b594f09e26a7e902ecc\", \n nullptr},\n {\"808182838485868788898a8b8c8d8e8f\"\n \"909192939495969798999a9b9c9d9e9f\",\n \"4041424344454647\",\n \"07000000\",\n \"60515253c0c1c2c3c4c5c6c7\",\n \"d31a8d34648e60db7b86afbc53ef7ec2\"\n \"a4aded51296e08fea9e2b5a736ee62d6\"\n \"3dbea45e8ca9671282fafb69da92728b\"\n \"1a71de0a9e060b2905d6a5b67ecd3b36\"\n \"92ddbd7f2d778b8c9803aee328091b58\"\n \"fab324e4fad675945585808b4831d7bc\"\n \"3ff4def08e4b7a9de576d26586cec64b\"\n \"6116\"\n \"1ae10b594f09e26a7e902ecb\", \n nullptr},\n {nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};\n} \nnamespace quic {\nnamespace test {\nQuicData* DecryptWithNonce(ChaCha20Poly1305Decrypter* decrypter,\n absl::string_view nonce,\n absl::string_view associated_data,\n absl::string_view ciphertext) {\n uint64_t packet_number;\n absl::string_view nonce_prefix(nonce.data(),\n nonce.size() - sizeof(packet_number));\n decrypter->SetNoncePrefix(nonce_prefix);\n memcpy(&packet_number, nonce.data() + nonce_prefix.size(),\n sizeof(packet_number));\n std::unique_ptr output(new char[ciphertext.length()]);\n size_t output_length = 0;\n const bool success = decrypter->DecryptPacket(\n packet_number, associated_data, ciphertext, output.get(), &output_length,\n ciphertext.length());\n if (!success) {\n return nullptr;\n }\n return new QuicData(output.release(), output_length, true);\n}\nclass ChaCha20Poly1305DecrypterTest : public QuicTest {};\nTEST_F(ChaCha20Poly1305DecrypterTest, Decrypt) {\n for (size_t i = 0; test_vectors[i].key != nullptr; i++) {\n bool has_pt = test_vectors[i].pt;\n std::string key;\n std::string iv;\n std::string fixed;\n std::string aad;\n std::string ct;\n 
std::string pt;\n ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].key, &key));\n ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].iv, &iv));\n ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].fixed, &fixed));\n ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].aad, &aad));\n ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].ct, &ct));\n if (has_pt) {\n ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].pt, &pt));\n }\n ChaCha20Poly1305Decrypter decrypter;\n ASSERT_TRUE(decrypter.SetKey(key));\n std::unique_ptr decrypted(DecryptWithNonce(\n &decrypter, fixed + iv,\n absl::string_view(aad.length() ? aad.data() : nullptr, aad.length()),\n ct));\n if (!decrypted) {\n EXPECT_FALSE(has_pt);\n continue;\n }\n EXPECT_TRUE(has_pt);\n EXPECT_EQ(12u, ct.size() - decrypted->length());\n ASSERT_EQ(pt.length(), decrypted->length());\n quiche::test::CompareCharArraysWithHexError(\n \"plaintext\", decrypted->data(), pt.length(), pt.data(), pt.length());\n }\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/chacha20_poly1305_decrypter.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/chacha20_poly1305_decrypter_test.cc"},"Commit Hash":{"kind":"string","value":"6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6"}}},{"rowIdx":264,"cells":{"ID":{"kind":"string","value":"13bd3235-e3de-4413-be34-c4c9024651b4"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/libaddressinput"},"File Name":{"kind":"string","value":"address_problem"},"File Path in Repository":{"kind":"string","value":"cpp/src/address_problem.cc"},"File Path for Unit Test":{"kind":"string","value":"cpp/test/address_problem_test.cc"},"Code":{"kind":"string","value":"#include \n#include \n#include \n#include \"util/size.h\"\nusing i18n::addressinput::AddressProblem;\nusing i18n::addressinput::size;\nusing i18n::addressinput::UNEXPECTED_FIELD;\nusing i18n::addressinput::UNSUPPORTED_FIELD;\nstd::ostream& operator<<(std::ostream& o, AddressProblem problem) {\n static const char* const kProblemNames[] = {\n \"UNEXPECTED_FIELD\", \"MISSING_REQUIRED_FIELD\", \"UNKNOWN_VALUE\",\n \"INVALID_FORMAT\", \"MISMATCHING_VALUE\", \"USES_P_O_BOX\",\n \"UNSUPPORTED_FIELD\",\n };\n static_assert(UNEXPECTED_FIELD == 0, \"bad_base\");\n static_assert(UNSUPPORTED_FIELD == size(kProblemNames) - 1, \"bad_length\");\n if (problem < 0 || static_cast(problem) >= size(kProblemNames)) {\n o << \"[INVALID ENUM VALUE \" << static_cast(problem) << \"]\";\n } else {\n o << kProblemNames[problem];\n }\n return o;\n}"},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \n#include \n#include \nnamespace {\nusing i18n::addressinput::UNKNOWN_VALUE;\nTEST(AddressProblemTest, ValidEnumValue) {\n std::ostringstream oss;\n oss << UNKNOWN_VALUE;\n EXPECT_EQ(\"UNKNOWN_VALUE\", oss.str());\n}\n} "},"Code Url":{"kind":"string","value":"https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/address_problem.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/address_problem_test.cc"},"Commit 
Hash":{"kind":"string","value":"2610f7b1043d6784ada41392fc9392d1ea09ea07"}}},{"rowIdx":265,"cells":{"ID":{"kind":"string","value":"54ac5196-c078-4979-b2bd-62baf3bb4e06"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"fake_clock_env"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/util/fake_clock_env.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/util/fake_clock_env_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/util/fake_clock_env.h\"\n#include \nnamespace tensorflow {\nFakeClockEnv::FakeClockEnv(Env* wrapped) : EnvWrapper(wrapped) {}\nvoid FakeClockEnv::AdvanceByMicroseconds(int64_t micros) {\n {\n mutex_lock l(mu_);\n current_time_ += micros;\n }\n}\nuint64 FakeClockEnv::NowMicros() const {\n {\n mutex_lock l(mu_);\n return current_time_;\n }\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/util/fake_clock_env.h\"\n#include \n#include \n#include \"tensorflow/core/platform/env.h\"\nnamespace tensorflow {\nnamespace {\nclass FakeClockEnvTest : public ::testing::Test {\n protected:\n void SetUp() override {\n fake_clock_env_ = std::make_unique(Env::Default());\n }\n void TearDown() override { fake_clock_env_.reset(); }\n std::unique_ptr fake_clock_env_;\n};\nTEST_F(FakeClockEnvTest, TimeInitializedToZero) {\n EXPECT_EQ(0, fake_clock_env_->NowMicros());\n}\nTEST_F(FakeClockEnvTest, AdvanceTimeByMicroseconds) {\n int current_time = fake_clock_env_->NowMicros();\n int64_t duration = 100;\n current_time += duration;\n fake_clock_env_->AdvanceByMicroseconds(duration);\n EXPECT_EQ(current_time, fake_clock_env_->NowMicros());\n for (int i = 0; i < 5; ++i) {\n fake_clock_env_->AdvanceByMicroseconds(100);\n current_time += 100;\n }\n EXPECT_EQ(current_time, fake_clock_env_->NowMicros());\n current_time += duration;\n duration = 200;\n fake_clock_env_->AdvanceByMicroseconds(duration);\n EXPECT_NE(current_time, fake_clock_env_->NowMicros());\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/fake_clock_env.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/fake_clock_env_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":266,"cells":{"ID":{"kind":"string","value":"eef8dca3-7df6-482a-8512-d6ab41154110"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"tensor_testutil"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/framework/tensor_testutil.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/framework/tensor_testutil_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/framework/tensor_testutil.h\"\n#include \n#include \"tensorflow/core/framework/tensor.h\"\n#include \"tensorflow/core/platform/types.h\"\nnamespace tensorflow {\nnamespace test {\n::testing::AssertionResult IsSameType(const Tensor& x, const Tensor& y) {\n if (x.dtype() != y.dtype()) {\n return ::testing::AssertionFailure()\n << \"Tensors have different dtypes (\" << x.dtype() << \" vs \"\n << y.dtype() << \")\";\n }\n return ::testing::AssertionSuccess();\n}\n::testing::AssertionResult IsSameShape(const Tensor& 
x, const Tensor& y) {\n if (!x.IsSameSize(y)) {\n return ::testing::AssertionFailure()\n << \"Tensors have different shapes (\" << x.shape().DebugString()\n << \" vs \" << y.shape().DebugString() << \")\";\n }\n return ::testing::AssertionSuccess();\n}\ntemplate \nstatic ::testing::AssertionResult EqualFailure(const T& x, const T& y) {\n return ::testing::AssertionFailure()\n << std::setprecision(std::numeric_limits::digits10 + 2) << x\n << \" not equal to \" << y;\n}\ntemplate <>\n::testing::AssertionResult EqualFailure(const int8& x, const int8& y) {\n return EqualFailure(static_cast(x), static_cast(y));\n}\nstatic ::testing::AssertionResult IsEqual(float x, float y, Tolerance t) {\n if (Eigen::numext::isnan(x) && Eigen::numext::isnan(y))\n return ::testing::AssertionSuccess();\n if (t == Tolerance::kNone) {\n if (x == y) return ::testing::AssertionSuccess();\n } else {\n if (::testing::internal::CmpHelperFloatingPointEQ(\"\", \"\", x, y))\n return ::testing::AssertionSuccess();\n }\n return EqualFailure(x, y);\n}\nstatic ::testing::AssertionResult IsEqual(double x, double y, Tolerance t) {\n if (Eigen::numext::isnan(x) && Eigen::numext::isnan(y))\n return ::testing::AssertionSuccess();\n if (t == Tolerance::kNone) {\n if (x == y) return ::testing::AssertionSuccess();\n } else {\n if (::testing::internal::CmpHelperFloatingPointEQ(\"\", \"\", x, y))\n return ::testing::AssertionSuccess();\n }\n return EqualFailure(x, y);\n}\nstatic ::testing::AssertionResult IsEqual(Eigen::half x, Eigen::half y,\n Tolerance t) {\n if (Eigen::numext::isnan(x) && Eigen::numext::isnan(y))\n return ::testing::AssertionSuccess();\n if (Eigen::numext::isnan(x) || Eigen::numext::isnan(y))\n return EqualFailure(x, y);\n auto sign_and_magnitude_to_biased = [](uint16_t sam) {\n const uint16_t kSignBitMask = 0x8000;\n if (kSignBitMask & sam) return ~sam + 1; \n return kSignBitMask | sam; \n };\n auto xb = sign_and_magnitude_to_biased(Eigen::numext::bit_cast(x));\n auto yb = sign_and_magnitude_to_biased(Eigen::numext::bit_cast(y));\n if (t == Tolerance::kNone) {\n if (xb == yb) return ::testing::AssertionSuccess();\n } else {\n auto distance = xb >= yb ? xb - yb : yb - xb;\n const uint16_t kMaxUlps = 4;\n if (distance <= kMaxUlps) return ::testing::AssertionSuccess();\n }\n return EqualFailure(x, y);\n}\nstatic ::testing::AssertionResult IsEqual(tsl::bfloat16 x, tsl::bfloat16 y,\n Tolerance t) {\n if (Eigen::numext::isnan(x) && Eigen::numext::isnan(y))\n return ::testing::AssertionSuccess();\n if (Eigen::numext::isnan(x) || Eigen::numext::isnan(y))\n return EqualFailure(x, y);\n auto sign_and_magnitude_to_biased = [](uint16_t sam) {\n const uint16_t kSignBitMask = 0x8000;\n if (kSignBitMask & sam) return ~sam + 1; \n return kSignBitMask | sam; \n };\n auto xb = sign_and_magnitude_to_biased(Eigen::numext::bit_cast(x));\n auto yb = sign_and_magnitude_to_biased(Eigen::numext::bit_cast(y));\n if (t == Tolerance::kNone) {\n if (xb == yb) return ::testing::AssertionSuccess();\n } else {\n auto distance = xb >= yb ? 
xb - yb : yb - xb;\n const uint16_t kMaxUlps = 4;\n if (distance <= kMaxUlps) return ::testing::AssertionSuccess();\n }\n return EqualFailure(x, y);\n}\ntemplate \nstatic ::testing::AssertionResult IsEqual(const T& x, const T& y, Tolerance t) {\n if (::testing::internal::CmpHelperEQ(\"\", \"\", x, y))\n return ::testing::AssertionSuccess();\n return EqualFailure(x, y);\n}\ntemplate \nstatic ::testing::AssertionResult IsEqual(const std::complex& x,\n const std::complex& y,\n Tolerance t) {\n if (IsEqual(x.real(), y.real(), t) && IsEqual(x.imag(), y.imag(), t))\n return ::testing::AssertionSuccess();\n return EqualFailure(x, y);\n}\ntemplate \nstatic void ExpectEqual(const Tensor& x, const Tensor& y,\n Tolerance t = Tolerance::kDefault) {\n const T* Tx = x.unaligned_flat().data();\n const T* Ty = y.unaligned_flat().data();\n auto size = x.NumElements();\n int max_failures = 10;\n int num_failures = 0;\n for (decltype(size) i = 0; i < size; ++i) {\n EXPECT_TRUE(IsEqual(Tx[i], Ty[i], t)) << \"i = \" << (++num_failures, i);\n ASSERT_LT(num_failures, max_failures) << \"Too many mismatches, giving up.\";\n }\n}\ntemplate \nstatic ::testing::AssertionResult IsClose(const T& x, const T& y, const T& atol,\n const T& rtol) {\n if (Eigen::numext::isnan(x) && Eigen::numext::isnan(y))\n return ::testing::AssertionSuccess();\n if (x == y) return ::testing::AssertionSuccess(); \n auto tolerance = atol + rtol * Eigen::numext::abs(x);\n if (Eigen::numext::abs(x - y) <= tolerance)\n return ::testing::AssertionSuccess();\n return ::testing::AssertionFailure() << x << \" not close to \" << y;\n}\ntemplate \nstatic ::testing::AssertionResult IsClose(const std::complex& x,\n const std::complex& y,\n const T& atol, const T& rtol) {\n if (IsClose(x.real(), y.real(), atol, rtol) &&\n IsClose(x.imag(), y.imag(), atol, rtol))\n return ::testing::AssertionSuccess();\n return ::testing::AssertionFailure() << x << \" not close to \" << y;\n}\ntemplate \nstatic auto GetTolerance(double tolerance) {\n using Real = typename Eigen::NumTraits::Real;\n auto default_tol = static_cast(5.0) * Eigen::NumTraits::epsilon();\n auto result = tolerance < 0.0 ? 
default_tol : static_cast(tolerance);\n EXPECT_GE(result, static_cast(0));\n return result;\n}\ntemplate \nstatic void ExpectClose(const Tensor& x, const Tensor& y, double atol,\n double rtol) {\n auto typed_atol = GetTolerance(atol);\n auto typed_rtol = GetTolerance(rtol);\n const T* Tx = x.unaligned_flat().data();\n const T* Ty = y.unaligned_flat().data();\n auto size = x.NumElements();\n int max_failures = 10;\n int num_failures = 0;\n for (decltype(size) i = 0; i < size; ++i) {\n EXPECT_TRUE(IsClose(Tx[i], Ty[i], typed_atol, typed_rtol))\n << \"i = \" << (++num_failures, i) << \" Tx[i] = \" << Tx[i]\n << \" Ty[i] = \" << Ty[i];\n ASSERT_LT(num_failures, max_failures)\n << \"Too many mismatches (atol = \" << atol << \" rtol = \" << rtol\n << \"), giving up.\";\n }\n EXPECT_EQ(num_failures, 0)\n << \"Mismatches detected (atol = \" << atol << \" rtol = \" << rtol << \").\";\n}\nvoid ExpectEqual(const Tensor& x, const Tensor& y, Tolerance t) {\n ASSERT_TRUE(IsSameType(x, y));\n ASSERT_TRUE(IsSameShape(x, y));\n switch (x.dtype()) {\n case DT_FLOAT:\n return ExpectEqual(x, y, t);\n case DT_DOUBLE:\n return ExpectEqual(x, y, t);\n case DT_INT32:\n return ExpectEqual(x, y);\n case DT_UINT32:\n return ExpectEqual(x, y);\n case DT_UINT16:\n return ExpectEqual(x, y);\n case DT_UINT8:\n return ExpectEqual(x, y);\n case DT_INT16:\n return ExpectEqual(x, y);\n case DT_INT8:\n return ExpectEqual(x, y);\n case DT_STRING:\n return ExpectEqual(x, y);\n case DT_COMPLEX64:\n return ExpectEqual(x, y, t);\n case DT_COMPLEX128:\n return ExpectEqual(x, y, t);\n case DT_INT64:\n return ExpectEqual(x, y);\n case DT_UINT64:\n return ExpectEqual(x, y);\n case DT_BOOL:\n return ExpectEqual(x, y);\n case DT_QINT8:\n return ExpectEqual(x, y);\n case DT_QUINT8:\n return ExpectEqual(x, y);\n case DT_QINT16:\n return ExpectEqual(x, y);\n case DT_QUINT16:\n return ExpectEqual(x, y);\n case DT_QINT32:\n return ExpectEqual(x, y);\n case DT_BFLOAT16:\n return ExpectEqual(x, y, t);\n case DT_HALF:\n return ExpectEqual(x, y, t);\n case DT_FLOAT8_E5M2:\n return ExpectEqual(x, y, t);\n case DT_FLOAT8_E4M3FN:\n return ExpectEqual(x, y, t);\n case DT_INT4:\n return ExpectEqual(x, y, t);\n case DT_UINT4:\n return ExpectEqual(x, y, t);\n default:\n EXPECT_TRUE(false) << \"Unsupported type : \" << DataTypeString(x.dtype());\n }\n}\nvoid ExpectClose(const Tensor& x, const Tensor& y, double atol, double rtol) {\n ASSERT_TRUE(IsSameType(x, y));\n ASSERT_TRUE(IsSameShape(x, y));\n switch (x.dtype()) {\n case DT_HALF:\n return ExpectClose(x, y, atol, rtol);\n case DT_BFLOAT16:\n return ExpectClose(x, y, atol, rtol);\n case DT_FLOAT:\n return ExpectClose(x, y, atol, rtol);\n case DT_DOUBLE:\n return ExpectClose(x, y, atol, rtol);\n case DT_COMPLEX64:\n return ExpectClose(x, y, atol, rtol);\n case DT_COMPLEX128:\n return ExpectClose(x, y, atol, rtol);\n default:\n EXPECT_TRUE(false) << \"Unsupported type : \" << DataTypeString(x.dtype());\n }\n}\n::testing::AssertionResult internal_test::IsClose(Eigen::half x, Eigen::half y,\n double atol, double rtol) {\n return test::IsClose(x, y, GetTolerance(atol),\n GetTolerance(rtol));\n}\n::testing::AssertionResult internal_test::IsClose(float x, float y, double atol,\n double rtol) {\n return test::IsClose(x, y, GetTolerance(atol),\n GetTolerance(rtol));\n}\n::testing::AssertionResult internal_test::IsClose(double x, double y,\n double atol, double rtol) {\n return test::IsClose(x, y, GetTolerance(atol),\n GetTolerance(rtol));\n}\n} \n} "},"Unit Test - (Ground 
Truth)":{"kind":"string","value":"#include \"tensorflow/core/framework/tensor_testutil.h\"\n#include \"tensorflow/core/platform/test.h\"\nnamespace tensorflow {\nnamespace test {\nnamespace {\nusing internal_test::IsClose;\ntemplate \nvoid TestEdgeCasesNear() {\n EXPECT_TRUE(IsClose(Eigen::NumTraits::infinity(),\n Eigen::NumTraits::infinity(), 0.0, 0.0));\n EXPECT_TRUE(IsClose(Eigen::NumTraits::lowest(),\n Eigen::NumTraits::highest(),\n Eigen::NumTraits::infinity(), 0.0));\n EXPECT_FALSE(\n IsClose(Eigen::NumTraits::lowest(), Eigen::NumTraits::highest(),\n static_cast(Eigen::NumTraits::highest()), 0.0));\n EXPECT_FALSE(IsClose(Eigen::NumTraits::quiet_NaN(), T(0.0), 0.0, 0.0));\n EXPECT_TRUE(IsClose(Eigen::NumTraits::quiet_NaN(),\n Eigen::NumTraits::quiet_NaN(), 0.0, 0.0));\n EXPECT_FALSE(IsClose(Eigen::NumTraits::quiet_NaN(), T(0.0),\n Eigen::NumTraits::infinity(), 0.0));\n EXPECT_TRUE(IsClose(Eigen::NumTraits::quiet_NaN(),\n Eigen::NumTraits::quiet_NaN(),\n Eigen::NumTraits::infinity(), 0.0));\n}\ntemplate \nvoid dumpFloatingPointStorage(T value) {\n U* integral = reinterpret_cast(&value);\n int shift_amount = (sizeof(U) << 3) - 1;\n int exponent_bits = 2 + (log2(sizeof(U)) * 3);\n U mask = static_cast(1) << shift_amount;\n for (int bits = 0; bits <= shift_amount; ++bits) {\n std::cout << ((*integral & mask) > 0);\n if (bits == 0 || bits == exponent_bits) std::cout << \" \";\n mask >>= 1;\n }\n std::cout << std::endl;\n printf(\"%.20lf\\n\", static_cast(value));\n}\nTEST(TensorTestUtilTest, ExpectTensorNearHalf) {\n typedef Eigen::half T;\n EXPECT_TRUE(IsClose(static_cast(1.0f), static_cast(1.0f), 0.0, 0.0));\n EXPECT_TRUE(IsClose(static_cast(0.0f), static_cast(-0.0f), 0.0, 0.0));\n EXPECT_TRUE(\n IsClose(static_cast(3.141592f), static_cast(3.141592f), 0.0, 0.0));\n EXPECT_TRUE(\n IsClose(static_cast(8.9875f), static_cast(8.99f), 0.0078125, 0.0));\n EXPECT_FALSE(\n IsClose(static_cast(8.9875f), static_cast(8.99f), 0.007, 0.0));\n EXPECT_TRUE(\n IsClose(static_cast(720.2f), static_cast(720.3f), 0.5, 0.0));\n EXPECT_FALSE(\n IsClose(static_cast(720.2f), static_cast(720.3f), 0.4, 0.0));\n EXPECT_TRUE(\n IsClose(static_cast(1234.f), static_cast(1235.f), 1.0, 0.0));\n EXPECT_FALSE(\n IsClose(static_cast(1234.5f), static_cast(1235.f), 0.5, 0.0));\n EXPECT_TRUE(\n IsClose(static_cast(1234.5f), static_cast(1235.f), 1.0, 0.0));\n EXPECT_TRUE(\n IsClose(static_cast(-2.71f), static_cast(-2.72f), 0.01, 0.0));\n TestEdgeCasesNear();\n}\nTEST(TensorTestUtilTest, ExpectTensorNearFloat) {\n typedef float T;\n EXPECT_TRUE(IsClose(1.0f, 1.0f, 0.0f, 0.0f));\n EXPECT_TRUE(IsClose(0.0f, -0.0f, 0.0f, 0.0f));\n EXPECT_TRUE(IsClose(3.14159265359f, 3.14159265359f, 0.0f, 0.0f));\n EXPECT_TRUE(IsClose(8.9875f, 8.9876f, 0.0001002f, 0.0f));\n EXPECT_FALSE(IsClose(8.9875f, 8.9876f, 0.0001f, 0.0f));\n EXPECT_TRUE(IsClose(720.2017f, 720.2018f, 0.0001f, 0.0f));\n EXPECT_FALSE(IsClose(720.20175f, 720.20185f, 0.0001f, 0.0f));\n EXPECT_TRUE(IsClose(720.20175f, 720.20185f, 0.00013f, 0.0f));\n EXPECT_FALSE(IsClose(123456788.f, 123456789.f, 4.0f, 0.0f));\n EXPECT_TRUE(IsClose(123456788.f, 123456789.f, 8.0f, 0.0f));\n EXPECT_TRUE(IsClose(-2.718281f, -2.718282f, 0.1f, 0.0f));\n TestEdgeCasesNear();\n}\nTEST(TensorTestUtilTest, ExpectTensorNearDouble) {\n typedef double T;\n EXPECT_TRUE(IsClose(1.0, 1.0, 0.0, 0.0));\n EXPECT_TRUE(IsClose(0.0, -0.0, 0.0, 0.0));\n EXPECT_TRUE(IsClose(3.14159265359, 3.14159265359, 0.0, 0.0));\n EXPECT_TRUE(IsClose(8.9875, 8.9876, 0.0001, 0.0));\n EXPECT_FALSE(IsClose(100720.2018, 100720.2019, 
0.0001, 0.0));\n EXPECT_TRUE(IsClose(100720.2018, 100720.2019, 1.00000005e-4, 0.0));\n EXPECT_FALSE(IsClose(12345678901234567., 12345678901234566., 1.0, 0.0));\n EXPECT_TRUE(IsClose(12345678901234567., 12345678901234566., 2.0, 0.0));\n EXPECT_FALSE(IsClose(-2.71828182846, -2.71828182847, 1.0e-11, 0.0));\n EXPECT_TRUE(IsClose(-2.71828182846, -2.71828182847, 1.00000009e-11, 0.0));\n TestEdgeCasesNear();\n}\nTEST(TensorTestUtilTest, ExpectTensorNearSlice) {\n Tensor x(DT_FLOAT, TensorShape({7, 3}));\n test::FillFn(&x, [](int i) { return 1.0f; });\n test::ExpectTensorNear(\n x.SubSlice(3), test::AsTensor({1.0, 1.0, 1.0}, TensorShape({3})),\n 1e-10);\n}\ntemplate \nvoid TestEdgeCasesClose() {\n EXPECT_TRUE(IsClose(Eigen::NumTraits::infinity(),\n Eigen::NumTraits::infinity(), 0.0, 0.0));\n EXPECT_TRUE(IsClose(Eigen::NumTraits::lowest(),\n Eigen::NumTraits::highest(),\n Eigen::NumTraits::infinity(),\n Eigen::NumTraits::infinity()));\n EXPECT_TRUE(IsClose(Eigen::NumTraits::lowest(),\n Eigen::NumTraits::highest(),\n static_cast(Eigen::NumTraits::highest()),\n static_cast(Eigen::NumTraits::highest())));\n EXPECT_FALSE(IsClose(Eigen::NumTraits::quiet_NaN(), T(0.0), 0.0, 0.0));\n EXPECT_TRUE(IsClose(Eigen::NumTraits::quiet_NaN(),\n Eigen::NumTraits::quiet_NaN(), 0.0, 0.0));\n EXPECT_FALSE(IsClose(Eigen::NumTraits::quiet_NaN(), T(0.0),\n Eigen::NumTraits::infinity(), 0.0));\n EXPECT_TRUE(IsClose(Eigen::NumTraits::quiet_NaN(),\n Eigen::NumTraits::quiet_NaN(),\n Eigen::NumTraits::infinity(), 0.0));\n}\nTEST(TensorTestUtilTest, ExpectTensorCloseHalf) {\n typedef Eigen::half T;\n EXPECT_TRUE(IsClose(static_cast(1.0f), static_cast(1.1f), 0.1, 0.1));\n EXPECT_TRUE(IsClose(static_cast(1.0f), static_cast(1.0f), 0.0, 0.0));\n EXPECT_FALSE(IsClose(static_cast(1.0f), static_cast(1.1f), 0.0, 0.0));\n EXPECT_TRUE(IsClose(static_cast(1.234f), static_cast(1.234f)));\n EXPECT_TRUE(IsClose(static_cast(1.234f), static_cast(1.233f)));\n EXPECT_TRUE(IsClose(static_cast(1.234f), static_cast(1.235f)));\n EXPECT_FALSE(IsClose(static_cast(1.234f), static_cast(1.232f)));\n EXPECT_FALSE(IsClose(static_cast(1.234f), static_cast(1.236f)));\n EXPECT_TRUE(\n IsClose(static_cast(1.234f), static_cast(1.232f), 8e-4f, 1e-3f));\n EXPECT_TRUE(\n IsClose(static_cast(1.234f), static_cast(1.236f), 1.4e-3f, 5e-4f));\n EXPECT_TRUE(\n IsClose(static_cast(3.141592f), static_cast(3.141593f), 0.0, 0.0));\n EXPECT_FALSE(IsClose(static_cast(1e4f), static_cast(1e-4f)));\n TestEdgeCasesClose();\n}\nTEST(TensorTestUtilTest, ExpectTensorCloseFloat) {\n typedef float T;\n EXPECT_TRUE(IsClose(1.0f, 1.1f, 0.1f, 0.1f));\n EXPECT_TRUE(IsClose(1.0f, 1.0f, 0.0f, 0.0f));\n EXPECT_FALSE(IsClose(1.0f, 1.1f, 0.0f, 0.0f));\n EXPECT_TRUE(IsClose(1.234567f, 1.234567f));\n EXPECT_TRUE(IsClose(1.234567f, 1.234568f));\n EXPECT_TRUE(IsClose(1.234567f, 1.234566f));\n EXPECT_FALSE(IsClose(1.234567f, 1.234569f));\n EXPECT_FALSE(IsClose(1.234567f, 1.234565f));\n EXPECT_TRUE(IsClose(1.234567f, 1.234569f, 8e-7f, 1e-6f));\n EXPECT_TRUE(IsClose(1.234567f, 1.234565f, 3e-7f, 1.5e-6f));\n EXPECT_TRUE(IsClose(3.14159265f, 3.14159266f, 0.0f, 0.0f));\n EXPECT_FALSE(IsClose(1e8f, 1e-8f));\n EXPECT_FALSE(IsClose(1e15f, 1e-15f));\n TestEdgeCasesClose();\n}\nTEST(TensorTestUtilTest, ExpectTensorCloseDouble) {\n typedef double T;\n EXPECT_TRUE(IsClose(1.0, 1.1, 0.1, 0.1));\n EXPECT_TRUE(IsClose(1.0, 1.0, 0.0, 0.0));\n EXPECT_FALSE(IsClose(1.0, 1.1, 0.0, 0.0));\n EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123456));\n EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123457));\n 
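// [Editor's note, added] The pass/fail boundary in these double checks follows
// GetTolerance in tensor_testutil.cc above: with no explicit tolerance, atol
// and rtol both default to 5 * epsilon, so for doubles near 1.23 the allowed
// absolute error is roughly atol + rtol * |x| ~= 2.5e-15 -- which is why a
// 2e-15 difference passes below and a 3e-15 difference fails. A sketch of the
// same default-tolerance formula:
//
//   template <typename T>
//   typename Eigen::NumTraits<T>::Real DefaultTolerance() {
//     using Real = typename Eigen::NumTraits<T>::Real;
//     return static_cast<Real>(5.0) * Eigen::NumTraits<Real>::epsilon();
//   }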
EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123455));\n EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123458));\n EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123454));\n EXPECT_FALSE(IsClose(1.234567890123456, 1.234567890123459));\n EXPECT_FALSE(IsClose(1.234567890123456, 1.234567890123453));\n EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123459, 9.5e-16, 1.6e-15));\n EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123453, 7e-16, 2e-15));\n EXPECT_TRUE(IsClose(3.141592653589793238, 3.141592653589793239, 0.0, 0.0));\n EXPECT_FALSE(IsClose(1e15, 1e-15));\n EXPECT_FALSE(IsClose(1e30, 1e-30));\n TestEdgeCasesClose();\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/tensor_testutil.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/tensor_testutil_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":267,"cells":{"ID":{"kind":"string","value":"5c396182-72ac-466f-b33e-1ce61dd04742"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/quiche"},"File Name":{"kind":"string","value":"nghttp2"},"File Path in Repository":{"kind":"string","value":"quiche/http2/adapter/nghttp2.h"},"File Path for Unit Test":{"kind":"string","value":"quiche/http2/adapter/nghttp2_test.cc"},"Code":{"kind":"string","value":"#ifndef QUICHE_HTTP2_ADAPTER_NGHTTP2_H_\n#define QUICHE_HTTP2_ADAPTER_NGHTTP2_H_\n#include \nusing ssize_t = ptrdiff_t;\n#include \"nghttp2/nghttp2.h\" \n#endif "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"quiche/http2/adapter/nghttp2.h\"\n#include \n#include \n#include \n#include \"absl/strings/str_cat.h\"\n#include \"quiche/http2/adapter/mock_nghttp2_callbacks.h\"\n#include \"quiche/http2/adapter/nghttp2_test_utils.h\"\n#include \"quiche/http2/adapter/nghttp2_util.h\"\n#include \"quiche/http2/adapter/test_frame_sequence.h\"\n#include \"quiche/http2/adapter/test_utils.h\"\n#include \"quiche/common/platform/api/quiche_test.h\"\nnamespace http2 {\nnamespace adapter {\nnamespace test {\nnamespace {\nusing testing::_;\nenum FrameType {\n DATA,\n HEADERS,\n PRIORITY,\n RST_STREAM,\n SETTINGS,\n PUSH_PROMISE,\n PING,\n GOAWAY,\n WINDOW_UPDATE,\n};\nnghttp2_option* GetOptions() {\n nghttp2_option* options;\n nghttp2_option_new(&options);\n nghttp2_option_set_no_closed_streams(options, 1);\n nghttp2_option_set_no_auto_window_update(options, 1);\n nghttp2_option_set_max_send_header_block_length(options, 0x2000000);\n nghttp2_option_set_max_outbound_ack(options, 10000);\n return options;\n}\nclass Nghttp2Test : public quiche::test::QuicheTest {\n public:\n Nghttp2Test() : session_(MakeSessionPtr(nullptr)) {}\n void SetUp() override { InitializeSession(); }\n virtual Perspective GetPerspective() = 0;\n void InitializeSession() {\n auto nghttp2_callbacks = MockNghttp2Callbacks::GetCallbacks();\n nghttp2_option* options = GetOptions();\n nghttp2_session* ptr;\n if (GetPerspective() == Perspective::kClient) {\n nghttp2_session_client_new2(&ptr, nghttp2_callbacks.get(),\n &mock_callbacks_, options);\n } else {\n nghttp2_session_server_new2(&ptr, nghttp2_callbacks.get(),\n &mock_callbacks_, options);\n }\n nghttp2_option_del(options);\n EXPECT_CALL(mock_callbacks_, Send(_, _, _))\n .WillRepeatedly(\n [this](const uint8_t* data, size_t length, int ) {\n 
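// [Editor's note, added] The Send expectation here implements nghttp2's
// send-callback contract: the callback may transmit up to `length` bytes and
// must return the number actually consumed (or NGHTTP2_ERR_WOULDBLOCK to pause
// writing); the elided third parameter is the nghttp2 flags int. A standalone
// equivalent, as a sketch (RecordSend is a hypothetical name; the signature is
// the real nghttp2_send_callback shape):
//
//   ssize_t RecordSend(nghttp2_session*, const uint8_t* data, size_t length,
//                      int /*flags*/, void* user_data) {
//     static_cast<std::string*>(user_data)
//         ->append(reinterpret_cast<const char*>(data), length);
//     return static_cast<ssize_t>(length);  // or NGHTTP2_ERR_WOULDBLOCK
//   }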
absl::StrAppend(&serialized_, ToStringView(data, length));\n return length;\n });\n EXPECT_CALL(mock_callbacks_, SendData(_, _, _, _))\n .WillRepeatedly([this](nghttp2_frame* , const uint8_t* framehd,\n size_t length, nghttp2_data_source* source) {\n QUICHE_LOG(INFO) << \"Appending frame header and \" << length\n << \" bytes of data\";\n auto* s = static_cast(source->ptr);\n absl::StrAppend(&serialized_, ToStringView(framehd, 9),\n s->ReadNext(length));\n return 0;\n });\n session_ = MakeSessionPtr(ptr);\n }\n testing::StrictMock mock_callbacks_;\n nghttp2_session_unique_ptr session_;\n std::string serialized_;\n};\nclass Nghttp2ClientTest : public Nghttp2Test {\n public:\n Perspective GetPerspective() override { return Perspective::kClient; }\n};\nTEST_F(Nghttp2ClientTest, ClientReceivesUnexpectedHeaders) {\n const std::string initial_frames = TestFrameSequence()\n .ServerPreface()\n .Ping(42)\n .WindowUpdate(0, 1000)\n .Serialize();\n testing::InSequence seq;\n EXPECT_CALL(mock_callbacks_, OnBeginFrame(HasFrameHeader(0, SETTINGS, 0)));\n EXPECT_CALL(mock_callbacks_, OnFrameRecv(IsSettings(testing::IsEmpty())));\n EXPECT_CALL(mock_callbacks_, OnBeginFrame(HasFrameHeader(0, PING, 0)));\n EXPECT_CALL(mock_callbacks_, OnFrameRecv(IsPing(42)));\n EXPECT_CALL(mock_callbacks_,\n OnBeginFrame(HasFrameHeader(0, WINDOW_UPDATE, 0)));\n EXPECT_CALL(mock_callbacks_, OnFrameRecv(IsWindowUpdate(1000)));\n ssize_t result = nghttp2_session_mem_recv(\n session_.get(), ToUint8Ptr(initial_frames.data()), initial_frames.size());\n ASSERT_EQ(result, initial_frames.size());\n const std::string unexpected_stream_frames =\n TestFrameSequence()\n .Headers(1,\n {{\":status\", \"200\"},\n {\"server\", \"my-fake-server\"},\n {\"date\", \"Tue, 6 Apr 2021 12:54:01 GMT\"}},\n false)\n .Data(1, \"This is the response body.\")\n .RstStream(3, Http2ErrorCode::INTERNAL_ERROR)\n .GoAway(5, Http2ErrorCode::ENHANCE_YOUR_CALM, \"calm down!!\")\n .Serialize();\n EXPECT_CALL(mock_callbacks_, OnBeginFrame(HasFrameHeader(1, HEADERS, _)));\n EXPECT_CALL(mock_callbacks_, OnInvalidFrameRecv(IsHeaders(1, _, _), _));\n nghttp2_session_mem_recv(session_.get(),\n ToUint8Ptr(unexpected_stream_frames.data()),\n unexpected_stream_frames.size());\n}\nTEST_F(Nghttp2ClientTest, ClientSendsRequest) {\n int result = nghttp2_session_send(session_.get());\n ASSERT_EQ(result, 0);\n EXPECT_THAT(serialized_, testing::StrEq(spdy::kHttp2ConnectionHeaderPrefix));\n serialized_.clear();\n const std::string initial_frames =\n TestFrameSequence().ServerPreface().Serialize();\n testing::InSequence s;\n EXPECT_CALL(mock_callbacks_, OnBeginFrame(HasFrameHeader(0, SETTINGS, 0)));\n EXPECT_CALL(mock_callbacks_, OnFrameRecv(IsSettings(testing::IsEmpty())));\n ssize_t recv_result = nghttp2_session_mem_recv(\n session_.get(), ToUint8Ptr(initial_frames.data()), initial_frames.size());\n EXPECT_EQ(initial_frames.size(), recv_result);\n EXPECT_CALL(mock_callbacks_, BeforeFrameSend(IsSettings(testing::IsEmpty())));\n EXPECT_CALL(mock_callbacks_, OnFrameSend(IsSettings(testing::IsEmpty())));\n EXPECT_TRUE(nghttp2_session_want_write(session_.get()));\n result = nghttp2_session_send(session_.get());\n EXPECT_THAT(serialized_, EqualsFrames({spdy::SpdyFrameType::SETTINGS}));\n serialized_.clear();\n EXPECT_FALSE(nghttp2_session_want_write(session_.get()));\n std::vector> headers = {\n {\":method\", \"POST\"},\n {\":scheme\", \"http\"},\n {\":authority\", \"example.com\"},\n {\":path\", \"/this/is/request/one\"}};\n std::vector nvs;\n for (const auto& h : headers) {\n 
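// [Editor's note, added] nghttp2_nv stores non-owning (pointer, length)
// pairs, so the header strings built above must outlive the
// nghttp2_submit_request call below. When the caller can guarantee even
// longer lifetimes, nghttp2's real flags let it skip internal copies, e.g.
// (illustrative, not used by this test):
//
//   nv.flags = NGHTTP2_NV_FLAG_NO_COPY_NAME | NGHTTP2_NV_FLAG_NO_COPY_VALUE;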
nvs.push_back({.name = ToUint8Ptr(h.first.data()),\n .value = ToUint8Ptr(h.second.data()),\n .namelen = h.first.size(),\n .valuelen = h.second.size(),\n .flags = NGHTTP2_NV_FLAG_NONE});\n }\n const absl::string_view kBody = \"This is an example request body.\";\n TestDataSource source{kBody};\n nghttp2_data_provider provider = source.MakeDataProvider();\n int stream_id =\n nghttp2_submit_request(session_.get(), nullptr , nvs.data(),\n nvs.size(), &provider, nullptr );\n EXPECT_GT(stream_id, 0);\n EXPECT_TRUE(nghttp2_session_want_write(session_.get()));\n EXPECT_CALL(mock_callbacks_, BeforeFrameSend(IsHeaders(stream_id, _, _)));\n EXPECT_CALL(mock_callbacks_, OnFrameSend(IsHeaders(stream_id, _, _)));\n EXPECT_CALL(mock_callbacks_, OnFrameSend(IsData(stream_id, kBody.size(), _)));\n nghttp2_session_send(session_.get());\n EXPECT_THAT(serialized_, EqualsFrames({spdy::SpdyFrameType::HEADERS,\n spdy::SpdyFrameType::DATA}));\n EXPECT_THAT(serialized_, testing::HasSubstr(kBody));\n EXPECT_FALSE(nghttp2_session_want_write(session_.get()));\n}\nclass Nghttp2ServerTest : public Nghttp2Test {\n public:\n Perspective GetPerspective() override { return Perspective::kServer; }\n};\nTEST_F(Nghttp2ServerTest, MismatchedContentLength) {\n const std::string initial_frames =\n TestFrameSequence()\n .ClientPreface()\n .Headers(1,\n {{\":method\", \"POST\"},\n {\":scheme\", \"https\"},\n {\":authority\", \"example.com\"},\n {\":path\", \"/\"},\n {\"content-length\", \"50\"}},\n false)\n .Data(1, \"Less than 50 bytes.\", true)\n .Serialize();\n testing::InSequence seq;\n EXPECT_CALL(mock_callbacks_, OnBeginFrame(HasFrameHeader(0, SETTINGS, _)));\n EXPECT_CALL(mock_callbacks_, OnFrameRecv(IsSettings(testing::IsEmpty())));\n EXPECT_CALL(mock_callbacks_, OnBeginFrame(HasFrameHeader(\n 1, HEADERS, NGHTTP2_FLAG_END_HEADERS)));\n EXPECT_CALL(mock_callbacks_,\n OnBeginHeaders(IsHeaders(1, NGHTTP2_FLAG_END_HEADERS,\n NGHTTP2_HCAT_REQUEST)));\n EXPECT_CALL(mock_callbacks_, OnHeader(_, \":method\", \"POST\", _));\n EXPECT_CALL(mock_callbacks_, OnHeader(_, \":scheme\", \"https\", _));\n EXPECT_CALL(mock_callbacks_, OnHeader(_, \":authority\", \"example.com\", _));\n EXPECT_CALL(mock_callbacks_, OnHeader(_, \":path\", \"/\", _));\n EXPECT_CALL(mock_callbacks_, OnHeader(_, \"content-length\", \"50\", _));\n EXPECT_CALL(mock_callbacks_,\n OnFrameRecv(IsHeaders(1, NGHTTP2_FLAG_END_HEADERS,\n NGHTTP2_HCAT_REQUEST)));\n EXPECT_CALL(mock_callbacks_,\n OnBeginFrame(HasFrameHeader(1, DATA, NGHTTP2_FLAG_END_STREAM)));\n EXPECT_CALL(mock_callbacks_, OnDataChunkRecv(NGHTTP2_FLAG_END_STREAM, 1,\n \"Less than 50 bytes.\"));\n ssize_t result = nghttp2_session_mem_recv(\n session_.get(), ToUint8Ptr(initial_frames.data()), initial_frames.size());\n ASSERT_EQ(result, initial_frames.size());\n}\n} \n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/nghttp2.h"},"Test Code Url":{"kind":"string","value":"https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/nghttp2_test.cc"},"Commit Hash":{"kind":"string","value":"6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6"}}},{"rowIdx":268,"cells":{"ID":{"kind":"string","value":"17e0abbc-0854-4ec0-a080-901466566b64"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"snapshot_manager"},"File Path in 
Repository":{"kind":"string","value":"tensorflow/core/data/service/snapshot/snapshot_manager.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/data/service/snapshot/snapshot_manager_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/data/service/snapshot/snapshot_manager.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/btree_map.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/log.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/time/time.h\"\n#include \"xla/tsl/lib/io/compression.h\"\n#include \"xla/tsl/protobuf/status.pb.h\"\n#include \"tensorflow/core/data/service/common.pb.h\"\n#include \"tensorflow/core/data/service/dispatcher.pb.h\"\n#include \"tensorflow/core/data/service/snapshot/file_utils.h\"\n#include \"tensorflow/core/data/service/snapshot/path_utils.h\"\n#include \"tensorflow/core/data/service/snapshot/prefetched_split_provider.h\"\n#include \"tensorflow/core/data/service/split_provider.h\"\n#include \"tensorflow/core/framework/dataset.h\"\n#include \"tensorflow/core/framework/tensor.h\"\n#include \"tensorflow/core/platform/status.h\"\n#include \"tsl/platform/env.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/mutex.h\"\n#include \"tsl/platform/path.h\"\n#include \"tsl/platform/status_to_from_proto.h\"\n#include \"tsl/platform/thread_annotations.h\"\n#include \"tsl/platform/threadpool.h\"\n#include \"tsl/protobuf/error_codes.pb.h\"\nnamespace tensorflow {\nnamespace data {\nnamespace {\nconst absl::Duration kProgressLoggingInterval = absl::Minutes(1);\nabsl::StatusOr CountSplits(SplitProvider& split_provider) {\n if (split_provider.Cardinality() != kUnknownCardinality) {\n return split_provider.Cardinality();\n }\n int64_t num_splits = 0;\n Tensor tensor;\n for (bool end_of_splits = false; !end_of_splits; ++num_splits) {\n TF_RETURN_IF_ERROR(split_provider.GetNext(&tensor, &end_of_splits));\n }\n --num_splits;\n TF_RETURN_IF_ERROR(split_provider.Reset());\n return num_splits;\n}\nabsl::Status SkipSplit(SplitProvider& split_provider,\n int64_t& repetition_index) {\n Tensor tensor;\n bool end_of_splits = false;\n TF_RETURN_IF_ERROR(split_provider.GetNext(&tensor, &end_of_splits));\n while (end_of_splits) {\n ++repetition_index;\n TF_RETURN_IF_ERROR(split_provider.Reset());\n TF_RETURN_IF_ERROR(split_provider.GetNext(&tensor, &end_of_splits));\n }\n return absl::OkStatus();\n}\nstd::string PrefetchedSplitDir(const std::string& snapshot_path,\n int64_t source_index) {\n return tsl::io::JoinPath(snapshot_path, \"prefetched_splits\",\n absl::StrCat(\"source_\", source_index));\n}\n} \nabsl::StatusOr SnapshotAssignmentManager::TryAddAssignment(\n absl::string_view snapshot_path, absl::string_view worker_address,\n int64_t stream_index) TF_LOCKS_EXCLUDED(mu_) {\n tsl::mutex_lock l(mu_);\n if (assignments_[worker_address].size() >=\n worker_max_concurrent_snapshots()) {\n return false;\n }\n Assignment assignment{std::string(snapshot_path), stream_index};\n auto [unused, success] = assignments_[worker_address].insert(assignment);\n if (!success) {\n return absl::InternalError(absl::StrCat(\"Worker \", worker_address,\n \" already had an assignment for \",\n assignment.DebugString()));\n }\n 
++snapshot_assignment_counts_[snapshot_path];\n return true;\n}\nvoid SnapshotAssignmentManager::RemoveAssignment(\n absl::string_view snapshot_path, absl::string_view worker_address,\n int64_t stream_index) TF_LOCKS_EXCLUDED(mu_) {\n tsl::mutex_lock l(mu_);\n auto num_erased = assignments_[worker_address].erase(\n {std::string(snapshot_path), stream_index});\n if ((snapshot_assignment_counts_[snapshot_path] -= num_erased) <= 0) {\n snapshot_assignment_counts_.erase(snapshot_path);\n }\n}\nvoid SnapshotAssignmentManager::AddSnapshot(absl::string_view snapshot_path)\n TF_LOCKS_EXCLUDED(mu_) {\n tsl::mutex_lock l(mu_);\n if (!snapshot_assignment_counts_.contains(snapshot_path)) {\n snapshot_assignment_counts_[snapshot_path] = 0;\n }\n}\nstd::vector SnapshotAssignmentManager::LoadBalanceSnapshots(\n absl::string_view worker_address) TF_LOCKS_EXCLUDED(mu_) {\n std::vector result;\n tsl::mutex_lock l(mu_);\n result.reserve(snapshot_assignment_counts_.size());\n const auto it = assignments_.find(worker_address);\n if (it != assignments_.end()) {\n for (const Assignment& assignment : it->second) {\n result.push_back(assignment.snapshot_path);\n }\n }\n if (result.size() >= worker_max_concurrent_snapshots()) {\n return result;\n }\n absl::btree_multimap snapshots_by_count;\n for (const auto& [snapshot, count] : snapshot_assignment_counts_) {\n snapshots_by_count.emplace(count, snapshot);\n }\n for (const auto& [_, snapshot] : snapshots_by_count) {\n if (absl::c_find(result, snapshot) == result.end()) {\n result.push_back(snapshot);\n return result;\n }\n }\n return result;\n}\nabsl::StatusOr> SnapshotManager::Start(\n const SnapshotRequest& request,\n SnapshotAssignmentManager& assignment_manager, Env* env) {\n std::unique_ptr snapshot_manager{\n new SnapshotManager{request.path(), assignment_manager, env}};\n TF_RETURN_IF_ERROR(snapshot_manager->Start(request));\n return snapshot_manager;\n}\nabsl::Status SnapshotManager::Start(const SnapshotRequest& request)\n TF_LOCKS_EXCLUDED(mu_) {\n LOG(INFO) << \"Starting to write tf.data snapshot at \" << request.path();\n if (env_->FileExists(request.path()).ok()) {\n return errors::AlreadyExists(\"tf.data snapshot at \", request.path(),\n \" already exists.\");\n }\n tsl::mutex_lock l(mu_);\n TF_RETURN_IF_ERROR(WriteOnDiskSkeleton());\n TF_RETURN_IF_ERROR(WriteOnDiskMetadata(request));\n TF_ASSIGN_OR_RETURN(sources_, CreateSources(request.dataset()));\n TF_ASSIGN_OR_RETURN(num_total_splits_, GetSplitsCardinality());\n metadata_ = request.metadata();\n LOG(INFO) << \"Started writing tf.data distributed snapshot at \" << path_;\n return absl::OkStatus();\n}\nabsl::StatusOr>\nSnapshotManager::CreateSources(const DatasetDef& dataset_def) const\n TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {\n std::vector> split_providers;\n TF_RETURN_IF_ERROR(CreateSplitProviders(dataset_def, split_providers));\n std::vector sources;\n sources.reserve(split_providers.size());\n for (size_t i = 0; i < split_providers.size(); ++i) {\n TF_ASSIGN_OR_RETURN(size_t cardinality, CountSplits(*split_providers[i]));\n sources.emplace_back(\n std::make_unique(\n std::move(split_providers[i]), PrefetchedSplitDir(path_, i), env_),\n 0, cardinality);\n }\n return sources;\n}\nabsl::StatusOr SnapshotManager::GetSplitsCardinality()\n TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {\n return absl::c_accumulate(sources_, 0,\n [](size_t cardinality, const Source& source) {\n return cardinality + source.cardinality;\n });\n}\nabsl::Status SnapshotManager::WriteOnDiskSkeleton()\n TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {\n 
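// [Editor's note, added] WriteOnDiskMetadata below leans on the Atomically*
// file helpers so a crash can never leave a half-written snapshot.metadata.
// The usual shape of such a helper is write-temp-then-rename; the following
// is a sketch under that assumption, not the actual tsl implementation:
//
//   absl::Status AtomicallyWriteString(const std::string& path,
//                                      absl::string_view contents, Env* env) {
//     const std::string tmp = absl::StrCat(path, "__tmp");
//     TF_RETURN_IF_ERROR(WriteStringToFile(env, tmp, contents));
//     return env->RenameFile(tmp, path);  // rename(2) is atomic on POSIX
//   }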
TF_RETURN_IF_ERROR(\n env_->RecursivelyCreateDir(CommittedChunksDirectory(path_)));\n TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(StreamsDirectory(path_)));\n return absl::OkStatus();\n}\nabsl::Status SnapshotManager::WriteOnDiskMetadata(\n const SnapshotRequest& request) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {\n TF_RETURN_IF_ERROR(AtomicallyWriteTextProto(SnapshotMetadataFilePath(path_),\n request.metadata(), env_));\n TF_RETURN_IF_ERROR(AtomicallyWriteStringToFile(\n DatasetSpecFilePath(path_), request.metadata().element_spec(), env_));\n TF_RETURN_IF_ERROR(AtomicallyWriteBinaryProto(DatasetDefFilePath(path_),\n request.dataset(), env_));\n return absl::OkStatus();\n}\nabsl::StatusOr> SnapshotManager::Resume(\n absl::string_view path, SnapshotAssignmentManager& assignment_manager,\n Env* env) {\n SnapshotManager* snapshot_manager =\n new SnapshotManager(path, assignment_manager, env);\n TF_RETURN_IF_ERROR(snapshot_manager->Resume());\n return absl::WrapUnique(snapshot_manager);\n}\nabsl::Status SnapshotManager::Resume() TF_LOCKS_EXCLUDED(mu_) {\n tsl::mutex_lock l(mu_);\n if (!env_->FileExists(path_).ok()) {\n return absl::InternalError(\n absl::StrCat(\"Failed to recover tf.data snapshot at \", path_,\n \": the snapshot path doesn't exist.\"));\n }\n if (env_->FileExists(SnapshotDoneFilePath(path_)).ok()) {\n mode_ = Mode::kDone;\n LOG(INFO) << \"Recovered finished tf.data snapshot at \" << path_;\n return absl::OkStatus();\n }\n if (env_->FileExists(SnapshotErrorFilePath(path_)).ok()) {\n mode_ = Mode::kError;\n StatusProto status_proto;\n TF_RETURN_IF_ERROR(\n ReadTextProto(env_, SnapshotErrorFilePath(path_), &status_proto));\n status_ = tsl::StatusFromProto(status_proto);\n return absl::OkStatus();\n }\n TF_RETURN_IF_ERROR(ReadOnDiskMetadata());\n TF_RETURN_IF_ERROR(ReadOnDiskStreams());\n LOG(INFO) << \"Resumed writing tf.data distributed snapshot at \" << path_;\n return absl::OkStatus();\n}\nabsl::Status SnapshotManager::ReadOnDiskMetadata()\n TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {\n if (!env_->FileExists(SnapshotMetadataFilePath(path_)).ok()) {\n return absl::InternalError(\n absl::StrCat(\"Failed to recover snapshot at \", path_,\n \": snapshot has no snapshot.metadata\"));\n }\n TF_RETURN_IF_ERROR(\n ReadTextProto(env_, SnapshotMetadataFilePath(path_), &metadata_));\n if (!env_->FileExists(DatasetDefFilePath(path_)).ok()) {\n return absl::InternalError(\n absl::StrCat(\"Failed to recovery snapshot at \", path_,\n \": snapshot has no dataset_def.proto\"));\n }\n return absl::OkStatus();\n}\nabsl::Status SnapshotManager::ReadOnDiskStreams()\n TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {\n std::string streams_path = StreamsDirectory(path_);\n TF_ASSIGN_OR_RETURN(const std::vector stream_directories,\n GetChildren(streams_path, env_));\n DatasetDef dataset_def;\n TF_RETURN_IF_ERROR(\n tsl::ReadBinaryProto(env_, DatasetDefFilePath(path_), &dataset_def));\n std::vector> split_providers;\n TF_RETURN_IF_ERROR(CreateSplitProviders(dataset_def, split_providers));\n std::vector repetition_indices(split_providers.size(), 0);\n std::vector cardinalities;\n for (size_t i = 0; i < split_providers.size(); ++i) {\n TF_ASSIGN_OR_RETURN(int64_t cardinality, CountSplits(*split_providers[i]));\n cardinalities.push_back(cardinality);\n }\n tsl::mutex mu; \n absl::Status resume_status;\n absl::flat_hash_set global_split_indices;\n auto thread_pool = std::make_unique(\n env_, tsl::ThreadOptions{}, \"restore_snapshot_stream_thread\",\n std::max(size_t{1}, stream_directories.size()));\n for (const auto& stream_directory 
: stream_directories) {\n std::string stream_path = tsl::io::JoinPath(streams_path, stream_directory);\n std::vector tokens = absl::StrSplit(stream_directory, '_');\n int64_t stream_index;\n if (tokens.size() != 2 || !absl::SimpleAtoi(tokens[1], &stream_index) ||\n stream_index < 0) {\n return absl::InternalError(absl::StrCat(\n \"Can't parse tf.data snapshot stream directory \", stream_path,\n \": filename must have the format stream_.\"));\n }\n thread_pool->Schedule([this, &stream_directories, stream_index,\n &split_providers, &repetition_indices,\n &global_split_indices, &resume_status,\n &mu]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {\n StreamRestorer stream_restorer(env_, path_, stream_index,\n split_providers.size(),\n assignment_manager_);\n absl::Status s = stream_restorer.ReadOnDiskStream();\n tsl::mutex_lock l(mu);\n resume_status.Update(s);\n resume_status.Update(RestoreFrom(stream_restorer, stream_directories,\n split_providers, repetition_indices,\n global_split_indices));\n });\n }\n thread_pool.reset();\n TF_RETURN_IF_ERROR(resume_status);\n for (int64_t i = 0; i < split_providers.size(); ++i) {\n sources_.emplace_back(\n std::make_unique(\n std::move(split_providers[i]), PrefetchedSplitDir(path_, i), env_),\n repetition_indices[i], cardinalities[i]);\n }\n TF_ASSIGN_OR_RETURN(num_total_splits_, GetSplitsCardinality());\n for (int64_t i = 0; i < global_split_indices.size(); ++i) {\n if (!global_split_indices.contains(i)) {\n return absl::InternalError(\n absl::StrCat(\"Failed to restore tf.data snapshot at \", path_,\n \": Found missing global split index \", i, \".\"));\n }\n }\n num_assigned_splits_ = global_split_indices.size();\n if (!streams_.empty() && absl::c_all_of(streams_, [](const auto& stream) {\n return stream.second.state == Stream::State::kDone;\n })) {\n mode_ = Mode::kDone;\n TF_RETURN_IF_ERROR(AtomicallyWriteStringToFile(SnapshotDoneFilePath(path_),\n std::string(), env_));\n LOG(INFO) << \"Finished writing tf.data distributed snapshot at \" << path_;\n }\n return absl::OkStatus();\n}\nabsl::StatusOr\nSnapshotManager::StreamRestorer::OwnerWorkerAddress() const {\n std::string worker_address;\n TF_RETURN_IF_ERROR(\n env_->FileExists(StreamWorkerFilePath(path_, stream_index_)));\n TF_RETURN_IF_ERROR(tsl::ReadFileToString(\n env_, StreamWorkerFilePath(path_, stream_index_), &worker_address));\n return worker_address;\n}\nabsl::Status SnapshotManager::StreamRestorer::ReadOnDiskStream() {\n absl::StatusOr worker_address = OwnerWorkerAddress();\n if (!worker_address.ok()) {\n return absl::OkStatus();\n }\n worker_address_ = *worker_address;\n restored_stream_.emplace(num_sources_);\n std::string splits_path = SplitsDirectory(path_, stream_index_);\n TF_ASSIGN_OR_RETURN(std::vector source_directories,\n GetChildren(splits_path, env_));\n for (const auto& source_directory : source_directories) {\n std::string source_path = tsl::io::JoinPath(splits_path, source_directory);\n std::vector tokens = absl::StrSplit(source_directory, '_');\n int64_t source_index = 0;\n if (tokens.size() != 2 || !absl::SimpleAtoi(tokens[1], &source_index) ||\n source_index < 0) {\n return absl::InternalError(absl::StrCat(\n \"Can't parse tf.data snapshot source directory \", source_path,\n \": filename must have the format source_.\"));\n }\n if (source_index >= num_sources_) {\n return absl::InternalError(\n absl::StrCat(\"Found conflict between the number of sources, \",\n num_sources_, \", and the filename of \", source_path));\n }\n TF_RETURN_IF_ERROR(ReadOnDiskSource(source_index));\n }\n 
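// [Editor's note, added] After every stream is restored, ReadOnDiskStreams
// above accepts the recovery only if the collected global split indices form
// exactly {0, 1, ..., n-1}; any gap means a split file was lost. An
// equivalent standalone check, as a sketch:
//
//   bool IsContiguousFromZero(const absl::flat_hash_set<int64_t>& indices) {
//     for (int64_t i = 0; i < static_cast<int64_t>(indices.size()); ++i) {
//       if (!indices.contains(i)) return false;
//     }
//     return true;
//   }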
if (env_->FileExists(StreamDoneFilePath(path_, stream_index_)).ok()) {\n restored_stream_->state = Stream::State::kDone;\n return absl::OkStatus();\n }\n TF_ASSIGN_OR_RETURN(bool assignment_added,\n assignment_manager_.TryAddAssignment(\n path_, *worker_address, stream_index_));\n if (!assignment_added) {\n return absl::InternalError(absl::StrCat(\n \"Failed to recover tf.data snapshot dispatcher: Worker \",\n *worker_address, \" was assigned too many streams. At most \",\n assignment_manager_.worker_max_concurrent_snapshots(),\n \" streams are allowed.\"));\n }\n return absl::OkStatus();\n}\nabsl::Status SnapshotManager::StreamRestorer::ReadOnDiskSource(\n int64_t source_index) {\n std::string source_directory =\n SourceDirectory(path_, stream_index_, source_index);\n TF_ASSIGN_OR_RETURN(std::vector repetition_directories,\n GetChildren(source_directory, env_));\n for (const std::string& repetition : repetition_directories) {\n std::string repetition_dir =\n tsl::io::JoinPath(source_directory, repetition);\n TF_ASSIGN_OR_RETURN(std::vector split_files,\n GetChildren(repetition_dir, env_));\n for (const std::string& split_file : split_files) {\n std::string split_path = tsl::io::JoinPath(repetition_dir, split_file);\n TF_RETURN_IF_ERROR(\n ReadOnDiskSplit(source_index, split_files, split_path));\n }\n restored_stream_->num_assigned_splits_per_source[source_index] +=\n split_files.size();\n }\n return absl::OkStatus();\n}\nabsl::Status SnapshotManager::StreamRestorer::ReadOnDiskSplit(\n int64_t source_index, const std::vector& split_files,\n const std::string& split_file) {\n TF_ASSIGN_OR_RETURN(auto split_indices, ParseSplitFilename(split_file));\n auto [local_split_index, global_split_index] = split_indices;\n if (global_split_indices_.contains(global_split_index)) {\n return absl::InternalError(absl::StrCat(\n \"Failed to restore tf.data snapshot at \", path_,\n \": Found duplicate global split index in split \", split_file, \".\"));\n }\n global_split_indices_.insert(global_split_index);\n return absl::OkStatus();\n}\nabsl::Status SnapshotManager::RestoreFrom(\n const StreamRestorer& stream_restorer,\n const std::vector& stream_directories,\n std::vector>& split_providers,\n std::vector& repetition_indices,\n absl::flat_hash_set& global_split_indices)\n TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {\n if (!stream_restorer.GetStream().has_value()) {\n return absl::OkStatus();\n }\n streams_.insert(\n {stream_restorer.StreamIndex(), *stream_restorer.GetStream()});\n auto [it, success] = assignments_.insert(\n {stream_restorer.WorkerAddress(), stream_restorer.StreamIndex()});\n if (!success) {\n return absl::InternalError(absl::StrCat(\n \"tf.data dispatcher failed to assign stream \",\n stream_restorer.StreamIndex(), \" to snapshot worker \",\n stream_restorer.WorkerAddress(),\n \": The worker is already assigned stream \", it->second, \".\"));\n }\n for (int64_t source_index = 0; source_index < repetition_indices.size();\n ++source_index) {\n int64_t skip_splits = GetStream(stream_restorer.StreamIndex())\n .num_assigned_splits_per_source[source_index];\n for (int64_t i = 0; i < skip_splits; ++i) {\n TF_RETURN_IF_ERROR(SkipSplit(*split_providers[source_index],\n repetition_indices[source_index]));\n }\n }\n for (int64_t global_split_index : stream_restorer.GlobalSplitIndices()) {\n if (global_split_indices.contains(global_split_index)) {\n return absl::InternalError(\n absl::StrCat(\"Failed to restore tf.data snapshot at \", path_,\n \": Found \", \"duplicate global split index in stream \",\n 
stream_restorer.StreamIndex(), \".\"));\n }\n global_split_indices.insert(global_split_index);\n }\n return absl::OkStatus();\n}\nSnapshotManager::Stream& SnapshotManager::GetStream(int64_t stream_index)\n TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {\n auto [it, _] = streams_.try_emplace(stream_index, num_sources());\n return it->second;\n}\nabsl::Status SnapshotManager::HandleStreamCompletion(\n int64_t stream_index, absl::string_view worker_address)\n TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {\n GetStream(stream_index).state = Stream::State::kDone;\n assignment_manager_.RemoveAssignment(path_, worker_address, stream_index);\n ++num_completed_streams_;\n if (absl::c_all_of(streams_, [](const auto& stream) {\n return stream.second.state == Stream::State::kDone;\n })) {\n mode_ = Mode::kDone;\n TF_RETURN_IF_ERROR(AtomicallyWriteStringToFile(SnapshotDoneFilePath(path_),\n std::string(), env_));\n LOG(INFO) << \"Finished writing tf.data distributed snapshot at \" << path_;\n }\n return absl::OkStatus();\n}\nabsl::Status SnapshotManager::HandleStreamError(\n absl::string_view worker_address, const StatusProto& status_proto)\n TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {\n if (!status_.ok()) {\n return absl::OkStatus();\n }\n mode_ = Mode::kError;\n status_ = tsl::StatusFromProto(status_proto);\n TF_RETURN_IF_ERROR(AtomicallyWriteTextProto(SnapshotErrorFilePath(path_),\n status_proto, env_));\n LOG(ERROR) << \"Failed to write tf.data distributed snapshot at \" << path_\n << \". Worker \" << worker_address << \" reported error: \" << status_;\n return absl::OkStatus();\n}\nabsl::StatusOr>\nSnapshotManager::MaybeCreateAndAssignNewStream(absl::string_view worker_address)\n TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {\n int64_t new_stream_index =\n streams_.empty() ? 0 : streams_.rbegin()->first + 1;\n TF_ASSIGN_OR_RETURN(bool assignment_added,\n assignment_manager_.TryAddAssignment(\n path_, worker_address, new_stream_index));\n if (!assignment_added) {\n return std::optional();\n }\n streams_.insert({new_stream_index, Stream(num_sources())});\n assignments_[worker_address] = new_stream_index;\n return new_stream_index;\n}\nabsl::StatusOr>>\nSnapshotManager::MaybeGetOrCreateStreamAssignment(\n absl::string_view worker_address,\n const SnapshotTaskProgress* snapshot_progress)\n TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {\n std::optional assigned_stream_index;\n if (auto it = assignments_.find(worker_address); it != assignments_.end()) {\n assigned_stream_index = it->second;\n }\n if (snapshot_progress) {\n if (assigned_stream_index.has_value() &&\n *assigned_stream_index !=\n snapshot_progress->snapshot_task().stream_index()) {\n return absl::InternalError(absl::StrCat(\n \"tf.data snapshot worker \", worker_address, \" was assigned stream \",\n snapshot_progress->snapshot_task().stream_index(),\n \", but is now assigned a different stream \", *assigned_stream_index));\n }\n if (assigned_stream_index.has_value() && snapshot_progress->completed()) {\n TF_RETURN_IF_ERROR(HandleStreamCompletion(\n snapshot_progress->snapshot_task().stream_index(), worker_address));\n return std::nullopt;\n }\n if (snapshot_progress->status().code() != error::OK) {\n TF_RETURN_IF_ERROR(\n HandleStreamError(worker_address, snapshot_progress->status()));\n return std::nullopt;\n }\n }\n if (!assigned_stream_index) {\n if (mode_ != Mode::kActive) {\n return std::nullopt;\n }\n TF_ASSIGN_OR_RETURN(assigned_stream_index,\n MaybeCreateAndAssignNewStream(worker_address));\n if (!assigned_stream_index.has_value()) {\n return std::nullopt;\n }\n return 
std::make_pair(*assigned_stream_index, true);\n }\n if (!assigned_stream_index.has_value() ||\n GetStream(*assigned_stream_index).state == Stream::State::kDone) {\n return std::nullopt;\n }\n return std::make_pair(*assigned_stream_index, false);\n}\nabsl::Status SnapshotManager::WorkerHeartbeat(\n const WorkerHeartbeatRequest& request, WorkerHeartbeatResponse& response)\n TF_LOCKS_EXCLUDED(mu_) {\n std::optional> assigned_stream_index;\n std::vector repetitions_per_source;\n {\n tsl::mutex_lock l(mu_);\n dead_workers_.erase(request.worker_address());\n if (mode_ == Mode::kDone || mode_ == Mode::kError) {\n return absl::OkStatus();\n }\n if (absl::Time now = absl::FromUnixMicros(env_->NowMicros());\n now - last_progress_log_time_ > kProgressLoggingInterval) {\n LOG(INFO) << \"tf.data snapshot progress [\" << path_\n << \"]: \" << num_completed_streams_ << \"/\" << streams_.size()\n << \" streams completed; \" << num_assigned_splits_ << \"/\"\n << num_total_splits_ << \" splits assigned or completed.\";\n last_progress_log_time_ = now;\n }\n const SnapshotTaskProgress* snapshot_progress = nullptr;\n if (auto it = request.snapshot_task_progress().find(path_);\n it != request.snapshot_task_progress().end()) {\n snapshot_progress = &it->second;\n }\n if (snapshot_progress && snapshot_progress->completed() &&\n mode_ == Mode::kActive) {\n mode_ = Mode::kWindingDown;\n }\n TF_ASSIGN_OR_RETURN(assigned_stream_index,\n MaybeGetOrCreateStreamAssignment(\n request.worker_address(), snapshot_progress));\n if (!assigned_stream_index.has_value()) {\n return absl::OkStatus();\n }\n SnapshotTaskDef* snapshot_task = response.add_snapshot_tasks();\n snapshot_task->set_base_path(path_);\n snapshot_task->set_num_sources(num_sources());\n *snapshot_task->mutable_metadata() = metadata_;\n snapshot_task->set_stream_index(assigned_stream_index->first);\n for (int64_t source_index = 0; source_index < num_sources();\n ++source_index) {\n repetitions_per_source.push_back(sources_[source_index].repetition_index);\n }\n } \n const auto [stream_index, is_new_stream] = *assigned_stream_index;\n if (is_new_stream) {\n TF_RETURN_IF_ERROR(InitStreamDirectory(\n stream_index, request.worker_address(), repetitions_per_source));\n LOG(INFO) << \"For snapshot at \" << path_ << \", created stream_\"\n << stream_index << \" and assigned to \"\n << request.worker_address();\n }\n return absl::OkStatus();\n}\nabsl::Status SnapshotManager::InitStreamDirectory(\n int64_t stream_index, const std::string& worker_address,\n const std::vector& repetitions_per_source) {\n for (int64_t source_index = 0; source_index < repetitions_per_source.size();\n ++source_index) {\n for (int64_t repetition_index = 0;\n repetition_index <= repetitions_per_source[source_index];\n ++repetition_index) {\n TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(RepetitionDirectory(\n path_, stream_index, source_index, repetition_index)));\n }\n }\n return AtomicallyWriteStringToFile(StreamWorkerFilePath(path_, stream_index),\n worker_address, env_);\n}\nabsl::Status SnapshotManager::GetSnapshotSplit(\n const GetSnapshotSplitRequest& request, GetSnapshotSplitResponse& response)\n TF_LOCKS_EXCLUDED(get_split_mu_, mu_) {\n int64_t local_split_index = 0;\n int64_t global_split_index = 0;\n PrefetchedSplitProvider* split_provider = nullptr;\n tsl::mutex_lock get_split_lock(get_split_mu_);\n {\n tsl::mutex_lock l(mu_);\n if (auto it = assignments_.find(request.worker_address());\n it == assignments_.end()) {\n return absl::InternalError(\n absl::StrCat(\"tf.data snapshot 
worker \", request.worker_address(),\n \" was assigned stream \", request.stream_index(),\n \", but the assignment is no longer available.\"));\n } else if (it->second != request.stream_index()) {\n return absl::InternalError(\n absl::StrCat(\"tf.data snapshot worker \", request.worker_address(),\n \" was assigned stream \", request.stream_index(),\n \" but is now assigned a different stream \", it->second));\n }\n Stream& stream = GetStream(request.stream_index());\n local_split_index =\n stream.num_assigned_splits_per_source[request.source_index()];\n global_split_index = num_assigned_splits_;\n response.set_local_split_index(local_split_index);\n Source& source = sources_[request.source_index()];\n if (request.repetition_index() < source.repetition_index) {\n response.set_end_of_splits(true);\n return absl::OkStatus();\n }\n while (request.repetition_index() > source.repetition_index) {\n TF_RETURN_IF_ERROR(ResetSource(source, request.source_index()));\n }\n split_provider = source.split_provider.get();\n }\n std::string split_path = SplitPath(\n path_, request.stream_index(), request.source_index(),\n request.repetition_index(), local_split_index, global_split_index);\n TF_ASSIGN_OR_RETURN(std::optional split,\n split_provider->GetNext(split_path));\n if (!split.has_value()) {\n response.set_end_of_splits(true);\n return absl::OkStatus();\n }\n split->AsProtoTensorContent(response.mutable_split());\n tsl::mutex_lock l(mu_);\n ++GetStream(request.stream_index())\n .num_assigned_splits_per_source[request.source_index()];\n ++num_assigned_splits_;\n return absl::OkStatus();\n}\nabsl::Status SnapshotManager::ResetSource(Source& source, int64_t source_index)\n TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {\n TF_RETURN_IF_ERROR(source.split_provider->Reset());\n ++source.repetition_index;\n LOG(INFO) << \"Starting repetition_\" << source.repetition_index << \" \"\n << \"for snapshot \" << path_ << \", source \" << source_index;\n for (const auto& [stream_index, _] : streams_) {\n TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(RepetitionDirectory(\n path_, stream_index, source_index, source.repetition_index)));\n }\n return absl::OkStatus();\n}\nabsl::Status SnapshotManager::GetSnapshotStreams(\n GetSnapshotStreamsResponse& response) TF_LOCKS_EXCLUDED(mu_) {\n tsl::tf_shared_lock l(mu_);\n for (const auto& [stream_index, stream] : streams_) {\n SnapshotStreamInfo* stream_info = response.add_streams();\n stream_info->set_index(stream_index);\n stream_info->set_state(stream.state == Stream::State::kDone\n ? 
SnapshotStreamInfo::DONE\n : SnapshotStreamInfo::ASSIGNED);\n }\n return absl::OkStatus();\n}\nvoid SnapshotManager::Cancel() {\n std::vector split_providers_to_cancel;\n {\n tsl::mutex_lock l(mu_);\n for (Source& source : sources_) {\n split_providers_to_cancel.push_back(source.split_provider.get());\n }\n }\n for (PrefetchedSplitProvider* split_provider : split_providers_to_cancel) {\n split_provider->Cancel();\n }\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/data/service/snapshot/snapshot_manager.h\"\n#include \n#include \n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"xla/tsl/protobuf/status.pb.h\"\n#include \"tensorflow/core/data/service/common.pb.h\"\n#include \"tensorflow/core/data/service/dispatcher.pb.h\"\n#include \"tensorflow/core/data/service/snapshot/path_utils.h\"\n#include \"tensorflow/core/data/service/test_util.h\"\n#include \"tensorflow/core/framework/tensor.h\"\n#include \"tensorflow/core/framework/tensor.pb.h\"\n#include \"tsl/platform/env.h\"\n#include \"tsl/platform/status.h\"\n#include \"tsl/platform/status_matchers.h\"\n#include \"tsl/platform/status_to_from_proto.h\"\n#include \"tsl/platform/statusor.h\"\n#include \"tsl/platform/test.h\"\n#include \"tsl/protobuf/error_codes.pb.h\"\nnamespace tensorflow {\nnamespace data {\nnamespace {\nusing ::testing::_;\nusing ::testing::ElementsAre;\nusing ::testing::IsEmpty;\nusing ::testing::Not;\nusing ::testing::SizeIs;\nusing ::testing::UnorderedElementsAre;\nusing ::tsl::testing::IsOkAndHolds;\nusing ::tsl::testing::StatusIs;\ntemplate \nT GetValue(const Tensor& tensor) {\n return tensor.unaligned_flat().data()[0];\n}\nTEST(SnapshotManagerTest, CreateStreamAssignment) {\n std::string snapshot_path = testing::LocalTempFilename();\n SnapshotRequest request;\n *request.mutable_dataset() = testing::RangeDataset(10);\n request.set_path(snapshot_path);\n *request.mutable_metadata() =\n testing::CreateDummyDistributedSnapshotMetadata();\n SnapshotAssignmentManager snapshot_assignment_manager(\n 2);\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr snapshot_manager,\n SnapshotManager::Start(request, snapshot_assignment_manager,\n Env::Default()));\n WorkerHeartbeatRequest heartbeat_request;\n WorkerHeartbeatResponse heartbeat_response;\n heartbeat_request.set_worker_address(\"localhost\");\n TF_ASSERT_OK(\n snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));\n ASSERT_EQ(heartbeat_response.snapshot_tasks().size(), 1);\n EXPECT_EQ(heartbeat_response.snapshot_tasks(0).base_path(), snapshot_path);\n EXPECT_EQ(heartbeat_response.snapshot_tasks(0).stream_index(), 0);\n EXPECT_EQ(heartbeat_response.snapshot_tasks(0).num_sources(), 1);\n}\nTEST(SnapshotManagerTest, GetSnapshotSplit) {\n std::string snapshot_path = testing::LocalTempFilename();\n SnapshotRequest request;\n *request.mutable_dataset() = testing::RangeDataset(10);\n request.set_path(snapshot_path);\n *request.mutable_metadata() =\n testing::CreateDummyDistributedSnapshotMetadata();\n SnapshotAssignmentManager snapshot_assignment_manager(\n 2);\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr snapshot_manager,\n SnapshotManager::Start(request, snapshot_assignment_manager,\n Env::Default()));\n WorkerHeartbeatRequest heartbeat_request;\n WorkerHeartbeatResponse heartbeat_response;\n heartbeat_request.set_worker_address(\"localhost\");\n TF_ASSERT_OK(\n snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));\n const SnapshotTaskDef& task = heartbeat_response.snapshot_tasks(0);\n 
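// ---------------------------------------------------------------------------
// Editor's note: SnapshotManager::Cancel() above copies the split-provider
// pointers while holding `mu_` and only then calls Cancel() on each one, so a
// provider that blocks inside Cancel() cannot deadlock against heartbeats.
// A minimal sketch of the same "snapshot under the lock, act outside the
// lock" pattern; Worker and WorkerRegistry are hypothetical names.
#include <mutex>
#include <vector>

struct Worker {
  void Cancel() {}  // may block; must not be called under the registry lock
};

class WorkerRegistry {
 public:
  void CancelAll() {
    std::vector<Worker*> to_cancel;
    {
      std::lock_guard<std::mutex> lock(mu_);
      to_cancel = workers_;  // cheap pointer copy under the lock
    }
    for (Worker* w : to_cancel) {
      w->Cancel();  // potentially slow, runs lock-free
    }
  }

 private:
  std::mutex mu_;
  std::vector<Worker*> workers_;
};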
GetSnapshotSplitRequest get_split_request;\n GetSnapshotSplitResponse get_split_response;\n get_split_request.set_worker_address(\"localhost\");\n get_split_request.set_base_path(task.base_path());\n get_split_request.set_stream_index(task.stream_index());\n get_split_request.set_source_index(0);\n for (int64_t i = 0; i < 10; ++i) {\n TF_ASSERT_OK(snapshot_manager->GetSnapshotSplit(get_split_request,\n get_split_response));\n Tensor tensor;\n ASSERT_TRUE(tensor.FromProto(get_split_response.split()));\n EXPECT_EQ(GetValue(tensor), i);\n }\n}\nTEST(SnapshotManagerTest, HandleStreamCompletion) {\n std::string snapshot_path = testing::LocalTempFilename();\n SnapshotRequest request;\n *request.mutable_dataset() = testing::RangeDataset(10);\n request.set_path(snapshot_path);\n *request.mutable_metadata() =\n testing::CreateDummyDistributedSnapshotMetadata();\n SnapshotAssignmentManager snapshot_assignment_manager(\n 2);\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr snapshot_manager,\n SnapshotManager::Start(request, snapshot_assignment_manager,\n Env::Default()));\n WorkerHeartbeatRequest heartbeat_request;\n WorkerHeartbeatResponse heartbeat_response;\n heartbeat_request.set_worker_address(\"localhost:1\");\n TF_ASSERT_OK(\n snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));\n heartbeat_request.Clear();\n heartbeat_response.Clear();\n heartbeat_request.set_worker_address(\"localhost:2\");\n TF_ASSERT_OK(\n snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));\n ASSERT_EQ(heartbeat_response.snapshot_tasks().size(), 1);\n const SnapshotTaskDef& snapshot_task = heartbeat_response.snapshot_tasks(0);\n EXPECT_EQ(snapshot_task.base_path(), snapshot_path);\n EXPECT_EQ(snapshot_task.stream_index(), 1);\n EXPECT_EQ(snapshot_task.num_sources(), 1);\n heartbeat_request.Clear();\n heartbeat_response.Clear();\n heartbeat_request.set_worker_address(\"localhost:1\");\n SnapshotTaskProgress progress;\n *progress.mutable_snapshot_task() = snapshot_task;\n progress.set_completed(true);\n (*heartbeat_request.mutable_snapshot_task_progress())[snapshot_path] =\n progress;\n TF_ASSERT_OK(\n snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));\n EXPECT_TRUE(heartbeat_response.snapshot_tasks().empty());\n heartbeat_request.Clear();\n heartbeat_response.Clear();\n heartbeat_request.set_worker_address(\"localhost:1\");\n TF_ASSERT_OK(\n snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));\n EXPECT_TRUE(heartbeat_response.snapshot_tasks().empty());\n}\nTEST(SnapshotManagerTest, Resume) {\n std::string snapshot_path = testing::LocalTempFilename();\n SnapshotRequest request;\n *request.mutable_dataset() = testing::RangeDataset(10);\n request.set_path(snapshot_path);\n *request.mutable_metadata() =\n testing::CreateDummyDistributedSnapshotMetadata();\n SnapshotAssignmentManager snapshot_assignment_manager_1(\n 2);\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr snapshot_manager,\n SnapshotManager::Start(request, snapshot_assignment_manager_1,\n Env::Default()));\n WorkerHeartbeatRequest heartbeat_request;\n WorkerHeartbeatResponse heartbeat_response;\n heartbeat_request.set_worker_address(\"localhost\");\n TF_ASSERT_OK(\n snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));\n EXPECT_THAT(heartbeat_response.snapshot_tasks(), SizeIs(1));\n heartbeat_response.Clear();\n SnapshotAssignmentManager snapshot_assignment_manager_2(\n 2);\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr resumed_manager,\n 
SnapshotManager::Resume(snapshot_path, snapshot_assignment_manager_2,\n Env::Default()));\n TF_EXPECT_OK(\n resumed_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));\n EXPECT_THAT(heartbeat_response.snapshot_tasks(), SizeIs(1));\n}\nTEST(SnapshotManagerTest, SnapshotStreamError) {\n std::string snapshot_path = testing::LocalTempFilename();\n SnapshotRequest snapshot_request;\n *snapshot_request.mutable_dataset() = testing::RangeDataset(10);\n snapshot_request.set_path(snapshot_path);\n *snapshot_request.mutable_metadata() =\n testing::CreateDummyDistributedSnapshotMetadata();\n SnapshotAssignmentManager snapshot_assignment_manager(\n 2);\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr snapshot_manager,\n SnapshotManager::Start(snapshot_request, snapshot_assignment_manager,\n Env::Default()));\n WorkerHeartbeatRequest heartbeat_request;\n WorkerHeartbeatResponse heartbeat_response;\n heartbeat_request.set_worker_address(\"localhost\");\n TF_ASSERT_OK(\n snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));\n const SnapshotTaskDef& task = heartbeat_response.snapshot_tasks(0);\n heartbeat_response.Clear();\n SnapshotTaskProgress snapshot_task_progress;\n *snapshot_task_progress.mutable_snapshot_task() = task;\n *snapshot_task_progress.mutable_status() =\n tsl::StatusToProto(errors::NotFound(\"Not found\"));\n (*heartbeat_request.mutable_snapshot_task_progress())[snapshot_path] =\n snapshot_task_progress;\n TF_EXPECT_OK(\n snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));\n EXPECT_THAT(heartbeat_response.snapshot_tasks(), IsEmpty());\n TF_ASSERT_OK(\n Env::Default()->FileExists(SnapshotErrorFilePath(snapshot_path)));\n StatusProto status_proto;\n TF_ASSERT_OK(ReadTextProto(\n Env::Default(), SnapshotErrorFilePath(snapshot_path), &status_proto));\n EXPECT_THAT(tsl::StatusFromProto(status_proto),\n StatusIs(error::NOT_FOUND, \"Not found\"));\n}\nTEST(SnapshotManagerTest, ResumeFromError) {\n std::string snapshot_path = testing::LocalTempFilename();\n SnapshotRequest request;\n *request.mutable_dataset() = testing::RangeDataset(10);\n request.set_path(snapshot_path);\n *request.mutable_metadata() =\n testing::CreateDummyDistributedSnapshotMetadata();\n SnapshotAssignmentManager snapshot_assignment_manager_1(\n 2);\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr snapshot_manager,\n SnapshotManager::Start(request, snapshot_assignment_manager_1,\n Env::Default()));\n WorkerHeartbeatRequest heartbeat_request;\n WorkerHeartbeatResponse heartbeat_response;\n heartbeat_request.set_worker_address(\"localhost\");\n TF_ASSERT_OK(\n snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));\n ASSERT_THAT(heartbeat_response.snapshot_tasks(), SizeIs(1));\n const SnapshotTaskDef& task = heartbeat_response.snapshot_tasks(0);\n heartbeat_response.Clear();\n SnapshotTaskProgress snapshot_task_progress;\n *snapshot_task_progress.mutable_snapshot_task() = task;\n *snapshot_task_progress.mutable_status() =\n tsl::StatusToProto(errors::NotFound(\"Not found\"));\n (*heartbeat_request.mutable_snapshot_task_progress())[snapshot_path] =\n snapshot_task_progress;\n TF_EXPECT_OK(\n snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));\n EXPECT_THAT(heartbeat_response.snapshot_tasks(), IsEmpty());\n heartbeat_response.Clear();\n SnapshotAssignmentManager snapshot_assignment_manager_2(\n 2);\n TF_ASSERT_OK_AND_ASSIGN(\n std::unique_ptr resumed_manager,\n SnapshotManager::Resume(snapshot_path, snapshot_assignment_manager_2,\n 
Env::Default()));\n TF_EXPECT_OK(\n resumed_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));\n EXPECT_THAT(heartbeat_response.snapshot_tasks(), IsEmpty());\n}\nTEST(SnapshotAssignmentManagerTest, LoadBalanceSnapshots) {\n SnapshotAssignmentManager snapshot_assignment_manager(\n 2);\n snapshot_assignment_manager.AddSnapshot(\"snapshot_1\");\n snapshot_assignment_manager.AddSnapshot(\"snapshot_2\");\n snapshot_assignment_manager.AddSnapshot(\"snapshot_3\");\n EXPECT_THAT(snapshot_assignment_manager.TryAddAssignment(\n \"snapshot_3\", \"worker_1\", 0),\n IsOkAndHolds(true));\n EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots(\"worker_1\"),\n ElementsAre(\"snapshot_3\", _));\n ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots(\"worker_2\"),\n ElementsAre(Not(\"snapshot_3\")));\n EXPECT_THAT(snapshot_assignment_manager.TryAddAssignment(\n \"snapshot_2\", \"worker_1\", 0),\n IsOkAndHolds(true));\n ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots(\"worker_1\"),\n UnorderedElementsAre(\"snapshot_2\", \"snapshot_3\"));\n EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots(\"worker_2\"),\n ElementsAre(\"snapshot_1\"));\n EXPECT_THAT(snapshot_assignment_manager.TryAddAssignment(\n \"snapshot_1\", \"worker_1\", 0),\n IsOkAndHolds(false));\n EXPECT_THAT(snapshot_assignment_manager.TryAddAssignment(\n \"snapshot_2\", \"worker_2\", 0),\n IsOkAndHolds(true));\n ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots(\"worker_1\"),\n UnorderedElementsAre(\"snapshot_2\", \"snapshot_3\"));\n EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots(\"worker_2\"),\n ElementsAre(\"snapshot_2\", \"snapshot_1\"));\n snapshot_assignment_manager.RemoveAssignment(\"snapshot_2\", \"worker_1\",\n 0);\n EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots(\"worker_1\"),\n ElementsAre(\"snapshot_3\", \"snapshot_1\"));\n ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots(\"worker_2\"),\n ElementsAre(\"snapshot_2\", \"snapshot_1\"));\n snapshot_assignment_manager.RemoveAssignment(\"snapshot_3\", \"worker_1\",\n 0);\n ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots(\"worker_1\"),\n ElementsAre(\"snapshot_1\"));\n ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots(\"worker_2\"),\n ElementsAre(\"snapshot_2\", \"snapshot_1\"));\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/snapshot_manager.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/snapshot_manager_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":269,"cells":{"ID":{"kind":"string","value":"6fdff665-ed54-4a1b-8aa2-c0280a997a05"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"tf_threadpool_concurrent_work_queue"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.h\"\n#include \n#include \n#include \n#include \"tensorflow/core/platform/errors.h\"\n#include 
\"tensorflow/core/platform/status.h\"\n#include \"tensorflow/core/platform/threadpool.h\"\n#include \"tensorflow/core/platform/threadpool_interface.h\"\n#include \"tensorflow/core/tfrt/utils/thread_pool.h\"\n#include \"tfrt/host_context/async_value.h\" \n#include \"tfrt/host_context/execution_context.h\" \n#include \"tfrt/host_context/task_function.h\" \n#include \"tfrt/support/forward_decls.h\" \n#include \"tfrt/support/latch.h\" \nnamespace tensorflow {\nnamespace tfrt_stub {\nusing ::tensorflow::thread::ThreadPoolInterface;\nabsl::StatusOr>\nTfThreadPoolWorkQueue::InitializeRequest(int64_t request_id) const {\n return {std::make_unique(\n request_id, intra_op_threadpool_, inter_op_threadpool_)};\n}\nvoid TfThreadPoolWorkQueue::AddTask(tfrt::TaskFunction work) {\n auto* copy = new tfrt::TaskFunction(\n tensorflow::tfrt_stub::WrapWork(id(), \"inter\", std::move(work)));\n inter_op_threadpool_->Schedule([copy] {\n (*copy)();\n delete copy;\n });\n}\nstd::optional TfThreadPoolWorkQueue::AddBlockingTask(\n tfrt::TaskFunction work, bool allow_queuing) {\n AddTask(std::move(work));\n return std::nullopt;\n}\nvoid TfThreadPoolWorkQueue::Quiesce() {\n}\nvoid TfThreadPoolWorkQueue::Await(\n tfrt::ArrayRef> values) {\n tfrt::latch values_remaining(values.size());\n for (auto& value : values) {\n value->AndThen([&values_remaining]() { values_remaining.count_down(); });\n }\n values_remaining.wait();\n}\nbool TfThreadPoolWorkQueue::IsInWorkerThread() const {\n return true;\n}\nstd::unique_ptr CreateDefaultTfThreadPoolWorkQueue(\n int num_inter_op_threads, int num_intra_op_threads) {\n struct ThreadPools {\n TfThreadPool inter_op_threadpool;\n TfThreadPool intra_op_threadpool;\n ThreadPools(int num_inter_op_threads, int num_intra_op_threads)\n : inter_op_threadpool(\"default_work_queue_inter\", num_inter_op_threads),\n intra_op_threadpool(\"default_work_queue_intra\",\n num_intra_op_threads) {}\n };\n class Wrapper : public TfThreadPoolWorkQueue {\n public:\n explicit Wrapper(std::unique_ptr thread_pools)\n : TfThreadPoolWorkQueue(\n &thread_pools->intra_op_threadpool,\n &thread_pools->inter_op_threadpool),\n thread_pools_(std::move(thread_pools)) {}\n ~Wrapper() override = default;\n private:\n std::unique_ptr thread_pools_;\n };\n return std::make_unique(std::make_unique(\n num_inter_op_threads, num_intra_op_threads));\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.h\"\n#include \n#include \n#include \n#include \"tensorflow/core/lib/core/status_test_util.h\"\n#include \"tensorflow/core/platform/errors.h\"\n#include \"tensorflow/core/platform/status_matchers.h\"\n#include \"tensorflow/core/tfrt/utils/thread_pool.h\"\n#include \"tfrt/host_context/host_allocator.h\" \n#include \"tfrt/host_context/host_context.h\" \n#include \"tfrt/support/latch.h\" \nnamespace tensorflow {\nnamespace tfrt_stub {\nnamespace {\nconst int32_t kNumThreads = 2;\nclass TfThreadpoolWorkQueueTest : public ::testing::Test {\n protected:\n TfThreadpoolWorkQueueTest()\n : tf_threadpool_cwq_(CreateDefaultTfThreadPoolWorkQueue(\n kNumThreads,\n kNumThreads)) {}\n std::unique_ptr tf_threadpool_cwq_;\n};\nTEST_F(TfThreadpoolWorkQueueTest, GetParallelismLevelOk) {\n EXPECT_GT(tf_threadpool_cwq_->GetParallelismLevel(), 0);\n}\nTEST_F(TfThreadpoolWorkQueueTest, GetNameOk) {\n EXPECT_EQ(tf_threadpool_cwq_->name(), \"TfThreadPoolWorkQueue\");\n}\nTEST_F(TfThreadpoolWorkQueueTest, InitializeRequestOk) {\n tfrt::RequestContextBuilder 
ctx_builder(nullptr,\n nullptr);\n auto queue = tf_threadpool_cwq_->InitializeRequest(0);\n TF_ASSERT_OK(queue.status());\n EXPECT_NE(*queue, nullptr);\n EXPECT_NE((*queue)->GetIntraOpThreadPool(), nullptr);\n}\nTEST_F(TfThreadpoolWorkQueueTest, IsInWorkerThreadOk) {\n EXPECT_TRUE(tf_threadpool_cwq_->IsInWorkerThread());\n}\nTEST_F(TfThreadpoolWorkQueueTest, RunningBlockingTask) {\n tfrt::latch latch(10);\n int n = 0;\n tensorflow::mutex m;\n for (int i = 0; i < 10; ++i) {\n tf_threadpool_cwq_->AddBlockingTask(tfrt::TaskFunction([&n, &m, &latch] {\n {\n tensorflow::mutex_lock lock(m);\n ++n;\n }\n latch.count_down();\n }),\n true);\n }\n latch.wait();\n EXPECT_EQ(n, 10);\n}\nTEST_F(TfThreadpoolWorkQueueTest, RunningNonBlockingTask) {\n tfrt::latch latch(10);\n int n = 0;\n tensorflow::mutex m;\n for (int i = 0; i < 10; ++i) {\n tf_threadpool_cwq_->AddTask(tfrt::TaskFunction([&n, &m, &latch] {\n {\n tensorflow::mutex_lock lock(m);\n ++n;\n }\n latch.count_down();\n }));\n }\n latch.wait();\n EXPECT_EQ(n, 10);\n}\nTEST_F(TfThreadpoolWorkQueueTest, RunningMixedTask) {\n tfrt::latch latch(20);\n int n = 0;\n tensorflow::mutex m;\n for (int i = 0; i < 10; ++i) {\n tf_threadpool_cwq_->AddTask(tfrt::TaskFunction([&n, &m, &latch] {\n {\n tensorflow::mutex_lock lock(m);\n ++n;\n }\n latch.count_down();\n }));\n tf_threadpool_cwq_->AddBlockingTask(tfrt::TaskFunction([&n, &m, &latch] {\n {\n tensorflow::mutex_lock lock(m);\n ++n;\n }\n latch.count_down();\n }),\n true);\n }\n latch.wait();\n EXPECT_EQ(n, 20);\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":270,"cells":{"ID":{"kind":"string","value":"0ceab836-2f6f-4d6c-85b5-579dbed57ba0"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"dot_dimension_merger"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/dot_dimension_merger.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/dot_dimension_merger_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/dot_dimension_merger.h\"\n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/status/status.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"xla/hlo/ir/dfs_hlo_visitor_with_default.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/layout_util.h\"\n#include \"xla/service/hlo_creation_utils.h\"\n#include \"xla/shape.h\"\n#include \"xla/util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nstd::vector ShiftDimensions(absl::Span dimensions,\n const int64_t start, const int64_t shift) {\n std::vector new_dimensions;\n new_dimensions.reserve(dimensions.size());\n for (const int64_t i : dimensions) {\n if (i < start) {\n new_dimensions.push_back(i);\n } 
else {\n new_dimensions.push_back(i - shift);\n }\n }\n return new_dimensions;\n}\nclass BatchDimensionMerger : public DfsHloRewriteVisitor {\n public:\n absl::Status HandleDot(HloInstruction* dot) override {\n const DotDimensionNumbers& dnums = dot->dot_dimension_numbers();\n const Shape& lhs_shape = dot->operand(0)->shape();\n const Shape& rhs_shape = dot->operand(1)->shape();\n CHECK_EQ(dnums.lhs_batch_dimensions_size(),\n dnums.rhs_batch_dimensions_size());\n const int64_t batch_dimension_count = dnums.lhs_batch_dimensions_size();\n if (batch_dimension_count < 2 ||\n !DistinctNumbersAreConsecutiveIfSorted(dnums.lhs_batch_dimensions()) ||\n !DistinctNumbersAreConsecutiveIfSorted(dnums.rhs_batch_dimensions()) ||\n !absl::c_is_sorted(dnums.lhs_batch_dimensions()) ||\n !absl::c_is_sorted(dnums.rhs_batch_dimensions()) ||\n !LayoutUtil::AreDimensionsConsecutive(lhs_shape.layout(),\n dnums.lhs_batch_dimensions()) ||\n !LayoutUtil::AreDimensionsConsecutive(rhs_shape.layout(),\n dnums.rhs_batch_dimensions())) {\n return absl::OkStatus();\n }\n const int64_t lhs_batch_dimension =\n *absl::c_min_element(dnums.lhs_batch_dimensions());\n const int64_t rhs_batch_dimension =\n *absl::c_min_element(dnums.rhs_batch_dimensions());\n int64_t batch_size = 1;\n for (const int64_t dimension_number : dnums.lhs_batch_dimensions()) {\n batch_size *= lhs_shape.dimensions(dimension_number);\n }\n auto merge_batch_dims = [&](Shape old_shape, int64_t batch_dim) {\n Shape new_shape = old_shape;\n for (int64_t i = 1; i < batch_dimension_count; ++i) {\n new_shape.DeleteDimension(batch_dim + 1);\n }\n new_shape.set_dimensions(batch_dim, batch_size);\n return new_shape;\n };\n Shape new_lhs_shape = merge_batch_dims(lhs_shape, lhs_batch_dimension);\n Shape new_rhs_shape = merge_batch_dims(rhs_shape, rhs_batch_dimension);\n DotDimensionNumbers new_dot_dimension_numbers;\n new_dot_dimension_numbers.add_lhs_batch_dimensions(lhs_batch_dimension);\n new_dot_dimension_numbers.add_rhs_batch_dimensions(rhs_batch_dimension);\n {\n const std::vector shifted_contracting_dimensions =\n ShiftDimensions(dnums.lhs_contracting_dimensions(),\n lhs_batch_dimension, batch_dimension_count - 1);\n new_dot_dimension_numbers.mutable_lhs_contracting_dimensions()->Assign(\n shifted_contracting_dimensions.begin(),\n shifted_contracting_dimensions.end());\n }\n {\n const std::vector shifted_contracting_dimensions =\n ShiftDimensions(dnums.rhs_contracting_dimensions(),\n rhs_batch_dimension, batch_dimension_count - 1);\n new_dot_dimension_numbers.mutable_rhs_contracting_dimensions()->Assign(\n shifted_contracting_dimensions.begin(),\n shifted_contracting_dimensions.end());\n }\n auto sparsity = Cast(dot)->sparsity();\n std::vector new_sparsity(sparsity.begin(),\n sparsity.end());\n std::vector sparse_meta(sparsity.size());\n for (int i = 0; i < sparsity.size(); ++i) {\n SparsityDescriptor& descriptor = new_sparsity[i];\n int64_t sparse_batch_dim =\n descriptor.index() == 0 ? 
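// ---------------------------------------------------------------------------
// Editor's note: a self-contained sketch of the index arithmetic performed by
// ShiftDimensions() and merge_batch_dims() above. Using the shapes from the
// MergeConsecutiveBatchDimensions test below ([79,2,4,12,11] with batch dims
// {1,2}), merging the batch dims of sizes 2 and 4 yields one dim of size 8,
// and the later contracting dim 4 shifts down to 3.
#include <cassert>
#include <cstdint>
#include <vector>

std::vector<int64_t> ShiftDims(const std::vector<int64_t>& dims, int64_t start,
                               int64_t shift) {
  std::vector<int64_t> out;
  out.reserve(dims.size());
  for (int64_t d : dims) out.push_back(d < start ? d : d - shift);
  return out;
}

int main() {
  std::vector<int64_t> shape = {79, 2, 4, 12, 11};
  int64_t first_batch_dim = 1, batch_dim_count = 2;
  int64_t batch_size = shape[1] * shape[2];  // 2 * 4 = 8
  std::vector<int64_t> merged = {79, batch_size, 12, 11};
  // Contracting dims {0, 4}: dim 0 precedes the batch dims and is unchanged;
  // dim 4 moves down by (batch_dim_count - 1) = 1.
  std::vector<int64_t> contracting =
      ShiftDims({0, 4}, first_batch_dim, batch_dim_count - 1);
  assert(contracting[0] == 0 && contracting[1] == 3);
  assert(merged[1] == 8);
  return 0;
}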
lhs_batch_dimension : rhs_batch_dimension;\n if (descriptor.dimension() > sparse_batch_dim)\n descriptor.set_dimension(descriptor.dimension() -\n (batch_dimension_count - 1));\n HloInstruction* meta =\n dot->mutable_operand(HloDotInstruction::kOperands + i);\n Shape new_meta_shape = merge_batch_dims(meta->shape(), sparse_batch_dim);\n TF_ASSIGN_OR_RETURN(sparse_meta[i], MakeReshapeHlo(new_meta_shape, meta));\n }\n TF_ASSIGN_OR_RETURN(HloInstruction * reshaped_lhs,\n MakeReshapeHlo(new_lhs_shape, dot->mutable_operand(0)));\n TF_ASSIGN_OR_RETURN(HloInstruction * reshaped_rhs,\n MakeReshapeHlo(new_rhs_shape, dot->mutable_operand(1)));\n Shape new_dot_shape = merge_batch_dims(dot->shape(), 0);\n HloInstruction* new_dot = dot->parent()->AddInstruction(\n HloInstruction::CreateDot(new_dot_shape, reshaped_lhs, reshaped_rhs,\n new_dot_dimension_numbers,\n dot->precision_config(), new_sparsity,\n sparse_meta),\n &dot->metadata());\n dot->SetupDerivedInstruction(new_dot);\n std::unique_ptr out_reshape =\n HloInstruction::CreateReshape(dot->shape(), new_dot);\n return ReplaceWithNewInstruction(dot, std::move(out_reshape));\n }\n};\n} \nabsl::StatusOr DotDimensionMerger::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n return BatchDimensionMerger().RunOnModule(module, execution_threads);\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/dot_dimension_merger.h\"\n#include \n#include \n#include \n#include \"xla/service/hlo_parser.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nusing DotDimensionMergerTest = HloTestBase;\nTEST_F(DotDimensionMergerTest, MergeConsecutiveBatchDimensions) {\n const std::string kHloText = R\"(\nHloModule m\nENTRY e {\n p0 = bf16[79,2,4,12,11] parameter(0)\n p1 = bf16[79,2,4,11,44] parameter(1)\n ROOT d = bf16[2,4,12,44] dot(p0, p1),\n lhs_batch_dims={1,2}, lhs_contracting_dims={0,4},\n rhs_batch_dims={1,2}, rhs_contracting_dims={0,3},\n metadata={op_name=\"testname\"}\n})\";\n RunAndFilecheckHloRewrite(kHloText, DotDimensionMerger(), R\"(\n; CHECK: %[[R0:.*]] = bf16[79,8,12,11]{3,2,1,0} reshape(%p0)\n; CHECK: %[[R1:.*]] = bf16[79,8,11,44]{3,2,1,0} reshape(%p1)\n; CHECK: %[[DOT:.*]] = bf16[8,12,44]{2,1,0} dot(%[[R0]], %[[R1]])\n; CHECK-SAME: lhs_batch_dims={1}\n; CHECK-SAME: lhs_contracting_dims={0,3}\n; CHECK-SAME: rhs_batch_dims={1}\n; CHECK-SAME: rhs_contracting_dims={0,2}\n; CHECK-NEXT: ROOT {{[^ ]+}} = bf16[2,4,12,44]{3,2,1,0} reshape(%[[DOT]])\n; CHECK-SAME: metadata={op_name=\"testname\"}\n )\");\n}\nTEST_F(DotDimensionMergerTest,\n MergeConsecutiveBatchDimensionsNonDefaultLayouts) {\n const std::string kHloText = R\"(\nHloModule m\nENTRY e {\n p0 = bf16[79,2,4,12,11]{4,0,3,2,1} parameter(0)\n p1 = bf16[79,2,4,11,44]{3,0,4,2,1} parameter(1)\n ROOT d = bf16[2,4,12,44]{3,1,0,2} dot(p0, p1),\n lhs_batch_dims={1,2}, lhs_contracting_dims={0,4},\n rhs_batch_dims={1,2}, rhs_contracting_dims={0,3},\n metadata={op_name=\"testname\"}\n})\";\n RunAndFilecheckHloRewrite(kHloText, DotDimensionMerger(), R\"(\n; CHECK: %[[R0:.*]] = bf16[79,8,12,11]{3,0,2,1} reshape(%p0)\n; CHECK: %[[R1:.*]] = bf16[79,8,11,44]{2,0,3,1} reshape(%p1)\n; CHECK: %[[DOT:.*]] = bf16[8,12,44]{2,0,1} dot(%[[R0]], %[[R1]])\n; CHECK-SAME: lhs_batch_dims={1}\n; CHECK-SAME: lhs_contracting_dims={0,3}\n; CHECK-SAME: rhs_batch_dims={1}\n; CHECK-SAME: rhs_contracting_dims={0,2}\n; CHECK-NEXT: ROOT {{[^ ]+}} = bf16[2,4,12,44]{3,1,0,2} reshape(%[[DOT]])\n; CHECK-SAME: 
metadata={op_name=\"testname\"}\n )\");\n}\nTEST_F(DotDimensionMergerTest, SkipPhysicallyNonConsecutiveBatchDimensions) {\n const std::string kHloText = R\"(\nHloModule m\nENTRY e {\n p0 = bf16[2,4,12,13]{3,1,2,0} parameter(0)\n p1 = bf16[2,4,13,55]{3,2,1,0} parameter(1)\n ROOT d = bf16[2,4,12,55]{3,2,1,0} dot(p0, p1),\n lhs_batch_dims={0,1}, lhs_contracting_dims={3},\n rhs_batch_dims={0,1}, rhs_contracting_dims={2}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloText));\n TF_ASSERT_OK_AND_ASSIGN(bool modified,\n DotDimensionMerger().Run(module.get()));\n EXPECT_FALSE(modified);\n}\nTEST_F(DotDimensionMergerTest, SkipUnsortedBatchDimensions) {\n const std::string kHloText = R\"(\nHloModule m\nENTRY e {\n p0 = bf16[4,2,12,13] parameter(0)\n p1 = bf16[2,4,13,55] parameter(1)\n ROOT d = bf16[2,4,12,55] dot(p0, p1),\n lhs_batch_dims={1,0}, lhs_contracting_dims={3},\n rhs_batch_dims={0,1}, rhs_contracting_dims={2}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloText));\n TF_ASSERT_OK_AND_ASSIGN(bool modified,\n DotDimensionMerger().Run(module.get()));\n EXPECT_FALSE(modified);\n}\nTEST_F(DotDimensionMergerTest, SkipLogicallyNonConsecutiveBatchDimensions) {\n const std::string kHloText = R\"(\nHloModule m\nENTRY e {\n p0 = bf16[2,12,4,13] parameter(0)\n p1 = bf16[2,4,13,55] parameter(1)\n ROOT d = bf16[2,4,12,55] dot(p0, p1),\n lhs_batch_dims={0,2}, lhs_contracting_dims={3},\n rhs_batch_dims={0,1}, rhs_contracting_dims={2}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr module,\n ParseAndReturnVerifiedModule(kHloText));\n TF_ASSERT_OK_AND_ASSIGN(bool modified,\n DotDimensionMerger().Run(module.get()));\n EXPECT_FALSE(modified);\n}\nTEST_F(DotDimensionMergerTest, SparseDotUpdatesDescriptor) {\n const std::string kHloText = R\"(\nHloModule m\nENTRY e {\n p0 = bf16[3,4,5,6,16] parameter(0)\n p1 = bf16[3,4,5,32,6] parameter(1)\n meta = u16[3,4,5,6,2] parameter(2)\n ROOT d = bf16[4,5,6,6] dot(p0, p1, meta), sparsity=L.4@2:4,\n lhs_batch_dims={1,2}, lhs_contracting_dims={0,4},\n rhs_batch_dims={1,2}, rhs_contracting_dims={0,3}\n})\";\n RunAndFilecheckHloRewrite(kHloText, DotDimensionMerger(), R\"(\n; CHECK: %[[R0:.*]] = bf16[3,20,6,16]{3,2,1,0} reshape(%p0)\n; CHECK: %[[R1:.*]] = bf16[3,20,32,6]{3,2,1,0} reshape(%p1)\n; CHECK: %[[R2:.*]] = u16[3,20,6,2]{3,2,1,0} reshape(%meta)\n; CHECK: %[[DOT:.*]] = bf16[20,6,6]{2,1,0} dot(%[[R0]], %[[R1]], %[[R2]])\n; CHECK-SAME: lhs_batch_dims={1}\n; CHECK-SAME: lhs_contracting_dims={0,3}\n; CHECK-SAME: rhs_batch_dims={1}\n; CHECK-SAME: rhs_contracting_dims={0,2}\n; CHECK-SAME: sparsity=L.3@2:4\n; CHECK-NEXT: ROOT {{.+}} = bf16[4,5,6,6]{3,2,1,0} reshape(%[[DOT]])\n )\");\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_dimension_merger.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_dimension_merger_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":271,"cells":{"ID":{"kind":"string","value":"974b7765-e143-439c-bdc2-2ea618027569"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"cache"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/tsl/lib/io/cache.cc"},"File Path for 
Unit Test":{"kind":"string","value":"third_party/xla/xla/tsl/lib/io/cache_test.cc"},"Code":{"kind":"string","value":"#include \"xla/tsl/lib/io/cache.h\"\n#include \n#include \n#include \n#include \n#include \"tsl/platform/mutex.h\"\n#include \"tsl/platform/raw_coding.h\"\nnamespace tsl {\nnamespace table {\nCache::~Cache() {}\nnamespace {\nstruct LRUHandle {\n void* value;\n void (*deleter)(const Slice&, void* value);\n LRUHandle* next_hash;\n LRUHandle* next;\n LRUHandle* prev;\n size_t charge; \n size_t key_length;\n bool in_cache; \n uint32_t refs; \n uint32_t hash; \n char key_data[1]; \n Slice key() const {\n assert(next != this);\n return Slice(key_data, key_length);\n }\n};\nclass HandleTable {\n public:\n HandleTable() : length_(0), elems_(0), list_(nullptr) { Resize(); }\n ~HandleTable() { delete[] list_; }\n LRUHandle* Lookup(const Slice& key, uint32_t hash) {\n return *FindPointer(key, hash);\n }\n LRUHandle* Insert(LRUHandle* h) {\n LRUHandle** ptr = FindPointer(h->key(), h->hash);\n LRUHandle* old = *ptr;\n h->next_hash = (old == nullptr ? nullptr : old->next_hash);\n *ptr = h;\n if (old == nullptr) {\n ++elems_;\n if (elems_ > length_) {\n Resize();\n }\n }\n return old;\n }\n LRUHandle* Remove(const Slice& key, uint32_t hash) {\n LRUHandle** ptr = FindPointer(key, hash);\n LRUHandle* result = *ptr;\n if (result != nullptr) {\n *ptr = result->next_hash;\n --elems_;\n }\n return result;\n }\n private:\n uint32_t length_;\n uint32_t elems_;\n LRUHandle** list_;\n LRUHandle** FindPointer(const Slice& key, uint32_t hash) {\n LRUHandle** ptr = &list_[hash & (length_ - 1)];\n while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {\n ptr = &(*ptr)->next_hash;\n }\n return ptr;\n }\n void Resize() {\n uint32_t new_length = 4;\n while (new_length < elems_) {\n new_length *= 2;\n }\n LRUHandle** new_list = new LRUHandle*[new_length];\n memset(new_list, 0, sizeof(new_list[0]) * new_length);\n uint32_t count = 0;\n for (uint32_t i = 0; i < length_; i++) {\n LRUHandle* h = list_[i];\n while (h != nullptr) {\n LRUHandle* next = h->next_hash;\n uint32_t hash = h->hash;\n LRUHandle** ptr = &new_list[hash & (new_length - 1)];\n h->next_hash = *ptr;\n *ptr = h;\n h = next;\n count++;\n }\n }\n assert(elems_ == count);\n delete[] list_;\n list_ = new_list;\n length_ = new_length;\n }\n};\nclass LRUCache {\n public:\n LRUCache();\n ~LRUCache();\n void SetCapacity(size_t capacity) { capacity_ = capacity; }\n Cache::Handle* Insert(const Slice& key, uint32_t hash, void* value,\n size_t charge,\n void (*deleter)(const Slice& key, void* value));\n Cache::Handle* Lookup(const Slice& key, uint32_t hash);\n void Release(Cache::Handle* handle);\n void Erase(const Slice& key, uint32_t hash);\n void Prune();\n size_t TotalCharge() const {\n mutex_lock l(mutex_);\n return usage_;\n }\n private:\n void LRU_Remove(LRUHandle* e);\n void LRU_Append(LRUHandle* list, LRUHandle* e);\n void Ref(LRUHandle* e);\n void Unref(LRUHandle* e);\n bool FinishErase(LRUHandle* e) TF_EXCLUSIVE_LOCKS_REQUIRED(mutex_);\n size_t capacity_;\n mutable mutex mutex_;\n size_t usage_ TF_GUARDED_BY(mutex_);\n LRUHandle lru_ TF_GUARDED_BY(mutex_);\n LRUHandle in_use_ TF_GUARDED_BY(mutex_);\n HandleTable table_ TF_GUARDED_BY(mutex_);\n};\nLRUCache::LRUCache() : capacity_(0), usage_(0) {\n lru_.next = &lru_;\n lru_.prev = &lru_;\n in_use_.next = &in_use_;\n in_use_.prev = &in_use_;\n}\nLRUCache::~LRUCache() {\n assert(in_use_.next == &in_use_); \n for (LRUHandle* e = lru_.next; e != &lru_;) {\n LRUHandle* next = 
e->next;\n assert(e->in_cache);\n e->in_cache = false;\n assert(e->refs == 1); \n Unref(e);\n e = next;\n }\n}\nvoid LRUCache::Ref(LRUHandle* e) {\n if (e->refs == 1 && e->in_cache) { \n LRU_Remove(e);\n LRU_Append(&in_use_, e);\n }\n e->refs++;\n}\nvoid LRUCache::Unref(LRUHandle* e) {\n assert(e->refs > 0);\n e->refs--;\n if (e->refs == 0) { \n assert(!e->in_cache);\n (*e->deleter)(e->key(), e->value);\n free(e);\n } else if (e->in_cache && e->refs == 1) {\n LRU_Remove(e);\n LRU_Append(&lru_, e);\n }\n}\nvoid LRUCache::LRU_Remove(LRUHandle* e) {\n e->next->prev = e->prev;\n e->prev->next = e->next;\n}\nvoid LRUCache::LRU_Append(LRUHandle* list, LRUHandle* e) {\n e->next = list;\n e->prev = list->prev;\n e->prev->next = e;\n e->next->prev = e;\n}\nCache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) {\n mutex_lock l(mutex_);\n LRUHandle* e = table_.Lookup(key, hash);\n if (e != nullptr) {\n Ref(e);\n }\n return reinterpret_cast<Cache::Handle*>(e);\n}\nvoid LRUCache::Release(Cache::Handle* handle) {\n mutex_lock l(mutex_);\n Unref(reinterpret_cast<LRUHandle*>(handle));\n}\nCache::Handle* LRUCache::Insert(const Slice& key, uint32_t hash, void* value,\n size_t charge,\n void (*deleter)(const Slice& key,\n void* value)) {\n mutex_lock l(mutex_);\n LRUHandle* e =\n reinterpret_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key.size()));\n e->value = value;\n e->deleter = deleter;\n e->charge = charge;\n e->key_length = key.size();\n e->hash = hash;\n e->in_cache = false;\n e->refs = 1; \n memcpy(e->key_data, key.data(), key.size());\n if (capacity_ > 0) {\n e->refs++; \n e->in_cache = true;\n LRU_Append(&in_use_, e);\n usage_ += charge;\n FinishErase(table_.Insert(e));\n } else { \n e->next = nullptr;\n }\n while (usage_ > capacity_ && lru_.next != &lru_) {\n LRUHandle* old = lru_.next;\n assert(old->refs == 1);\n bool erased = FinishErase(table_.Remove(old->key(), old->hash));\n if (!erased) { \n assert(erased);\n }\n }\n return reinterpret_cast<Cache::Handle*>(e);\n}\nbool LRUCache::FinishErase(LRUHandle* e) {\n if (e != nullptr) {\n assert(e->in_cache);\n LRU_Remove(e);\n e->in_cache = false;\n usage_ -= e->charge;\n Unref(e);\n }\n return e != nullptr;\n}\nvoid LRUCache::Erase(const Slice& key, uint32_t hash) {\n mutex_lock l(mutex_);\n FinishErase(table_.Remove(key, hash));\n}\nvoid LRUCache::Prune() {\n mutex_lock l(mutex_);\n while (lru_.next != &lru_) {\n LRUHandle* e = lru_.next;\n assert(e->refs == 1);\n bool erased = FinishErase(table_.Remove(e->key(), e->hash));\n if (!erased) { \n assert(erased);\n }\n }\n}\nstatic const int kNumShardBits = 4;\nstatic const int kNumShards = 1 << kNumShardBits;\nclass ShardedLRUCache : public Cache {\n private:\n LRUCache shard_[kNumShards];\n mutex id_mutex_;\n uint64_t last_id_;\n static inline uint32_t HashSlice(const Slice& s) {\n return Hash(s.data(), s.size(), 0);\n }\n static uint32_t Shard(uint32_t hash) { return hash >> (32 - kNumShardBits); }\n public:\n explicit ShardedLRUCache(size_t capacity) : last_id_(0) {\n const size_t per_shard = (capacity + (kNumShards - 1)) / kNumShards;\n for (int s = 0; s < kNumShards; s++) {\n shard_[s].SetCapacity(per_shard);\n }\n }\n ~ShardedLRUCache() override {}\n Handle* Insert(const Slice& key, void* value, size_t charge,\n void (*deleter)(const Slice& key, void* value)) override {\n const uint32_t hash = HashSlice(key);\n return shard_[Shard(hash)].Insert(key, hash, value, charge, deleter);\n }\n Handle* Lookup(const Slice& key) override {\n const uint32_t hash = HashSlice(key);\n return shard_[Shard(hash)].Lookup(key, hash);\n }\n void 
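// ---------------------------------------------------------------------------
// Editor's note: LRU_Append()/LRU_Remove() above manipulate a circular,
// doubly linked list whose heads (`lru_`, `in_use_`) are sentinel nodes, so
// neither operation needs an empty-list check. The same four pointer writes
// in a self-contained sketch with a hypothetical ListNode type.
#include <cassert>

struct ListNode {
  ListNode* prev;
  ListNode* next;
};

void InitSentinel(ListNode* s) { s->prev = s; s->next = s; }

// Inserts e just before `list`, matching LRU_Append(): the newest entry sits
// on the sentinel's prev side, the oldest on its next side.
void Append(ListNode* list, ListNode* e) {
  e->next = list;
  e->prev = list->prev;
  e->prev->next = e;
  e->next->prev = e;
}

void Remove(ListNode* e) {
  e->next->prev = e->prev;
  e->prev->next = e->next;
}

int main() {
  ListNode lru; InitSentinel(&lru);
  ListNode a, b;
  Append(&lru, &a);
  Append(&lru, &b);
  assert(lru.next == &a && lru.prev == &b);  // a is oldest, b is newest
  Remove(&a);
  assert(lru.next == &b && b.prev == &lru);
  return 0;
}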
Release(Handle* handle) override {\n LRUHandle* h = reinterpret_cast(handle);\n shard_[Shard(h->hash)].Release(handle);\n }\n void Erase(const Slice& key) override {\n const uint32_t hash = HashSlice(key);\n shard_[Shard(hash)].Erase(key, hash);\n }\n void* Value(Handle* handle) override {\n return reinterpret_cast(handle)->value;\n }\n uint64_t NewId() override {\n mutex_lock l(id_mutex_);\n return ++(last_id_);\n }\n void Prune() override {\n for (int s = 0; s < kNumShards; s++) {\n shard_[s].Prune();\n }\n }\n size_t TotalCharge() const override {\n size_t total = 0;\n for (int s = 0; s < kNumShards; s++) {\n total += shard_[s].TotalCharge();\n }\n return total;\n }\n private:\n static uint32_t Hash(const char* data, size_t n, uint32_t seed) {\n const uint32_t m = 0xc6a4a793;\n const uint32_t r = 24;\n const char* limit = data + n;\n uint32_t h = seed ^ (n * m);\n while (data + 4 <= limit) {\n uint32_t w = core::DecodeFixed32(data);\n data += 4;\n h += w;\n h *= m;\n h ^= (h >> 16);\n }\n switch (limit - data) {\n case 3:\n h += static_cast(data[2]) << 16;\n ABSL_FALLTHROUGH_INTENDED;\n case 2:\n h += static_cast(data[1]) << 8;\n ABSL_FALLTHROUGH_INTENDED;\n case 1:\n h += static_cast(data[0]);\n h *= m;\n h ^= (h >> r);\n break;\n }\n return h;\n }\n};\n} \nCache* NewLRUCache(size_t capacity) { return new ShardedLRUCache(capacity); }\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/tsl/lib/io/cache.h\"\n#include \n#include \n#include \"tsl/platform/coding.h\"\n#include \"tsl/platform/raw_coding.h\"\n#include \"tsl/platform/test.h\"\nnamespace tsl {\nnamespace table {\nstatic std::string EncodeKey(int k) {\n std::string result;\n core::PutFixed32(&result, k);\n return result;\n}\nstatic int DecodeKey(const Slice& k) {\n assert(k.size() == 4);\n return core::DecodeFixed32(k.data());\n}\nstatic void* EncodeValue(uintptr_t v) { return reinterpret_cast(v); }\nstatic int DecodeValue(void* v) { return reinterpret_cast(v); }\nclass CacheTest : public ::testing::Test {\n public:\n static void Deleter(const Slice& key, void* v) {\n current_->deleted_keys_.push_back(DecodeKey(key));\n current_->deleted_values_.push_back(DecodeValue(v));\n }\n static constexpr int kCacheSize = 1000;\n std::vector deleted_keys_;\n std::vector deleted_values_;\n Cache* cache_;\n CacheTest() : cache_(NewLRUCache(kCacheSize)) { current_ = this; }\n ~CacheTest() { delete cache_; }\n int Lookup(int key) {\n Cache::Handle* handle = cache_->Lookup(EncodeKey(key));\n const int r = (handle == nullptr) ? 
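// ---------------------------------------------------------------------------
// Editor's note: ShardedLRUCache above routes each key to one of 16 shards by
// the top kNumShardBits of its hash, and rounds the total capacity up so that
// the per-shard capacities sum to at least the requested total. That
// arithmetic in isolation:
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr int kBits = 4;
constexpr int kShards = 1 << kBits;  // 16

uint32_t ShardOf(uint32_t hash) { return hash >> (32 - kBits); }

size_t PerShardCapacity(size_t total) {
  return (total + (kShards - 1)) / kShards;  // ceiling division
}

int main() {
  assert(ShardOf(0x00000000u) == 0);
  assert(ShardOf(0xFFFFFFFFu) == 15);  // the top four bits select the shard
  assert(ShardOf(0x80000000u) == 8);
  assert(PerShardCapacity(1000) == 63);  // 63 * 16 = 1008 >= 1000
  return 0;
}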
-1 : DecodeValue(cache_->Value(handle));\n if (handle != nullptr) {\n cache_->Release(handle);\n }\n return r;\n }\n void Insert(int key, int value, int charge = 1) {\n cache_->Release(cache_->Insert(EncodeKey(key), EncodeValue(value), charge,\n &CacheTest::Deleter));\n }\n Cache::Handle* InsertAndReturnHandle(int key, int value, int charge = 1) {\n return cache_->Insert(EncodeKey(key), EncodeValue(value), charge,\n &CacheTest::Deleter);\n }\n void Erase(int key) { cache_->Erase(EncodeKey(key)); }\n static CacheTest* current_;\n};\nCacheTest* CacheTest::current_;\nTEST_F(CacheTest, HitAndMiss) {\n ASSERT_EQ(-1, Lookup(100));\n Insert(100, 101);\n ASSERT_EQ(101, Lookup(100));\n ASSERT_EQ(-1, Lookup(200));\n ASSERT_EQ(-1, Lookup(300));\n Insert(200, 201);\n ASSERT_EQ(101, Lookup(100));\n ASSERT_EQ(201, Lookup(200));\n ASSERT_EQ(-1, Lookup(300));\n Insert(100, 102);\n ASSERT_EQ(102, Lookup(100));\n ASSERT_EQ(201, Lookup(200));\n ASSERT_EQ(-1, Lookup(300));\n ASSERT_EQ(1, deleted_keys_.size());\n ASSERT_EQ(100, deleted_keys_[0]);\n ASSERT_EQ(101, deleted_values_[0]);\n}\nTEST_F(CacheTest, Erase) {\n Erase(200);\n ASSERT_EQ(0, deleted_keys_.size());\n Insert(100, 101);\n Insert(200, 201);\n Erase(100);\n ASSERT_EQ(-1, Lookup(100));\n ASSERT_EQ(201, Lookup(200));\n ASSERT_EQ(1, deleted_keys_.size());\n ASSERT_EQ(100, deleted_keys_[0]);\n ASSERT_EQ(101, deleted_values_[0]);\n Erase(100);\n ASSERT_EQ(-1, Lookup(100));\n ASSERT_EQ(201, Lookup(200));\n ASSERT_EQ(1, deleted_keys_.size());\n}\nTEST_F(CacheTest, EntriesArePinned) {\n Insert(100, 101);\n Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));\n ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));\n Insert(100, 102);\n Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));\n ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));\n ASSERT_EQ(0, deleted_keys_.size());\n cache_->Release(h1);\n ASSERT_EQ(1, deleted_keys_.size());\n ASSERT_EQ(100, deleted_keys_[0]);\n ASSERT_EQ(101, deleted_values_[0]);\n Erase(100);\n ASSERT_EQ(-1, Lookup(100));\n ASSERT_EQ(1, deleted_keys_.size());\n cache_->Release(h2);\n ASSERT_EQ(2, deleted_keys_.size());\n ASSERT_EQ(100, deleted_keys_[1]);\n ASSERT_EQ(102, deleted_values_[1]);\n}\nTEST_F(CacheTest, EvictionPolicy) {\n Insert(100, 101);\n Insert(200, 201);\n Insert(300, 301);\n Cache::Handle* h = cache_->Lookup(EncodeKey(300));\n for (int i = 0; i < kCacheSize + 100; i++) {\n Insert(1000 + i, 2000 + i);\n ASSERT_EQ(2000 + i, Lookup(1000 + i));\n ASSERT_EQ(101, Lookup(100));\n }\n ASSERT_EQ(101, Lookup(100));\n ASSERT_EQ(-1, Lookup(200));\n ASSERT_EQ(301, Lookup(300));\n cache_->Release(h);\n}\nTEST_F(CacheTest, UseExceedsCacheSize) {\n std::vector h;\n for (int i = 0; i < kCacheSize + 100; i++) {\n h.push_back(InsertAndReturnHandle(1000 + i, 2000 + i));\n }\n for (int i = 0; i < h.size(); i++) {\n ASSERT_EQ(2000 + i, Lookup(1000 + i));\n }\n for (int i = 0; i < h.size(); i++) {\n cache_->Release(h[i]);\n }\n}\nTEST_F(CacheTest, HeavyEntries) {\n const int kLight = 1;\n const int kHeavy = 10;\n int added = 0;\n int index = 0;\n while (added < 2 * kCacheSize) {\n const int weight = (index & 1) ? kLight : kHeavy;\n Insert(index, 1000 + index, weight);\n added += weight;\n index++;\n }\n int cached_weight = 0;\n for (int i = 0; i < index; i++) {\n const int weight = (i & 1 ? 
kLight : kHeavy);\n int r = Lookup(i);\n if (r >= 0) {\n cached_weight += weight;\n ASSERT_EQ(1000 + i, r);\n }\n }\n ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);\n}\nTEST_F(CacheTest, NewId) {\n uint64_t a = cache_->NewId();\n uint64_t b = cache_->NewId();\n ASSERT_NE(a, b);\n}\nTEST_F(CacheTest, Prune) {\n Insert(1, 100);\n Insert(2, 200);\n Cache::Handle* handle = cache_->Lookup(EncodeKey(1));\n ASSERT_TRUE(handle);\n cache_->Prune();\n cache_->Release(handle);\n ASSERT_EQ(100, Lookup(1));\n ASSERT_EQ(-1, Lookup(2));\n}\nTEST_F(CacheTest, ZeroSizeCache) {\n delete cache_;\n cache_ = NewLRUCache(0);\n Insert(1, 100);\n ASSERT_EQ(-1, Lookup(1));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/cache.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/cache_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":272,"cells":{"ID":{"kind":"string","value":"0f77a7a9-b07e-42da-9f2a-f6b620f364f6"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/arolla"},"File Name":{"kind":"string","value":"frame"},"File Path in Repository":{"kind":"string","value":"arolla/memory/frame.cc"},"File Path for Unit Test":{"kind":"string","value":"arolla/memory/frame_test.cc"},"Code":{"kind":"string","value":"#include \"arolla/memory/frame.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/log/check.h\"\n#include \"absl/status/status.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"arolla/util/algorithms.h\"\n#include \"arolla/util/memory.h\"\nnamespace arolla {\nstd::type_index FrameLayout::FieldFactory::type_index() const { return type_; }\nvoid FrameLayout::FieldFactory::Add(size_t offset) {\n offsets_.push_back(offset);\n}\nvoid FrameLayout::FieldFactory::AddDerived(\n const FieldFactory& derived_factory) {\n DCHECK(type_index() == derived_factory.type_index());\n for (size_t cur_offset : derived_factory.offsets_) {\n offsets_.push_back(cur_offset);\n }\n}\nFrameLayout::FieldFactory FrameLayout::FieldFactory::Derive(\n size_t offset) const {\n FieldFactory res = *this;\n for (size_t& cur_offset : res.offsets_) {\n cur_offset += offset;\n }\n return res;\n}\nvoid FrameLayout::FieldInitializers::AddOffsetToFactory(\n size_t offset, FieldFactory empty_factory) {\n auto it = type2factory.find(empty_factory.type_index());\n if (it == type2factory.end()) {\n bool inserted;\n std::tie(it, inserted) =\n type2factory.emplace(empty_factory.type_index(), factories.size());\n factories.push_back(std::move(empty_factory));\n }\n DCHECK_LT(it->second, factories.size());\n if (it->second < factories.size()) { \n factories[it->second].Add(offset);\n }\n}\nvoid FrameLayout::FieldInitializers::AddDerived(\n size_t offset, const FieldInitializers& derived_initializers) {\n for (const auto& [derived_tpe, derived_id] :\n derived_initializers.type2factory) {\n const auto& derived_factory = derived_initializers.factories[derived_id];\n if (auto it = type2factory.find(derived_tpe); it != type2factory.end()) {\n factories[it->second].AddDerived(derived_factory.Derive(offset));\n } else {\n type2factory.emplace(derived_tpe, factories.size());\n factories.push_back(derived_factory.Derive(offset));\n }\n }\n}\nFrameLayout::Slot 
FrameLayout::Builder::AddSubFrame(\n const FrameLayout& subframe) {\n alloc_size_ = RoundUp(alloc_size_, subframe.AllocAlignment().value);\n size_t offset = alloc_size_;\n alloc_size_ += subframe.AllocSize();\n alloc_alignment_ =\n std::max(alloc_alignment_, subframe.AllocAlignment().value);\n initializers_.AddDerived(offset, subframe.initializers_);\n#ifndef NDEBUG\n for (const auto& [field_offset, field_type] : subframe.registered_fields_) {\n registered_fields_.emplace(offset + field_offset, field_type);\n }\n#endif\n return FrameLayout::Slot(offset);\n}\nabsl::Status FrameLayout::Builder::RegisterUnsafeSlot(\n size_t byte_offset, size_t byte_size, const std::type_info& type) {\n return RegisterSlot(byte_offset, byte_size, type);\n}\nabsl::Status FrameLayout::Builder::RegisterSlot(size_t byte_offset,\n size_t byte_size,\n const std::type_info& type,\n bool allow_duplicates) {\n if (byte_offset == FrameLayout::Slot::kUninitializedOffset) {\n return absl::FailedPreconditionError(\n \"unable to register uninitialized slot\");\n }\n if (byte_offset > alloc_size_ || byte_size > alloc_size_ - byte_offset) {\n return absl::FailedPreconditionError(absl::StrCat(\n \"unable to register slot after the end of alloc, offset: \", byte_offset,\n \", size: \", byte_size, \", alloc size: \", alloc_size_));\n }\n#ifndef NDEBUG\n if (!registered_fields_.emplace(byte_offset, std::type_index(type)).second &&\n !allow_duplicates) {\n return absl::FailedPreconditionError(absl::StrCat(\n \"slot is already registered \", byte_offset, \" \", type.name()));\n }\n#endif\n return absl::OkStatus();\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"arolla/memory/frame.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"absl/base/dynamic_annotations.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/status_matchers.h\"\n#include \"arolla/memory/memory_allocation.h\"\n#include \"arolla/util/demangle.h\"\n#include \"arolla/util/is_bzero_constructible.h\"\n#include \"arolla/util/memory.h\"\n#include \"arolla/util/status_macros_backport.h\" \nnamespace arolla::testing {\nnamespace {\nusing ::absl_testing::IsOk;\nusing ::absl_testing::StatusIs;\nusing ::testing::ElementsAre;\nusing ::testing::Eq;\nusing ::testing::HasSubstr;\nusing ::testing::IsEmpty;\nstruct SimpleStruct {\n int a;\n float b;\n};\nstruct InitializedStruct {\n int a = 1;\n float b = 2.0;\n};\nTEST(FrameLayoutTest, SlotOutput) {\n FrameLayout::Builder builder;\n auto slot = builder.AddSlot();\n std::ostringstream ss;\n ss << slot;\n EXPECT_EQ(ss.str(), std::string(\"Slot<\") + TypeName() + \">(0)\");\n}\nTEST(FrameLayoutTest, SimpleFields) {\n FrameLayout::Builder builder;\n auto slot1 = builder.AddSlot();\n auto slot2 = builder.AddSlot();\n auto slot3 = builder.AddSlot();\n auto layout = std::move(builder).Build();\n MemoryAllocation alloc(&layout);\n FramePtr frame = alloc.frame();\n EXPECT_THAT(frame.Get(slot1), Eq(0));\n EXPECT_THAT(frame.Get(slot2), Eq(0.0f));\n EXPECT_THAT(frame.Get(slot3), Eq(0.0));\n frame.Set(slot1, 1);\n frame.Set(slot2, 2.0f);\n frame.Set(slot3, M_PI);\n EXPECT_THAT(frame.Get(slot1), Eq(1));\n EXPECT_THAT(frame.Get(slot2), Eq(2.0f));\n EXPECT_THAT(frame.Get(slot3), Eq(M_PI));\n}\nTEST(FrameLayoutTest, SimpleArrays) {\n FrameLayout::Builder builder;\n auto slot1 = builder.AddSlot>();\n auto slot2 = builder.AddSlot>();\n auto slot3 = builder.AddSlot>();\n auto layout 
= std::move(builder).Build();\n MemoryAllocation alloc(&layout);\n FramePtr frame = alloc.frame();\n EXPECT_THAT(frame.Get(slot1), ElementsAre(0, 0, 0, 0));\n EXPECT_THAT(frame.Get(slot2), ElementsAre(0.0f, 0.0f, 0.0f, 0.0f));\n EXPECT_THAT(frame.Get(slot3), ElementsAre(0, 0, 0, 0));\n frame.Set(slot1, std::array{1, 2, 3, 4});\n frame.Set(slot2, std::array{1.0f, 2.0f, 3.0f, 4.0f});\n frame.Set(slot3, std::array{'a', 'b', 'c', 'd'});\n EXPECT_THAT(frame.Get(slot1), ElementsAre(1, 2, 3, 4));\n EXPECT_THAT(frame.Get(slot2), ElementsAre(1.0f, 2.0f, 3.0f, 4.0f));\n EXPECT_THAT(frame.Get(slot3), ElementsAre('a', 'b', 'c', 'd'));\n}\nTEST(FrameLayoutTest, SimplePointers) {\n FrameLayout::Builder builder;\n auto slot1 = builder.AddSlot();\n auto slot2 = builder.AddSlot();\n auto layout = std::move(builder).Build();\n MemoryAllocation alloc(&layout);\n FramePtr frame = alloc.frame();\n EXPECT_THAT(frame.Get(slot1), Eq(nullptr));\n EXPECT_THAT(frame.Get(slot2), Eq(nullptr));\n int int_values[] = {1, 2, 3, 4};\n char text[] = \"It was a dark and stormy night.\";\n frame.Set(slot1, int_values);\n frame.Set(slot2, text);\n EXPECT_THAT(frame.Get(slot1), Eq(int_values));\n EXPECT_THAT(frame.Get(slot2), Eq(text));\n}\nTEST(FrameLayoutTest, SmartPointers) {\n FrameLayout::Builder builder;\n auto slot1 = builder.AddSlot>();\n auto slot2 = builder.AddSlot>();\n auto layout = std::move(builder).Build();\n MemoryAllocation alloc(&layout);\n FramePtr frame = alloc.frame();\n EXPECT_THAT(frame.Get(slot1), Eq(nullptr));\n EXPECT_THAT(frame.Get(slot2), Eq(nullptr));\n frame.Set(slot1, std::make_unique(12));\n frame.Set(slot2,\n std::make_unique(\"It was a dark and stormy night.\"));\n EXPECT_THAT(*frame.Get(slot1), Eq(12));\n EXPECT_THAT(*frame.Get(slot2), Eq(\"It was a dark and stormy night.\"));\n}\nTEST(FrameLayoutTest, Vector) {\n FrameLayout::Builder builder;\n auto slot1 = builder.AddSlot>();\n auto slot2 = builder.AddSlot>();\n auto layout = std::move(builder).Build();\n MemoryAllocation alloc(&layout);\n FramePtr frame = alloc.frame();\n EXPECT_THAT(frame.Get(slot1), IsEmpty());\n EXPECT_THAT(frame.Get(slot2), IsEmpty());\n auto* int_vector = frame.GetMutable(slot1);\n int_vector->push_back(1);\n int_vector->push_back(2);\n int_vector->push_back(3);\n auto* string_vector = frame.GetMutable(slot2);\n string_vector->push_back(\"How\");\n string_vector->push_back(\"now\");\n string_vector->push_back(\"brown\");\n string_vector->push_back(\"cow?\");\n EXPECT_THAT(frame.Get(slot1), ElementsAre(1, 2, 3));\n EXPECT_THAT(frame.Get(slot2), ElementsAre(\"How\", \"now\", \"brown\", \"cow?\"));\n}\nTEST(FrameLayoutTest, Structs) {\n FrameLayout::Builder builder;\n auto slot1 = builder.AddSlot();\n auto slot2 = builder.AddSlot();\n auto layout = std::move(builder).Build();\n MemoryAllocation alloc(&layout);\n FramePtr frame = alloc.frame();\n const SimpleStruct& s1 = frame.Get(slot1);\n EXPECT_THAT(s1.a, Eq(0));\n EXPECT_THAT(s1.b, Eq(0.0f));\n const InitializedStruct& s2 = frame.Get(slot2);\n EXPECT_THAT(s2.a, Eq(1));\n EXPECT_THAT(s2.b, Eq(2.0f));\n}\nTEST(FrameLayoutTest, AFewDifferentTypesWellInitialized) {\n FrameLayout::Builder builder;\n auto slot1 = builder.AddSlot>();\n auto slot2 = builder.AddSlot>();\n auto slot3 = builder.AddSlot>();\n auto slot4 = builder.AddSlot();\n auto slot5 = builder.AddSlot();\n auto slot6 = builder.AddSlot>();\n auto slot7 = builder.AddSlot>();\n auto slot8 = builder.AddSlot>();\n auto slot9 = builder.AddSlot();\n auto layout = std::move(builder).Build();\n MemoryAllocation 
alloc(&layout);\n FramePtr frame = alloc.frame();\n EXPECT_THAT(frame.Get(slot1), IsEmpty());\n EXPECT_THAT(frame.Get(slot2), IsEmpty());\n EXPECT_THAT(frame.Get(slot3), IsEmpty());\n EXPECT_THAT(frame.Get(slot6), IsEmpty());\n EXPECT_THAT(frame.Get(slot7), IsEmpty());\n EXPECT_THAT(frame.Get(slot8), IsEmpty());\n const SimpleStruct& simple = frame.Get(slot4);\n EXPECT_THAT(simple.a, Eq(0));\n EXPECT_THAT(simple.b, Eq(0.0f));\n for (const InitializedStruct& init : {frame.Get(slot5), frame.Get(slot9)}) {\n EXPECT_THAT(init.a, Eq(1));\n EXPECT_THAT(init.b, Eq(2.0f));\n }\n}\nTEST(FrameLayoutTest, HasField) {\n FrameLayout::Builder builder;\n auto slot1 = builder.AddSlot();\n auto slot2 = builder.AddSlot>();\n auto slot3 = builder.AddSlot();\n auto slot4 = builder.AddSlot>();\n auto slot5 = builder.AddSlot();\n auto slot6 = builder.AddSlot>();\n auto layout = std::move(builder).Build();\n EXPECT_TRUE(layout.HasField(slot1.byte_offset(), typeid(int)));\n EXPECT_TRUE(layout.HasField(slot2.byte_offset(), typeid(std::vector)));\n EXPECT_TRUE(layout.HasField(slot3.byte_offset(), typeid(SimpleStruct)));\n EXPECT_TRUE(layout.HasField(slot4.byte_offset(),\n typeid(std::array)));\n EXPECT_TRUE(layout.HasField(slot5.byte_offset(), typeid(InitializedStruct)));\n EXPECT_TRUE(layout.HasField(slot6.byte_offset(),\n typeid(std::array)));\n}\nTEST(FrameLayoutTest, RegisterUnsafeSlotWithEmptyField) {\n FrameLayout::Builder builder;\n ASSERT_TRUE(builder.RegisterUnsafeSlot(0, 0, typeid(std::monostate())).ok());\n auto layout = std::move(builder).Build();\n EXPECT_TRUE(layout.HasField(0, typeid(std::monostate())));\n}\nTEST(FrameLayoutTest, FieldDescriptorsRegisterUnsafe) {\n FrameLayout::Builder builder;\n auto slot = builder.AddSlot();\n auto slot_1part =\n FrameLayout::Slot::UnsafeSlotFromOffset(slot.byte_offset());\n auto slot_2part =\n FrameLayout::Slot::UnsafeSlotFromOffset(slot.byte_offset() + 2);\n ASSERT_THAT(builder.RegisterUnsafeSlot(slot_1part), IsOk());\n ASSERT_THAT(builder.RegisterUnsafeSlot(slot_2part), IsOk());\n ASSERT_THAT(builder.RegisterUnsafeSlot(slot.byte_offset() + 2, sizeof(int8_t),\n typeid(int8_t)),\n IsOk());\n#ifndef NDEBUG\n EXPECT_THAT(builder.RegisterUnsafeSlot(slot_2part),\n StatusIs(absl::StatusCode::kFailedPrecondition,\n HasSubstr(\"slot is already registered\")));\n EXPECT_THAT(builder.RegisterUnsafeSlot(slot_2part, true),\n IsOk());\n#endif\n auto layout = std::move(builder).Build();\n EXPECT_TRUE(layout.HasField(slot.byte_offset(), typeid(int32_t)));\n EXPECT_TRUE(layout.HasField(slot.byte_offset(), typeid(int16_t)));\n EXPECT_TRUE(layout.HasField(slot.byte_offset() + 2, typeid(int16_t)));\n EXPECT_TRUE(layout.HasField(slot.byte_offset() + 2, typeid(int8_t)));\n#ifndef NDEBUG\n EXPECT_FALSE(layout.HasField(slot.byte_offset() + 2, typeid(float)));\n EXPECT_FALSE(layout.HasField(slot.byte_offset() + 1, typeid(int8_t)));\n#endif\n}\nTEST(FrameLayoutTest, FieldDescriptorsRegisterUnsafeErrors) {\n FrameLayout::Builder builder;\n auto slot = builder.AddSlot();\n auto slot_1part =\n FrameLayout::Slot::UnsafeSlotFromOffset(slot.byte_offset());\n auto slot_after_end =\n FrameLayout::Slot::UnsafeSlotFromOffset(slot.byte_offset() + 4);\n auto uninitialized_slot =\n FrameLayout::Slot::UnsafeUninitializedSlot();\n auto status = builder.RegisterUnsafeSlot(slot_1part);\n ASSERT_OK(status);\n#ifndef NDEBUG\n status = builder.RegisterUnsafeSlot(slot);\n ASSERT_FALSE(status.ok());\n ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition);\n EXPECT_THAT(status.message(), 
HasSubstr(\"slot is already registered\"));\n status = builder.RegisterUnsafeSlot(slot_1part);\n ASSERT_FALSE(status.ok());\n ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition);\n EXPECT_THAT(status.message(), HasSubstr(\"slot is already registered\"));\n#endif\n status = builder.RegisterUnsafeSlot(slot_after_end);\n ASSERT_FALSE(status.ok());\n ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition);\n EXPECT_THAT(status.message(),\n HasSubstr(\"unable to register slot after the end of alloc\"));\n status = builder.RegisterUnsafeSlot(100, sizeof(int), typeid(int));\n ASSERT_FALSE(status.ok());\n ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition);\n EXPECT_THAT(status.message(),\n HasSubstr(\"unable to register slot after the end of alloc, \"\n \"offset: 100, size: 4, alloc size: 4\"));\n status = builder.RegisterUnsafeSlot(uninitialized_slot);\n ASSERT_FALSE(status.ok());\n ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition);\n EXPECT_THAT(status.message(),\n HasSubstr(\"unable to register uninitialized slot\"));\n}\nstruct SelfReference {\n const SelfReference* self;\n SelfReference() : self(this) {}\n SelfReference(const SelfReference&) = delete;\n SelfReference& operator=(const SelfReference&) = delete;\n ~SelfReference() {\n volatile auto secure_ptr = &self;\n *secure_ptr = nullptr;\n }\n};\nTEST(FrameLayoutTest, AddSubFrame) {\n FrameLayout subframe_layout;\n std::vector> field_slots;\n {\n FrameLayout::Builder builder;\n for (int i = 0; i < 2; ++i) {\n field_slots.push_back(builder.AddSlot());\n }\n subframe_layout = std::move(builder).Build();\n }\n FrameLayout frame_layout;\n std::vector> subframe_slots;\n {\n FrameLayout::Builder builder;\n builder.AddSlot(); \n for (int j = 0; j < 3; ++j) {\n subframe_slots.push_back(builder.AddSubFrame(subframe_layout));\n builder.AddSlot(); \n }\n frame_layout = std::move(builder).Build();\n }\n for (const auto& subframe_slot : subframe_slots) {\n for (const auto& field_slot : field_slots) {\n EXPECT_TRUE(frame_layout.HasField(\n subframe_slot.byte_offset() + field_slot.byte_offset(),\n typeid(SelfReference)));\n }\n }\n const auto alloc =\n AlignedAlloc(frame_layout.AllocAlignment(), frame_layout.AllocSize());\n frame_layout.InitializeAlignedAlloc(alloc.get());\n FramePtr frame(alloc.get(), &frame_layout);\n for (const auto& subframe_slot : subframe_slots) {\n for (const auto& field_slot : field_slots) {\n const void* subframe_ptr =\n frame.GetRawPointer(subframe_slot.byte_offset());\n ConstFramePtr subframe(subframe_ptr, &subframe_layout);\n const SelfReference& field = subframe.Get(field_slot);\n EXPECT_TRUE(field.self == &field);\n }\n }\n frame_layout.DestroyAlloc(alloc.get());\n ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(alloc.get(), frame_layout.AllocSize());\n for (const auto& subframe_slot : subframe_slots) {\n for (const auto& field_slot : field_slots) {\n const void* subframe_ptr =\n frame.GetRawPointer(subframe_slot.byte_offset());\n ConstFramePtr subframe(subframe_ptr, &subframe_layout);\n const SelfReference& field = subframe.Get(field_slot);\n EXPECT_TRUE(field.self == nullptr);\n }\n }\n}\nTEST(FrameLayoutTest, AddSubFrameAllocAlignment) {\n FrameLayout::Builder builder;\n builder.AddSubFrame(MakeTypeLayout>());\n builder.AddSubFrame(MakeTypeLayout>());\n auto frame_layout = std::move(builder).Build();\n EXPECT_EQ(frame_layout.AllocSize(), 32);\n EXPECT_EQ(frame_layout.AllocAlignment().value, 16);\n}\nTEST(FrameLayoutTest, ArrayCompatibility) {\n FrameLayout::Builder builder;\n 
builder.AddSlot>();\n builder.AddSlot>();\n auto frame_layout = std::move(builder).Build();\n EXPECT_EQ(frame_layout.AllocSize(), 32);\n EXPECT_EQ(frame_layout.AllocAlignment().value, 16);\n}\nTEST(FrameLayoutTest, InitDestroyAllocN) {\n static int instance_counter = 0;\n struct InstanceCounted {\n InstanceCounted() { ++instance_counter; }\n ~InstanceCounted() { --instance_counter; }\n };\n struct SelfReferenced {\n SelfReferenced() : self(this) {}\n SelfReferenced* self;\n };\n FrameLayout::Builder builder;\n auto int_slot = builder.AddSlot();\n auto self_ref_slot = builder.AddSlot();\n builder.AddSlot();\n auto layout = std::move(builder).Build();\n const int n = 10;\n const auto alloc =\n AlignedAlloc(layout.AllocAlignment(), layout.AllocSize() * n);\n layout.InitializeAlignedAllocN(alloc.get(), n);\n EXPECT_EQ(instance_counter, n);\n for (int i = 0; i < n; ++i) {\n ConstFramePtr ith_frame(\n static_cast(alloc.get()) + i * layout.AllocSize(),\n &layout);\n EXPECT_EQ(ith_frame.Get(int_slot), 0);\n EXPECT_EQ(ith_frame.Get(self_ref_slot).self, &ith_frame.Get(self_ref_slot));\n }\n layout.DestroyAllocN(alloc.get(), n);\n EXPECT_EQ(instance_counter, 0);\n}\nstruct IsBZeroConstructible {\n static bool ctor_called;\n static bool dtor_called;\n IsBZeroConstructible() { ctor_called = true; }\n ~IsBZeroConstructible() { dtor_called = true; }\n};\nbool IsBZeroConstructible::ctor_called;\nbool IsBZeroConstructible::dtor_called;\n} \n} \nnamespace arolla {\ntemplate <>\nstruct is_bzero_constructible<::arolla::testing::IsBZeroConstructible>\n : std::true_type {};\n} \nnamespace arolla::testing {\nnamespace {\nTEST(FrameLayoutTest, IsBZeroConstructibleHandling) {\n ASSERT_FALSE(IsBZeroConstructible::ctor_called);\n ASSERT_FALSE(IsBZeroConstructible::dtor_called);\n {\n auto layout = MakeTypeLayout();\n MemoryAllocation alloc(&layout);\n }\n EXPECT_FALSE(IsBZeroConstructible::ctor_called);\n EXPECT_TRUE(IsBZeroConstructible::dtor_called);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/frame.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/frame_test.cc"},"Commit Hash":{"kind":"string","value":"1ca990dbeca224035efdabffecc7f3738df6b52c"}}},{"rowIdx":273,"cells":{"ID":{"kind":"string","value":"93ddf3a3-54f7-4d52-9e60-c7a46d275e27"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/tensorstore"},"File Name":{"kind":"string","value":"codec_chain_spec"},"File Path in Repository":{"kind":"string","value":"tensorstore/driver/zarr3/codec/codec_chain_spec.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorstore/driver/zarr3/codec/codec_chain_spec_test.cc"},"Code":{"kind":"string","value":"#include \"tensorstore/driver/zarr3/codec/codec_chain_spec.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/container/fixed_array.h\"\n#include \"absl/status/status.h\"\n#include \"absl/strings/str_format.h\"\n#include \n#include \"tensorstore/codec_spec.h\"\n#include \"tensorstore/codec_spec_registry.h\"\n#include \"tensorstore/driver/zarr3/codec/bytes.h\"\n#include \"tensorstore/driver/zarr3/codec/codec.h\"\n#include \"tensorstore/driver/zarr3/codec/codec_spec.h\"\n#include \"tensorstore/driver/zarr3/codec/registry.h\"\n#include \"tensorstore/driver/zarr3/codec/transpose.h\"\n#include 
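// Hedged sketch, not from the dataset row: the frame tests above all follow
// one round-trip pattern. The angle-bracket contents were stripped by
// extraction, so AddSlot<int>()/AddSlot<float>() are reconstructions; the
// headers are the ones the test file itself includes.
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"

using ::arolla::FrameLayout;
using ::arolla::FramePtr;
using ::arolla::MemoryAllocation;

int main() {
  FrameLayout::Builder builder;
  auto int_slot = builder.AddSlot<int>();      // assumed element type
  auto float_slot = builder.AddSlot<float>();  // assumed element type
  auto layout = std::move(builder).Build();    // the builder is consumed here

  MemoryAllocation alloc(&layout);  // allocates and value-initializes slots
  FramePtr frame = alloc.frame();
  frame.Set(int_slot, 1);
  frame.Set(float_slot, 2.0f);
  return frame.Get(int_slot) == 1 ? 0 : 1;
}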
\"tensorstore/driver/zarr3/name_configuration_json_binder.h\"\n#include \"tensorstore/index.h\"\n#include \"tensorstore/internal/cache_key/cache_key.h\"\n#include \"tensorstore/internal/intrusive_ptr.h\"\n#include \"tensorstore/internal/json_binding/bindable.h\"\n#include \"tensorstore/internal/json_binding/json_binding.h\"\n#include \"tensorstore/internal/json_binding/std_array.h\"\n#include \"tensorstore/internal/json_binding/std_optional.h\"\n#include \"tensorstore/internal/unaligned_data_type_functions.h\"\n#include \"tensorstore/rank.h\"\n#include \"tensorstore/serialization/fwd.h\"\n#include \"tensorstore/serialization/json_bindable.h\"\n#include \"tensorstore/util/result.h\"\n#include \"tensorstore/util/span.h\"\n#include \"tensorstore/util/status.h\"\n#include \"tensorstore/util/str_cat.h\"\nnamespace tensorstore {\nnamespace internal_zarr3 {\nnamespace jb = ::tensorstore::internal_json_binding;\nnamespace {\nstruct ZarrCodecJsonBinderImpl {\n static absl::Status FromJson(const ZarrCodecSpec::FromJsonOptions& options,\n ZarrCodecSpec::Ptr* obj, ::nlohmann::json* j);\n static absl::Status ToJson(const ZarrCodecSpec::ToJsonOptions& options,\n const ZarrCodecSpec* const* obj,\n ::nlohmann::json* j);\n absl::Status operator()(std::true_type is_loading,\n const ZarrCodecSpec::FromJsonOptions& options,\n ZarrCodecSpec::Ptr* obj, ::nlohmann::json* j) const {\n return FromJson(options, obj, j);\n }\n template \n absl::Status operator()(std::false_type is_loading,\n const ZarrCodecSpec::ToJsonOptions& options, T* obj,\n ::nlohmann::json* j) const {\n static_assert(\n std::is_convertible_v);\n const ZarrCodecSpec* ptr = &**obj;\n return ToJson(options, &ptr, j);\n }\n};\nconstexpr inline ZarrCodecJsonBinderImpl ZarrCodecJsonBinder{};\nconstexpr auto ZarrCodecJsonBinderImplBase =\n [](auto is_loading, const auto& options, auto* obj, auto* j) {\n const auto& registry = GetCodecRegistry();\n if constexpr (is_loading) {\n if (options.constraints && j->is_string()) {\n ::nlohmann::json::object_t j_obj;\n j_obj.emplace(\"name\", std::move(*j));\n *j = std::move(j_obj);\n }\n }\n return jb::Object(NameConfigurationJsonBinder(\n registry.KeyBinder(), registry.RegisteredObjectBinder())) \n (is_loading, options, obj, j);\n };\nabsl::Status ZarrCodecJsonBinderImpl::FromJson(\n const ZarrCodecSpec::FromJsonOptions& options, ZarrCodecSpec::Ptr* obj,\n ::nlohmann::json* j) {\n return ZarrCodecJsonBinderImplBase(std::true_type{}, options, obj, j);\n}\nabsl::Status ZarrCodecJsonBinderImpl::ToJson(\n const ZarrCodecSpec::ToJsonOptions& options,\n const ZarrCodecSpec* const* obj, ::nlohmann::json* j) {\n return ZarrCodecJsonBinderImplBase(std::false_type{}, options, obj, j);\n}\nconstexpr auto ZarrCodecChainSpecJsonBinderImpl = jb::Compose<\n std::vector>(\n [](auto is_loading, const auto& options, auto* obj, auto* j) {\n if constexpr (is_loading) {\n auto it = j->begin(), end = j->end();\n for (; it != end && (*it)->kind() == ZarrCodecKind::kArrayToArray;\n ++it) {\n obj->array_to_array.push_back(\n internal::static_pointer_cast(\n std::move(*it)));\n }\n if (it != end && (*it)->kind() == ZarrCodecKind::kArrayToBytes) {\n obj->array_to_bytes =\n internal::static_pointer_cast(\n std::move(*it));\n ++it;\n } else if (!options.constraints) {\n return absl::InvalidArgumentError(\n \"array -> bytes codec must be specified\");\n }\n for (; it != end; ++it) {\n if ((*it)->kind() != ZarrCodecKind::kBytesToBytes) {\n return absl::InvalidArgumentError(tensorstore::StrCat(\n \"Expected bytes -> bytes codec, but 
received: \",\n jb::ToJson(*it, ZarrCodecJsonBinder).value().dump()));\n }\n obj->bytes_to_bytes.push_back(\n internal::static_pointer_cast(\n std::move(*it)));\n }\n } else {\n j->insert(j->end(), obj->array_to_array.begin(),\n obj->array_to_array.end());\n if (obj->array_to_bytes) {\n j->push_back(obj->array_to_bytes);\n }\n j->insert(j->end(), obj->bytes_to_bytes.begin(),\n obj->bytes_to_bytes.end());\n }\n return absl::OkStatus();\n },\n jb::Array(ZarrCodecJsonBinder));\n} \nTENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(ZarrCodecChainSpec,\n ZarrCodecChainSpecJsonBinderImpl);\nnamespace {\nResult GetDefaultArrayToBytesCodecSpec(\n const ArrayCodecResolveParameters& decoded) {\n if (internal::IsTrivialDataType(decoded.dtype)) {\n return DefaultBytesCodec();\n }\n return absl::InternalError(tensorstore::StrCat(\n \"No default codec defined for data type \", decoded.dtype));\n}\nabsl::Status CodecResolveError(const ZarrCodecSpec& codec_spec,\n std::string_view message,\n const absl::Status& status) {\n return tensorstore::MaybeAnnotateStatus(\n status, tensorstore::StrCat(\n \"Error \", message, \" through \",\n jb::ToJson(&codec_spec, ZarrCodecJsonBinder).value().dump()));\n}\n} \nsize_t ZarrCodecChainSpec::sharding_height() const {\n return array_to_bytes ? array_to_bytes->sharding_height() : 0;\n}\nabsl::Status ZarrCodecChainSpec::GetDecodedChunkLayout(\n const ArrayDataTypeAndShapeInfo& array_info,\n ArrayCodecChunkLayoutInfo& decoded) const {\n absl::FixedArray array_infos(\n array_to_array.size());\n const ArrayDataTypeAndShapeInfo* decoded_array_info = &array_info;\n for (size_t i = 0; i < array_to_array.size(); ++i) {\n const auto& codec_spec = *array_to_array[i];\n auto& encoded_array_info = array_infos[i];\n TENSORSTORE_RETURN_IF_ERROR(\n codec_spec.PropagateDataTypeAndShape(*decoded_array_info,\n encoded_array_info),\n CodecResolveError(codec_spec, \"propagating data type and shape\", _));\n decoded_array_info = &encoded_array_info;\n }\n std::optional temp_info[2];\n const ArrayCodecChunkLayoutInfo* encoded_info;\n if (array_to_bytes) {\n auto& decoded_info = array_infos.empty() ? decoded : temp_info[0].emplace();\n TENSORSTORE_RETURN_IF_ERROR(\n array_to_bytes->GetDecodedChunkLayout(\n array_infos.empty() ? array_info : array_infos.back(),\n decoded_info),\n CodecResolveError(*array_to_bytes, \"propagating chunk layout\", _));\n encoded_info = &decoded_info;\n } else if (!array_to_array.empty()) {\n encoded_info = &temp_info[0].emplace();\n }\n for (size_t i = array_to_array.size(); i--;) {\n auto& decoded_info =\n i == 0 ? decoded : temp_info[(array_to_array.size() - i) % 2].emplace();\n const auto& codec_spec = *array_to_array[i];\n TENSORSTORE_RETURN_IF_ERROR(\n codec_spec.GetDecodedChunkLayout(\n array_infos[i], *encoded_info,\n i == 0 ? 
array_info : array_infos[i - 1], decoded_info),\n CodecResolveError(codec_spec, \"propagating chunk layout\", _));\n encoded_info = &decoded_info;\n }\n return absl::OkStatus();\n}\nResult>\nZarrCodecChainSpec::Resolve(ArrayCodecResolveParameters&& decoded,\n BytesCodecResolveParameters& encoded,\n ZarrCodecChainSpec* resolved_spec) const {\n auto chain = internal::MakeIntrusivePtr();\n std::optional temp_array_resolve_params[2];\n chain->array_to_array.reserve(array_to_array.size());\n chain->bytes_to_bytes.reserve(bytes_to_bytes.size());\n if (resolved_spec) {\n assert(resolved_spec != this);\n assert(resolved_spec->array_to_array.empty());\n resolved_spec->array_to_array.reserve(array_to_array.size());\n assert(!resolved_spec->array_to_bytes);\n assert(resolved_spec->bytes_to_bytes.empty());\n resolved_spec->bytes_to_bytes.reserve(bytes_to_bytes.size());\n }\n ArrayCodecResolveParameters* decoded_params = &decoded;\n size_t temp_i = 0;\n const auto resolve_array_to_array =\n [&](const ZarrArrayToArrayCodecSpec& codec_spec) -> absl::Status {\n auto& encoded_params = temp_array_resolve_params[(temp_i++) % 2].emplace();\n TENSORSTORE_ASSIGN_OR_RETURN(\n auto codec,\n codec_spec.Resolve(std::move(*decoded_params), encoded_params,\n resolved_spec\n ? &resolved_spec->array_to_array.emplace_back()\n : nullptr),\n CodecResolveError(codec_spec, \"resolving codec spec\", _));\n chain->array_to_array.push_back(std::move(codec));\n decoded_params = &encoded_params;\n return absl::OkStatus();\n };\n for (size_t i = 0; i < array_to_array.size(); ++i) {\n TENSORSTORE_RETURN_IF_ERROR(resolve_array_to_array(*array_to_array[i]));\n }\n std::optional temp_bytes_resolve_params[2];\n auto* bytes_decoded_params = &temp_bytes_resolve_params[0].emplace();\n ZarrArrayToBytesCodecSpec::Ptr temp_array_to_bytes_codec;\n auto* array_to_bytes_codec_ptr = this->array_to_bytes.get();\n if (!array_to_bytes_codec_ptr) {\n TENSORSTORE_ASSIGN_OR_RETURN(\n temp_array_to_bytes_codec,\n GetDefaultArrayToBytesCodecSpec(*decoded_params));\n array_to_bytes_codec_ptr = temp_array_to_bytes_codec.get();\n }\n DimensionIndex preferred_order[kMaxRank];\n if (DimensionIndex rank = decoded_params->rank;\n decoded_params->inner_order &&\n !array_to_bytes_codec_ptr->SupportsInnerOrder(\n *decoded_params, span(&preferred_order[0], rank))) {\n const auto& existing_inner_order = *decoded_params->inner_order;\n std::vector new_order(rank);\n for (DimensionIndex i = 0; i < rank; ++i) {\n new_order[preferred_order[i]] = existing_inner_order[i];\n }\n TENSORSTORE_RETURN_IF_ERROR(\n resolve_array_to_array(*internal::MakeIntrusivePtr(\n TransposeCodecSpec::Options{std::move(new_order)})));\n }\n TENSORSTORE_ASSIGN_OR_RETURN(\n chain->array_to_bytes,\n array_to_bytes_codec_ptr->Resolve(\n std::move(*decoded_params), *bytes_decoded_params,\n resolved_spec ? &resolved_spec->array_to_bytes : nullptr),\n CodecResolveError(*array_to_bytes, \"resolving codec spec\", _));\n if (chain->array_to_bytes->is_sharding_codec() && !bytes_to_bytes.empty()) {\n return absl::InvalidArgumentError(absl::StrFormat(\n \"Sharding codec %s is not compatible with subsequent bytes -> \"\n \"bytes codecs %s that apply to the entire shard. 
Instead, \"\n \"bytes -> bytes codecs may be specified as inner codecs that apply \"\n \"to each sub-chunk individually.\",\n jb::ToJson(array_to_bytes_codec_ptr, ZarrCodecJsonBinder)\n .value()\n .dump(),\n jb::ToJson(bytes_to_bytes, jb::Array(ZarrCodecJsonBinder))\n .value()\n .dump()));\n }\n for (size_t i = 0; i < bytes_to_bytes.size(); ++i) {\n auto& encoded_params = temp_bytes_resolve_params[(i + 1) % 2].emplace();\n const auto& codec_spec = *bytes_to_bytes[i];\n TENSORSTORE_ASSIGN_OR_RETURN(\n auto codec,\n codec_spec.Resolve(std::move(*bytes_decoded_params), encoded_params,\n resolved_spec\n ? &resolved_spec->bytes_to_bytes.emplace_back()\n : nullptr),\n CodecResolveError(codec_spec, \"resolving codec spec\", _));\n bytes_decoded_params = &encoded_params;\n chain->bytes_to_bytes.push_back(std::move(codec));\n }\n encoded = std::move(*bytes_decoded_params);\n return chain;\n}\nnamespace {\ntemplate \nstd::string MergeErrorMessage(const T& a, const T& b, const Binder& binder) {\n return absl::StrFormat(\"Cannot merge zarr codec constraints %s and %s\",\n jb::ToJson(a, binder).value().dump(),\n jb::ToJson(b, binder).value().dump());\n}\nstd::string MergeErrorMessage(const ZarrCodecSpec& a, const ZarrCodecSpec& b) {\n return MergeErrorMessage(ZarrCodecSpec::Ptr(&a), ZarrCodecSpec::Ptr(&b),\n ZarrCodecJsonBinder);\n}\ntemplate \nvoid EnsureMutableCodecSpec(internal::IntrusivePtr& ptr) {\n static_assert(std::is_base_of_v);\n assert(ptr);\n if (ptr->use_count() > 1) {\n ptr = internal::static_pointer_cast(ptr->Clone());\n }\n}\nabsl::Status MergeZarrCodecSpecs(ZarrCodecSpec::Ptr& target,\n const ZarrCodecSpec* source, bool strict) {\n if (!source) {\n return absl::OkStatus();\n }\n if (!target) {\n target.reset(source);\n return absl::OkStatus();\n }\n absl::Status status;\n const auto& target_ref = *target;\n const auto& source_ref = *source;\n if (typeid(target_ref) != typeid(source_ref)) {\n status = absl::FailedPreconditionError(\"\");\n } else {\n EnsureMutableCodecSpec(target);\n status = const_cast(*target).MergeFrom(*source, strict);\n }\n if (status.ok()) return absl::OkStatus();\n return tensorstore::MaybeAnnotateStatus(status,\n MergeErrorMessage(*target, *source));\n}\ntemplate \nabsl::Status MergeZarrCodecSpecs(typename T::Ptr& target, const T* source,\n bool strict) {\n static_assert(std::is_base_of_v);\n ZarrCodecSpec::Ptr target_base = std::move(target);\n auto status = MergeZarrCodecSpecs(target_base, source, strict);\n target = internal::static_pointer_cast(std::move(target_base));\n TENSORSTORE_RETURN_IF_ERROR(status);\n return absl::OkStatus();\n}\ntemplate \nabsl::Status MergeZarrCodecSpecs(std::vector& targets,\n const std::vector& sources, bool strict) {\n constexpr bool kIsArrayToArray =\n std::is_same_v;\n size_t merge_count = targets.size();\n bool size_mismatch = targets.size() != sources.size();\n if constexpr (kIsArrayToArray) {\n if (!strict) {\n if (sources.size() == targets.size() + 1 &&\n typeid(*sources.back()) == typeid(TransposeCodecSpec)) {\n targets.push_back(sources.back());\n size_mismatch = false;\n } else if (sources.size() + 1 == targets.size() &&\n typeid(*targets.back()) == typeid(TransposeCodecSpec)) {\n --merge_count;\n size_mismatch = false;\n }\n }\n }\n if (size_mismatch) {\n return tensorstore::MaybeAnnotateStatus(\n absl::FailedPreconditionError(absl::StrFormat(\n \"Mismatch in number of %s codecs (%d vs %d)\",\n kIsArrayToArray ? 
\"array -> array\" : \"bytes -> bytes\",\n targets.size(), sources.size())),\n MergeErrorMessage(targets, sources, jb::Array(ZarrCodecJsonBinder)));\n }\n for (size_t i = 0; i < merge_count; ++i) {\n TENSORSTORE_RETURN_IF_ERROR(\n MergeZarrCodecSpecs(targets[i], sources[i].get(), strict));\n }\n return absl::OkStatus();\n}\n} \nabsl::Status ZarrCodecChainSpec::MergeFrom(const ZarrCodecChainSpec& other,\n bool strict) {\n if (!strict) {\n size_t self_sharding_height = sharding_height();\n size_t other_sharding_height = other.sharding_height();\n if (self_sharding_height > other_sharding_height &&\n array_to_array.empty() && bytes_to_bytes.empty()) {\n EnsureMutableCodecSpec(array_to_bytes);\n return static_cast(\n const_cast(*array_to_bytes))\n .MergeSubChunkCodecsFrom(other, strict);\n }\n if (self_sharding_height < other_sharding_height &&\n other.array_to_array.empty() && other.bytes_to_bytes.empty()) {\n auto new_array_to_bytes_codec =\n internal::static_pointer_cast(\n other.array_to_bytes->Clone());\n TENSORSTORE_RETURN_IF_ERROR(\n const_cast(*new_array_to_bytes_codec)\n .MergeSubChunkCodecsFrom(*this, strict));\n array_to_array.clear();\n bytes_to_bytes.clear();\n array_to_bytes = std::move(new_array_to_bytes_codec);\n return absl::OkStatus();\n }\n }\n TENSORSTORE_RETURN_IF_ERROR(\n MergeZarrCodecSpecs(array_to_array, other.array_to_array, strict));\n TENSORSTORE_RETURN_IF_ERROR(\n MergeZarrCodecSpecs(array_to_bytes, other.array_to_bytes.get(), strict));\n TENSORSTORE_RETURN_IF_ERROR(\n MergeZarrCodecSpecs(bytes_to_bytes, other.bytes_to_bytes, strict));\n return absl::OkStatus();\n}\nabsl::Status MergeZarrCodecSpecs(\n std::optional& target,\n const std::optional& source, bool strict) {\n if (!target) {\n if (source) {\n target = *source;\n }\n return absl::OkStatus();\n }\n if (!source) {\n return absl::OkStatus();\n }\n return target->MergeFrom(*source, strict);\n}\nbool ZarrShardingCodecSpec::SupportsInnerOrder(\n const ArrayCodecResolveParameters& decoded,\n span preferred_inner_order) const {\n return true;\n}\nsize_t ZarrShardingCodecSpec::sharding_height() const {\n auto* sub_chunk_codecs = this->GetSubChunkCodecs();\n return 1 + (sub_chunk_codecs ? 
sub_chunk_codecs->sharding_height() : 0);\n}\nCodecSpec TensorStoreCodecSpec::Clone() const {\n return internal::CodecDriverSpec::Make(*this);\n}\nabsl::Status TensorStoreCodecSpec::DoMergeFrom(\n const internal::CodecDriverSpec& other_base) {\n if (typeid(other_base) != typeid(TensorStoreCodecSpec)) {\n return absl::InvalidArgumentError(\"\");\n }\n auto& other = static_cast(other_base);\n return MergeZarrCodecSpecs(codecs, other.codecs, false);\n}\nTENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(\n TensorStoreCodecSpec,\n jb::Sequence( \n jb::Member(\"codecs\",\n jb::Projection<&TensorStoreCodecSpec::codecs>(jb::Optional(\n ZarrCodecChainJsonBinder))) \n ))\nnamespace {\nconst internal::CodecSpecRegistration\n encoding_registration;\n} \n} \nnamespace internal {\nvoid CacheKeyEncoder::Encode(\n std::string* out, const internal_zarr3::ZarrCodecChainSpec& value) {\n internal::EncodeCacheKey(out, value.ToJson().value().dump());\n}\n} \n} \nTENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(\n tensorstore::internal_zarr3::ZarrCodecChainSpec,\n tensorstore::serialization::JsonBindableSerializer<\n tensorstore::internal_zarr3::ZarrCodecChainSpec>())"},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorstore/driver/zarr3/codec/codec_chain_spec.h\"\n#include \n#include \n#include \"absl/status/status.h\"\n#include \"tensorstore/codec_spec.h\"\n#include \"tensorstore/driver/zarr3/codec/codec_test_util.h\"\n#include \"tensorstore/internal/json_gtest.h\"\n#include \"tensorstore/util/status_testutil.h\"\nnamespace {\nusing ::tensorstore::CodecSpec;\nusing ::tensorstore::MatchesJson;\nusing ::tensorstore::MatchesStatus;\nusing ::tensorstore::internal_zarr3::GetDefaultBytesCodecJson;\nusing ::tensorstore::internal_zarr3::TestCodecMerge;\nusing ::tensorstore::internal_zarr3::ZarrCodecChainSpec;\nTEST(CodecMergeTest, Basic) {\n TENSORSTORE_ASSERT_OK_AND_ASSIGN(\n auto a,\n CodecSpec::FromJson({\n {\"driver\", \"zarr3\"},\n {\"codecs\",\n {{\n {\"name\", \"sharding_indexed\"},\n {\"configuration\",\n {\n {\"chunk_shape\", {30, 40, 50}},\n {\"index_codecs\",\n {GetDefaultBytesCodecJson(), {{\"name\", \"crc32c\"}}}},\n {\"codecs\",\n {\n {{\"name\", \"transpose\"},\n {\"configuration\", {{\"order\", {2, 0, 1}}}}},\n GetDefaultBytesCodecJson(),\n {{\"name\", \"gzip\"}, {\"configuration\", {{\"level\", 6}}}},\n }},\n }},\n }}},\n }));\n TENSORSTORE_ASSERT_OK_AND_ASSIGN(\n auto b, CodecSpec::FromJson(\n {{\"driver\", \"zarr3\"},\n {\"codecs\",\n {{{\"name\", \"gzip\"}, {\"configuration\", {{\"level\", 5}}}}}}}));\n EXPECT_THAT(a.MergeFrom(b),\n MatchesStatus(absl::StatusCode::kFailedPrecondition,\n \".*: Incompatible \\\"level\\\": 6 vs 5\"));\n}\nTEST(CodecChainSpecTest, MissingArrayToBytes) {\n EXPECT_THAT(ZarrCodecChainSpec::FromJson(::nlohmann::json::array_t()),\n MatchesStatus(absl::StatusCode::kInvalidArgument,\n \"array -> bytes codec must be specified\"));\n}\nTEST(CodecChainSpecTest, MergeCodecNameMismatch) {\n EXPECT_THAT(\n TestCodecMerge({\"gzip\"}, {\"crc32c\"}, true),\n MatchesStatus(absl::StatusCode::kFailedPrecondition, \"Cannot merge .*\"));\n}\nTEST(CodecChainSpecTest, MergeArrayToBytes) {\n EXPECT_THAT(\n TestCodecMerge(\n {{{\"name\", \"bytes\"}, {\"configuration\", {{\"endian\", \"little\"}}}}},\n ::nlohmann::json::array_t(), true),\n ::testing::Optional(MatchesJson(\n {{{\"name\", \"bytes\"}, {\"configuration\", {{\"endian\", \"little\"}}}}})));\n}\nTEST(CodecChainSpecTest, ExtraTranspose) {\n ::nlohmann::json a = {\n {{\"name\", \"transpose\"}, {\"configuration\", {{\"order\", {0, 
2, 1}}}}},\n {{\"name\", \"bytes\"}, {\"configuration\", {{\"endian\", \"little\"}}}},\n };\n ::nlohmann::json b = {\n {{\"name\", \"bytes\"}, {\"configuration\", {{\"endian\", \"little\"}}}},\n };\n EXPECT_THAT(TestCodecMerge(a, b, false),\n ::testing::Optional(MatchesJson(a)));\n EXPECT_THAT(\n TestCodecMerge(a, b, true),\n MatchesStatus(absl::StatusCode::kFailedPrecondition,\n \".*: Mismatch in number of array -> array codecs.*\"));\n}\nTEST(CodecChainSpecTest, ExtraSharding) {\n ::nlohmann::json a = {{\n {\"name\", \"sharding_indexed\"},\n {\"configuration\",\n {\n {\"chunk_shape\", {30, 40, 50}},\n {\"index_codecs\", {GetDefaultBytesCodecJson(), {{\"name\", \"crc32c\"}}}},\n {\"codecs\",\n {\n {{\"name\", \"transpose\"},\n {\"configuration\", {{\"order\", {2, 0, 1}}}}},\n GetDefaultBytesCodecJson(),\n {{\"name\", \"gzip\"}, {\"configuration\", {{\"level\", 6}}}},\n }},\n }},\n }};\n ::nlohmann::json b = {\n {{\"name\", \"transpose\"}, {\"configuration\", {{\"order\", {2, 0, 1}}}}},\n GetDefaultBytesCodecJson(),\n {{\"name\", \"gzip\"}, {\"configuration\", {{\"level\", 6}}}},\n };\n ::nlohmann::json c = {\n GetDefaultBytesCodecJson(),\n {{\"name\", \"gzip\"}, {\"configuration\", {{\"level\", 6}}}},\n };\n EXPECT_THAT(TestCodecMerge(a, b, false),\n ::testing::Optional(MatchesJson(a)));\n EXPECT_THAT(TestCodecMerge(a, c, false),\n ::testing::Optional(MatchesJson(a)));\n EXPECT_THAT(\n TestCodecMerge(a, b, true),\n MatchesStatus(absl::StatusCode::kFailedPrecondition,\n \".*: Mismatch in number of array -> array codecs.*\"));\n EXPECT_THAT(TestCodecMerge(a, c, true),\n MatchesStatus(absl::StatusCode::kFailedPrecondition,\n \"Cannot merge zarr codec constraints .*\"));\n}\n} "},"Code Url":{"kind":"string","value":"https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr3/codec/codec_chain_spec.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr3/codec/codec_chain_spec_test.cc"},"Commit Hash":{"kind":"string","value":"4f887a6430414cd6088e1743555015b10f116d50"}}},{"rowIdx":274,"cells":{"ID":{"kind":"string","value":"b0a5d668-8561-4789-820f-0d628bfa1730"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"journal"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/data/service/journal.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/data/service/journal_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/data/service/journal.h\"\n#include \n#include \n#include \n#include \n#include \"absl/memory/memory.h\"\n#include \"absl/status/status.h\"\n#include \"tensorflow/core/data/service/journal.pb.h\"\n#include \"tensorflow/core/lib/io/record_reader.h\"\n#include \"tensorflow/core/lib/io/record_writer.h\"\n#include \"tensorflow/core/platform/env.h\"\n#include \"tensorflow/core/platform/errors.h\"\n#include \"tensorflow/core/platform/path.h\"\n#include \"tensorflow/core/platform/regexp.h\"\nnamespace tensorflow {\nnamespace data {\nnamespace {\nconstexpr StringPiece kJournal = \"journal\";\nStatus ParseSequenceNumber(const std::string& journal_file,\n int64_t* sequence_number) {\n if (!RE2::FullMatch(journal_file, \".*_(\\\\d+)\", sequence_number)) {\n return errors::InvalidArgument(\"Failed to parse journal file name: \",\n journal_file);\n }\n return absl::OkStatus();\n}\n} \nstd::string 
DataServiceJournalFile(const std::string& journal_dir,\n int64_t sequence_number) {\n return io::JoinPath(journal_dir,\n absl::StrCat(kJournal, \"_\", sequence_number));\n}\nFileJournalWriter::FileJournalWriter(Env* env, const std::string& journal_dir)\n : env_(env), journal_dir_(journal_dir) {}\nStatus FileJournalWriter::EnsureInitialized() {\n if (writer_) {\n return absl::OkStatus();\n }\n std::vector journal_files;\n TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(journal_dir_));\n TF_RETURN_IF_ERROR(env_->GetChildren(journal_dir_, &journal_files));\n int64_t latest_sequence_number = -1;\n for (const auto& file : journal_files) {\n int64_t sequence_number;\n TF_RETURN_IF_ERROR(ParseSequenceNumber(file, &sequence_number));\n latest_sequence_number = std::max(latest_sequence_number, sequence_number);\n }\n std::string journal_file =\n DataServiceJournalFile(journal_dir_, latest_sequence_number + 1);\n TF_RETURN_IF_ERROR(env_->NewAppendableFile(journal_file, &file_));\n writer_ = std::make_unique(file_.get());\n VLOG(1) << \"Created journal writer to write to \" << journal_file;\n return absl::OkStatus();\n}\nStatus FileJournalWriter::Write(const Update& update) {\n TF_RETURN_IF_ERROR(EnsureInitialized());\n std::string s = update.SerializeAsString();\n if (s.empty()) {\n return errors::Internal(\"Failed to serialize update \", update.DebugString(),\n \" to string\");\n }\n TF_RETURN_IF_ERROR(writer_->WriteRecord(s));\n TF_RETURN_IF_ERROR(writer_->Flush());\n TF_RETURN_IF_ERROR(file_->Sync());\n if (VLOG_IS_ON(4)) {\n VLOG(4) << \"Wrote journal entry: \" << update.DebugString();\n }\n return absl::OkStatus();\n}\nFileJournalReader::FileJournalReader(Env* env, StringPiece journal_dir)\n : env_(env), journal_dir_(journal_dir) {}\nStatus FileJournalReader::EnsureInitialized() {\n if (reader_) {\n return absl::OkStatus();\n }\n return UpdateFile(DataServiceJournalFile(journal_dir_, 0));\n}\nStatus FileJournalReader::Read(Update& update, bool& end_of_journal) {\n TF_RETURN_IF_ERROR(EnsureInitialized());\n while (true) {\n tstring record;\n Status s = reader_->ReadRecord(&record);\n if (absl::IsOutOfRange(s)) {\n sequence_number_++;\n std::string next_journal_file =\n DataServiceJournalFile(journal_dir_, sequence_number_);\n if (absl::IsNotFound(env_->FileExists(next_journal_file))) {\n VLOG(3) << \"Next journal file \" << next_journal_file\n << \" does not exist. 
End of journal reached.\";\n end_of_journal = true;\n return absl::OkStatus();\n }\n TF_RETURN_IF_ERROR(UpdateFile(next_journal_file));\n continue;\n }\n TF_RETURN_IF_ERROR(s);\n if (!update.ParseFromString(record)) {\n return errors::DataLoss(\"Failed to parse journal record.\");\n }\n if (VLOG_IS_ON(4)) {\n VLOG(4) << \"Read journal entry: \" << update.DebugString();\n }\n end_of_journal = false;\n return absl::OkStatus();\n }\n}\nStatus FileJournalReader::UpdateFile(const std::string& filename) {\n VLOG(1) << \"Reading from journal file \" << filename;\n TF_RETURN_IF_ERROR(env_->NewRandomAccessFile(filename, &file_));\n io::RecordReaderOptions opts;\n opts.buffer_size = 2 << 20; \n reader_ = std::make_unique(file_.get(), opts);\n return absl::OkStatus();\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/data/service/journal.h\"\n#include \n#include \n#include \n#include \"absl/memory/memory.h\"\n#include \"absl/status/status.h\"\n#include \"tensorflow/core/data/service/common.pb.h\"\n#include \"tensorflow/core/data/service/journal.pb.h\"\n#include \"tensorflow/core/lib/core/status_test_util.h\"\n#include \"tensorflow/core/platform/errors.h\"\n#include \"tensorflow/core/platform/path.h\"\n#include \"tensorflow/core/platform/test.h\"\n#include \"tensorflow/core/protobuf/data_service.pb.h\"\nnamespace tensorflow {\nnamespace data {\nnamespace {\nusing ::testing::HasSubstr;\nbool NewJournalDir(std::string& journal_dir) {\n std::string filename = testing::TmpDir();\n if (!Env::Default()->CreateUniqueFileName(&filename, \"journal_dir\")) {\n return false;\n }\n journal_dir = filename;\n return true;\n}\nUpdate MakeCreateIterationUpdate() {\n Update update;\n CreateIterationUpdate* create_iteration = update.mutable_create_iteration();\n create_iteration->set_job_id(3);\n create_iteration->set_iteration_id(8);\n create_iteration->set_repetition(5);\n return update;\n}\nUpdate MakeFinishTaskUpdate() {\n Update update;\n FinishTaskUpdate* finish_task = update.mutable_finish_task();\n finish_task->set_task_id(8);\n return update;\n}\nUpdate MakeRegisterDatasetUpdate() {\n Update update;\n RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset();\n register_dataset->set_dataset_id(\"dataset_id\");\n register_dataset->set_fingerprint(3);\n return update;\n}\nStatus CheckJournalContent(StringPiece journal_dir,\n const std::vector& expected) {\n FileJournalReader reader(Env::Default(), journal_dir);\n for (const auto& update : expected) {\n Update result;\n bool end_of_journal = true;\n TF_RETURN_IF_ERROR(reader.Read(result, end_of_journal));\n EXPECT_FALSE(end_of_journal);\n EXPECT_EQ(result.SerializeAsString(), update.SerializeAsString());\n }\n Update result;\n bool end_of_journal = false;\n TF_RETURN_IF_ERROR(reader.Read(result, end_of_journal));\n EXPECT_TRUE(end_of_journal);\n return absl::OkStatus();\n}\n} \nTEST(Journal, RoundTripMultiple) {\n std::string journal_dir;\n EXPECT_TRUE(NewJournalDir(journal_dir));\n std::vector updates = {MakeCreateIterationUpdate(),\n MakeRegisterDatasetUpdate(),\n MakeFinishTaskUpdate()};\n FileJournalWriter writer(Env::Default(), journal_dir);\n for (const auto& update : updates) {\n TF_EXPECT_OK(writer.Write(update));\n }\n TF_EXPECT_OK(CheckJournalContent(journal_dir, updates));\n}\nTEST(Journal, AppendExistingJournal) {\n std::string journal_dir;\n EXPECT_TRUE(NewJournalDir(journal_dir));\n std::vector updates = {MakeCreateIterationUpdate(),\n MakeRegisterDatasetUpdate(),\n 
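// Hedged sketch, not from the dataset row: FileJournalWriter appends records
// to journal_<N> files and FileJournalReader replays them across file
// boundaries until the next sequence number is missing. A minimal round trip
// mirroring the RoundTripMultiple test above; journal_dir is a placeholder
// path.
FileJournalWriter writer(Env::Default(), journal_dir);
Update update;
update.mutable_finish_task()->set_task_id(8);
TF_RETURN_IF_ERROR(writer.Write(update));  // flushed and synced inside Write()
FileJournalReader reader(Env::Default(), journal_dir);
Update result;
bool end_of_journal = false;
TF_RETURN_IF_ERROR(reader.Read(result, end_of_journal));  // yields the update
TF_RETURN_IF_ERROR(reader.Read(result, end_of_journal));  // end_of_journal now true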
MakeFinishTaskUpdate()};\n for (const auto& update : updates) {\n FileJournalWriter writer(Env::Default(), journal_dir);\n TF_EXPECT_OK(writer.Write(update));\n }\n TF_EXPECT_OK(CheckJournalContent(journal_dir, updates));\n}\nTEST(Journal, MissingFile) {\n std::string journal_dir;\n EXPECT_TRUE(NewJournalDir(journal_dir));\n FileJournalReader reader(Env::Default(), journal_dir);\n Update result;\n bool end_of_journal = true;\n Status s = reader.Read(result, end_of_journal);\n EXPECT_TRUE(absl::IsNotFound(s));\n}\nTEST(Journal, NonRecordData) {\n std::string journal_dir;\n EXPECT_TRUE(NewJournalDir(journal_dir));\n TF_ASSERT_OK(Env::Default()->RecursivelyCreateDir(journal_dir));\n {\n std::unique_ptr file;\n TF_ASSERT_OK(Env::Default()->NewAppendableFile(\n DataServiceJournalFile(journal_dir, 0), &file));\n TF_ASSERT_OK(file->Append(\"not record data\"));\n }\n FileJournalReader reader(Env::Default(), journal_dir);\n Update result;\n bool end_of_journal = true;\n Status s = reader.Read(result, end_of_journal);\n EXPECT_THAT(s.message(), HasSubstr(\"corrupted record\"));\n EXPECT_EQ(s.code(), error::DATA_LOSS);\n}\nTEST(Journal, InvalidRecordData) {\n std::string journal_dir;\n EXPECT_TRUE(NewJournalDir(journal_dir));\n TF_ASSERT_OK(Env::Default()->RecursivelyCreateDir(journal_dir));\n {\n std::unique_ptr file;\n TF_ASSERT_OK(Env::Default()->NewAppendableFile(\n DataServiceJournalFile(journal_dir, 0), &file));\n auto writer = std::make_unique(file.get());\n TF_ASSERT_OK(writer->WriteRecord(\"not serialized proto\"));\n }\n FileJournalReader reader(Env::Default(), journal_dir);\n Update result;\n bool end_of_journal = true;\n Status s = reader.Read(result, end_of_journal);\n EXPECT_THAT(s.message(), HasSubstr(\"Failed to parse journal record\"));\n EXPECT_EQ(s.code(), error::DATA_LOSS);\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/journal.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/journal_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":275,"cells":{"ID":{"kind":"string","value":"9023f77c-6d2f-4bb9-b69d-4e2e666b234d"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"stream"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/tfrt/runtime/stream.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/tfrt/runtime/stream_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/tfrt/runtime/stream.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/base/thread_annotations.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/functional/any_invocable.h\"\n#include \"absl/log/check.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/synchronization/mutex.h\"\n#include \"absl/time/clock.h\"\n#include \"absl/time/time.h\"\n#include \"absl/utility/utility.h\"\n#include \"mlir/IR/Builders.h\" \n#include \"mlir/IR/BuiltinOps.h\" \n#include \"tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h\"\n#include \"tensorflow/core/framework/tensor.h\"\n#include \"tensorflow/core/framework/tensor.pb.h\"\n#include \"tsl/platform/random.h\"\n#include 
\"tsl/platform/threadpool_interface.h\"\n#include \"tsl/profiler/lib/traceme.h\"\nnamespace tensorflow {\nnamespace tfrt_stub {\nabsl::StatusOr> CreateStreamCallbackId(\n absl::string_view model_name, mlir::ModuleOp module) {\n mlir::Builder builder(module.getContext());\n std::vector ops;\n module->walk([&](mlir::TF::PwStreamResultsOp op) { ops.push_back(op); });\n if (ops.empty()) {\n return std::nullopt;\n }\n auto& stream_interface = GetGlobalStreamCallbackRegistry().stream_interface();\n auto controller_address = stream_interface.controller_address();\n auto controller_address_attr = builder.getStringAttr(controller_address);\n auto model_name_attr = builder.getStringAttr(model_name);\n const StreamCallbackId callback_id(\n static_cast(tsl::random::New64()));\n auto callback_id_attr = builder.getI64IntegerAttr(callback_id.id);\n for (auto op : ops) {\n op->setAttr(\"_controller_address\", controller_address_attr);\n op->setAttr(\"_model_name\", model_name_attr);\n op->setAttr(\"_callback_id\", callback_id_attr);\n }\n return callback_id;\n}\nabsl::Status StreamCallbackRegistry::CallbackState::Invoke(\n tsl::thread::ThreadPoolInterface* thread_pool, StreamedResult result) {\n {\n absl::MutexLock lock(&mu_);\n if (closed_) {\n return absl::InternalError(\n \"Failed to invole the callback that is closed.\");\n }\n ++num_outstanding_;\n }\n thread_pool->Schedule([this, result = std::move(result)]() mutable {\n InvokeCallback(std::move(result));\n absl::MutexLock lock(&mu_);\n --num_outstanding_;\n });\n return absl::OkStatus();\n}\nvoid StreamCallbackRegistry::CallbackState::Close() {\n {\n absl::MutexLock lock(&mu_);\n closed_ = true;\n auto not_running = [this]() ABSL_SHARED_LOCKS_REQUIRED(mu_) {\n return num_outstanding_ == 0;\n };\n mu_.Await(absl::Condition(&not_running));\n }\n}\nvoid StreamCallbackRegistry::CallbackState::InvokeCallback(\n StreamedResult result) {\n absl::Duration dequeue_latency = absl::Now() - result.enqueued_time;\n interface().RecordDequeueLatency(model_name_, dequeue_latency);\n tsl::profiler::TraceMe trace_me(\"StreamCallbackInvocation\");\n trace_me.AppendMetadata([&]() {\n return tsl::profiler::TraceMeEncode({\n {\"callback_id\", callback_id_.id},\n {\"step_id\", step_id_.id},\n });\n });\n absl::Time start_time = absl::Now();\n callback_(std::move(result.tensors));\n interface().RecordCallbackLatency(model_name_, absl::Now() - start_time);\n}\nabsl::StatusOr StreamCallbackRegistry::Register(\n absl::string_view model_name, StreamCallbackId callback_id, StepId step_id,\n absl::AnyInvocable<\n void(absl::flat_hash_map)>\n callback) {\n absl::MutexLock l(&mu_);\n const auto [it, inserted] =\n stream_callbacks_.insert({std::make_pair(callback_id, step_id), nullptr});\n if (!inserted) {\n return absl::AlreadyExistsError(absl::StrCat(\n \"Stream callback \", callback_id, \" @ \", step_id, \" already exists\"));\n }\n it->second = std::make_unique(this, model_name, callback_id,\n step_id, std::move(callback));\n return ScopedStreamCallback(this, callback_id, step_id);\n}\nabsl::Status StreamCallbackRegistry::Invoke(\n tsl::thread::ThreadPoolInterface* thread_pool, StreamCallbackId callback_id,\n StepId step_id, StreamedResult result) {\n absl::MutexLock lock(&mu_);\n auto iter = stream_callbacks_.find({callback_id, step_id});\n if (iter == stream_callbacks_.end()) {\n return absl::NotFoundError(absl::StrCat(\n \"Stream callback \", callback_id, \" @ \", step_id,\n \" does not exist; this usually indicates that a streaming signature \"\n \"was called by a 
non-streaming request\"));\n }\n auto* state = iter->second.get();\n DCHECK(state);\n return state->Invoke(thread_pool, std::move(result));\n}\nstd::unique_ptr\nStreamCallbackRegistry::Unregister(StreamCallbackId callback_id,\n StepId step_id) {\n absl::MutexLock l(&mu_);\n const auto it = stream_callbacks_.find({callback_id, step_id});\n if (it == stream_callbacks_.end()) {\n return nullptr;\n }\n auto state = std::move(it->second);\n stream_callbacks_.erase(it);\n return state;\n}\nScopedStreamCallback::ScopedStreamCallback(ScopedStreamCallback&& other)\n : registry_(other.registry_),\n callback_id_(other.callback_id_),\n step_id_(other.step_id_) {\n other.callback_id_ = std::nullopt;\n other.step_id_ = StepId::GetInvalidStepId();\n}\nScopedStreamCallback& ScopedStreamCallback::operator=(\n ScopedStreamCallback&& other) {\n Unregister();\n registry_ = other.registry_;\n callback_id_ = other.callback_id_;\n step_id_ = other.step_id_;\n other.callback_id_ = std::nullopt;\n other.step_id_ = StepId::GetInvalidStepId();\n return *this;\n}\nvoid ScopedStreamCallback::Unregister() {\n if (!callback_id_.has_value()) {\n return;\n }\n tsl::profiler::TraceMe trace_me(\"ScopedStreamCallback::Unregister\");\n trace_me.AppendMetadata([&]() {\n return tsl::profiler::TraceMeEncode({\n {\"callback_id\", callback_id_->id},\n {\"step_id\", step_id_.id},\n });\n });\n DCHECK(registry_);\n auto state = registry_->Unregister(*callback_id_, step_id_);\n DCHECK(state);\n state->Close();\n callback_id_.reset();\n}\nStreamInterfaceFactory& GetGlobalStreamInterfaceFactory() {\n static auto* stream_interface_factory = new StreamInterfaceFactory;\n return *stream_interface_factory;\n}\nStreamCallbackRegistry& GetGlobalStreamCallbackRegistry() {\n static auto* stream_callback_registry =\n new StreamCallbackRegistry(GetGlobalStreamInterfaceFactory()\n .CreateControllerStreamInterface()\n .value());\n return *stream_callback_registry;\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/tfrt/runtime/stream.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/log/check.h\"\n#include \"absl/log/log.h\"\n#include \"absl/memory/memory.h\"\n#include \"absl/status/status.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/time/clock.h\"\n#include \"absl/time/time.h\"\n#include \"tensorflow/core/framework/tensor.h\"\n#include \"tensorflow/core/framework/tensor_testutil.h\"\n#include \"tensorflow/core/tfrt/runtime/step_id.h\"\n#include \"tensorflow/core/tfrt/saved_model/saved_model_testutil.h\"\n#include \"tensorflow/core/tfrt/utils/thread_pool.h\"\n#include \"tsl/platform/env.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace tensorflow {\nnamespace tfrt_stub {\nnamespace {\nusing ::tensorflow::test::AsTensor;\nusing ::testing::AnyOf;\nusing ::testing::ElementsAreArray;\nusing ::testing::Pair;\nusing ::testing::UnorderedElementsAre;\nusing ::testing::status::StatusIs;\nTEST(StreamTest, Simple) {\n StreamCallbackId callback_id(1234);\n StepId step_id(5678);\n std::vector> outputs;\n ScopedStreamCallback scoped_stream_callback;\n {\n TF_ASSERT_OK_AND_ASSIGN(\n scoped_stream_callback,\n GetGlobalStreamCallbackRegistry().Register(\n \"test_model\", callback_id, step_id,\n [&](absl::flat_hash_map arg) {\n outputs.push_back(std::move(arg));\n }));\n std::vector> expected =\n {{{\"a\", AsTensor({100})}, {\"b\", AsTensor({200})}},\n {{\"c\", AsTensor({300})}}};\n auto thread = 
absl::WrapUnique(tsl::Env::Default()->StartThread(\n tsl::ThreadOptions(), \"fake_stream_client\", [&]() {\n for (const auto& map : expected) {\n TfThreadPool thread_pool(\"test\", 4);\n CHECK_OK(GetGlobalStreamCallbackRegistry().Invoke(\n &thread_pool, callback_id, step_id, {map, absl::Now()}));\n }\n }));\n }\n EXPECT_EQ(outputs.size(), 2);\n EXPECT_THAT(GetTfTensorData(outputs[0][\"a\"]),\n ElementsAreArray({100}));\n EXPECT_THAT(GetTfTensorData(outputs[0][\"b\"]),\n ElementsAreArray({200}));\n EXPECT_THAT(GetTfTensorData(outputs[1][\"c\"]),\n ElementsAreArray({300}));\n ScopedStreamCallback scoped_stream_callback_copy;\n scoped_stream_callback_copy = std::move(scoped_stream_callback);\n auto status = GetGlobalStreamCallbackRegistry().Register(\n \"test_model\", callback_id, step_id,\n [&](absl::flat_hash_map arg) {\n outputs.push_back(std::move(arg));\n });\n EXPECT_THAT(status, StatusIs(absl::StatusCode::kAlreadyExists));\n}\nTEST(StreamTest, MultipleWriters) {\n StreamCallbackId callback_id(1234);\n StepId step_id(5678);\n std::vector>> outputs;\n {\n TfThreadPool thread_pool(\"test\", 4);\n TF_ASSERT_OK_AND_ASSIGN(\n auto scoped_stream_callback,\n GetGlobalStreamCallbackRegistry().Register(\n \"test_model\", callback_id, step_id,\n [&](absl::flat_hash_map arg) {\n absl::flat_hash_map> out;\n for (const auto& p : arg) {\n out[p.first] = GetTfTensorData(p.second);\n }\n outputs.push_back(std::move(out));\n }));\n std::vector> expected =\n {{{\"a\", AsTensor({100})}, {\"b\", AsTensor({200})}},\n {{\"c\", AsTensor({300})}}};\n for (const auto& p : expected) {\n tsl::Env::Default()->SchedClosure([&, callback_id, step_id, p]() {\n TfThreadPool thread_pool(\"test\", 4);\n GetGlobalStreamCallbackRegistry()\n .Invoke(&thread_pool, callback_id, step_id, {p, absl::Now()})\n .IgnoreError();\n });\n }\n absl::SleepFor(absl::Microseconds(100));\n }\n LOG(INFO) << \"StreamCallback receives \" << outputs.size() << \" outputs.\";\n for (const auto& output : outputs) {\n EXPECT_THAT(\n output,\n AnyOf(UnorderedElementsAre(Pair(\"a\", ElementsAreArray({100})),\n Pair(\"b\", ElementsAreArray({200}))),\n UnorderedElementsAre(Pair(\"c\", ElementsAreArray({300})))));\n }\n}\nclass TestStreamControllerInterface : public StreamControllerInterface {\n public:\n TestStreamControllerInterface()\n : StreamControllerInterface(\"test_controller_address\") {}\n};\nTEST(StreamControllerInterface, Initialize) {\n GetGlobalStreamInterfaceFactory().RegisterController(\n []() { return std::make_unique(); });\n TF_ASSERT_OK_AND_ASSIGN(\n auto controller_interface,\n GetGlobalStreamInterfaceFactory().CreateControllerStreamInterface());\n EXPECT_EQ(controller_interface->controller_address(),\n \"test_controller_address\");\n}\nclass TestStreamWorkerInterface : public StreamWorkerInterface {\n public:\n explicit TestStreamWorkerInterface(std::string worker_address)\n : StreamWorkerInterface(worker_address) {}\n absl::Status InvokeStreamCallback(\n const StreamCallbackId& callback_id,\n const std::vector& names,\n const std::vector>>&\n responses) override {\n return absl::OkStatus();\n }\n};\nTEST(StreamWorkerInterface, Initialize) {\n GetGlobalStreamInterfaceFactory().RegisterWorker(\n [](absl::string_view address)\n -> absl::StatusOr> {\n return std::make_unique(\n \"test_worker_address\");\n });\n TF_ASSERT_OK_AND_ASSIGN(\n auto worker_interface,\n GetGlobalStreamInterfaceFactory().CreateWorkerStreamInterface()(\n \"test_worker_address\"));\n EXPECT_EQ(worker_interface->controller_address(), 
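// Hedged sketch, not from the dataset row: Register() hands back a
// ScopedStreamCallback whose destruction (or Unregister) drains outstanding
// invocations before closing the callback state. The map's template
// arguments were stripped by extraction and <std::string, tensorflow::Tensor>
// is an assumption; callback_id, step_id, outputs, and tensors are
// placeholders taken from the Simple test above.
TF_ASSIGN_OR_RETURN(
    auto scoped, GetGlobalStreamCallbackRegistry().Register(
                     "test_model", callback_id, step_id,
                     [&](absl::flat_hash_map<std::string, Tensor> arg) {
                       outputs.push_back(std::move(arg));
                     }));
TfThreadPool thread_pool("test", 4);
// A StreamedResult pairs the tensors with an enqueue timestamp, which
// InvokeCallback above uses to record dequeue latency.
TF_RETURN_IF_ERROR(GetGlobalStreamCallbackRegistry().Invoke(
    &thread_pool, callback_id, step_id, {tensors, absl::Now()}));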
\"test_worker_address\");\n}\nTEST(StepId, Generate) {\n StepId step_id(1234);\n EXPECT_EQ(step_id.id, 1234);\n StepIdGenerator step_id_generator;\n EXPECT_EQ(step_id_generator.GetNextStepId(), StepId(1));\n EXPECT_EQ(step_id_generator.GetNextStepId(), StepId(2));\n EXPECT_EQ(step_id_generator.GetNextStepId(), StepId(3));\n}\nTEST(StepId, GlobalInitial) {\n EXPECT_EQ(GetGlobalInitialStepId(), 0);\n TEST_ScopedInitialStepId test_id(127);\n EXPECT_EQ(GetGlobalInitialStepId(), 127);\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/stream.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/stream_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":276,"cells":{"ID":{"kind":"string","value":"748cb136-f55b-4f82-b4cb-af329ea655c6"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"sharding_propagation"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/sharding_propagation.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/sharding_propagation_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/sharding_propagation.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/base/attributes.h\"\n#include \"absl/base/call_once.h\"\n#include \"absl/container/flat_hash_map.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/log/check.h\"\n#include \"absl/status/status.h\"\n#include \"absl/strings/str_join.h\"\n#include \"absl/types/span.h\"\n#include \"xla/array.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/ir/hlo_sharding.h\"\n#include \"xla/hlo/ir/hlo_sharding_metadata.h\"\n#include \"xla/hlo/utils/hlo_sharding_util.h\"\n#include \"xla/protobuf_util.h\"\n#include \"xla/service/dot_as_convolution_util.h\"\n#include \"xla/service/host_memory_offload_annotations.h\"\n#include \"xla/service/spmd/shard_barrier_partitioner.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_tree.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/sharding_op_util.h\"\n#include \"xla/status_macros.h\"\n#include \"xla/util.h\"\n#include \"xla/xla_data.pb.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/logging.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace {\nstd::optional ReturnImprovedSharding(\n HloSharding sharding, HloInstruction* instruction,\n bool may_combine_partial_sharding,\n bool allow_aggressive_resharding = false) {\n return hlo_sharding_util::ReturnImprovedShardingImpl(\n std::move(sharding),\n instruction->has_sharding() ? 
&instruction->sharding() : nullptr,\n instruction->shape(), may_combine_partial_sharding,\n allow_aggressive_resharding);\n}\nstd::optional ReturnImprovedSubSharding(\n HloSharding sharding, HloInstruction* instruction, const ShapeIndex& index,\n bool may_combine_partial_sharding,\n bool allow_aggressive_resharding = false) {\n if (instruction->has_sharding()) {\n const HloSharding to_improved =\n instruction->sharding().GetSubSharding(instruction->shape(), index);\n return hlo_sharding_util::ReturnImprovedShardingImpl(\n std::move(sharding), &to_improved,\n ShapeUtil::GetSubshape(instruction->shape(), index),\n may_combine_partial_sharding, allow_aggressive_resharding);\n } else {\n return hlo_sharding_util::ReturnImprovedShardingImpl(\n std::move(sharding), nullptr,\n ShapeUtil::GetSubshape(instruction->shape(), index),\n may_combine_partial_sharding, allow_aggressive_resharding);\n }\n}\nbool MaybeImproveInstructionSharding(HloSharding sharding,\n HloInstruction* instruction,\n bool may_combine_partial_sharding,\n bool allow_aggressive_resharding = false) {\n if (auto new_sharding = ReturnImprovedSharding(\n std::move(sharding), instruction, may_combine_partial_sharding,\n allow_aggressive_resharding)) {\n instruction->set_sharding(std::move(*new_sharding));\n return true;\n }\n return false;\n}\nbool MaybeImproveInstructionSubSharding(\n HloSharding sharding, HloInstruction* instruction, const ShapeIndex& index,\n bool may_combine_partial_sharding,\n bool allow_aggressive_resharding = false) {\n if (instruction->shape().IsTuple()) {\n if (auto new_sub_sharding = ReturnImprovedSubSharding(\n std::move(sharding), instruction, index,\n may_combine_partial_sharding, allow_aggressive_resharding)) {\n HloSharding new_sharding =\n instruction->has_sharding()\n ? 
instruction->sharding()\n : HloSharding::Single(instruction->shape(),\n HloSharding::Replicate());\n ShapeTree sharding_shape_tree =\n new_sharding.GetAsShapeTree(instruction->shape());\n *sharding_shape_tree.mutable_element(index) = new_sub_sharding.value();\n instruction->set_sharding(HloSharding::Tuple(sharding_shape_tree));\n return true;\n } else {\n return false;\n }\n }\n CHECK(index.size() == 1 && index[0] == 0);\n return MaybeImproveInstructionSharding(std::move(sharding), instruction,\n may_combine_partial_sharding,\n allow_aggressive_resharding);\n}\nbool IsConvolutionKernelSmall(const HloInstruction* instruction) {\n CHECK_EQ(instruction->opcode(), HloOpcode::kConvolution);\n const HloInstruction* rhs = instruction->operand(1);\n const auto& dnums = instruction->convolution_dimension_numbers();\n int64_t kernel_dim_prod = 1;\n int64_t output_dim_prod = 1;\n for (int64_t i = 0; i < dnums.input_spatial_dimensions().size(); ++i) {\n int64_t kernel_dim =\n rhs->shape().dimensions(dnums.kernel_spatial_dimensions(i));\n kernel_dim_prod *= kernel_dim;\n int64_t output_dim =\n instruction->shape().dimensions(dnums.output_spatial_dimensions(i));\n output_dim_prod *= output_dim;\n if (kernel_dim >= output_dim &&\n (i < 2 || kernel_dim > 3 || kernel_dim_prod >= output_dim_prod)) {\n return false;\n }\n }\n return true;\n}\nbool IsPassthroughCustomOps(const HloInstruction* hlo) {\n if (hlo->IsCustomCall({\"Sharding\", \"X64Combine\", \"LayoutConstraint\"})) {\n return true;\n }\n if (hlo->operand_count() != 1 || !hlo->shape().IsArray() ||\n !hlo->operand(0)->shape().IsArray() ||\n hlo->operand(0)->shape().rank() != hlo->shape().rank()) {\n return false;\n }\n return hlo->IsCustomCall(\n {\"ResizeNearest\", \"ResizeBilinear\", \"ResizeNearestGrad\",\n \"ResizeBilinearGrad\", \"Cholesky\",\n host_memory_offload_annotations::kMoveToDeviceCustomCallTarget,\n host_memory_offload_annotations::kMoveToHostCustomCallTarget});\n}\nconst HloInstruction* PickRepresentativeOperand(\n const HloInstruction* instruction) {\n switch (instruction->opcode()) {\n case HloOpcode::kMap:\n case HloOpcode::kPad:\n case HloOpcode::kPower:\n case HloOpcode::kOptimizationBarrier:\n case HloOpcode::kReverse:\n case HloOpcode::kSlice:\n case HloOpcode::kShiftLeft:\n case HloOpcode::kShiftRightArithmetic:\n case HloOpcode::kShiftRightLogical:\n if (instruction->operand(0)->has_sharding()) {\n return instruction->operand(0);\n }\n return nullptr;\n case HloOpcode::kAbs:\n case HloOpcode::kAdd:\n case HloOpcode::kAnd:\n case HloOpcode::kAtan2:\n case HloOpcode::kBitcastConvert:\n case HloOpcode::kCeil:\n case HloOpcode::kClamp:\n case HloOpcode::kClz:\n case HloOpcode::kCompare:\n case HloOpcode::kComplex:\n case HloOpcode::kConcatenate:\n case HloOpcode::kConvert:\n case HloOpcode::kCopy:\n case HloOpcode::kCos:\n case HloOpcode::kAllGather:\n case HloOpcode::kAllReduce:\n case HloOpcode::kReduceScatter:\n case HloOpcode::kAllToAll:\n case HloOpcode::kCollectiveBroadcast:\n case HloOpcode::kCollectivePermute:\n case HloOpcode::kDivide:\n case HloOpcode::kErf:\n case HloOpcode::kExp:\n case HloOpcode::kExpm1:\n case HloOpcode::kFloor:\n case HloOpcode::kImag:\n case HloOpcode::kIsFinite:\n case HloOpcode::kLog:\n case HloOpcode::kLog1p:\n case HloOpcode::kLogistic:\n case HloOpcode::kMaximum:\n case HloOpcode::kMinimum:\n case HloOpcode::kMultiply:\n case HloOpcode::kNegate:\n case HloOpcode::kNot:\n case HloOpcode::kOr:\n case HloOpcode::kPopulationCount:\n case HloOpcode::kReal:\n case HloOpcode::kReducePrecision:\n 
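/* [standalone sketch, not part of sharding_propagation.cc] The "small
   kernel" heuristic in IsConvolutionKernelSmall above, restated over plain
   vectors of spatial extents; KernelIsSmall is a hypothetical name. A kernel
   stops counting as small once one spatial dim covers the output dim,
   subject to the same index/size/product guards as above.

#include <cstdint>
#include <vector>

bool KernelIsSmall(const std::vector<int64_t>& kernel_dims,
                   const std::vector<int64_t>& output_dims) {
  int64_t kernel_dim_prod = 1;
  int64_t output_dim_prod = 1;
  for (size_t i = 0; i < kernel_dims.size(); ++i) {
    kernel_dim_prod *= kernel_dims[i];
    output_dim_prod *= output_dims[i];
    if (kernel_dims[i] >= output_dims[i] &&
        (i < 2 || kernel_dims[i] > 3 || kernel_dim_prod >= output_dim_prod)) {
      return false;  // Kernel covers the output along this dim.
    }
  }
  return true;
}
// KernelIsSmall({3, 3}, {224, 224}) == true for a typical 3x3 convolution.
*/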
case HloOpcode::kRemainder:\n case HloOpcode::kRoundNearestAfz:\n case HloOpcode::kRoundNearestEven:\n case HloOpcode::kRsqrt:\n case HloOpcode::kSelect:\n case HloOpcode::kSign:\n case HloOpcode::kSin:\n case HloOpcode::kTopK:\n case HloOpcode::kSort:\n case HloOpcode::kSqrt:\n case HloOpcode::kCbrt:\n case HloOpcode::kSubtract:\n case HloOpcode::kStochasticConvert:\n case HloOpcode::kTan:\n case HloOpcode::kTanh:\n case HloOpcode::kWhile:\n case HloOpcode::kXor: {\n const HloInstruction* best_operand = nullptr;\n for (const HloInstruction* operand : instruction->operands()) {\n if (operand->has_sharding() &&\n (best_operand == nullptr ||\n hlo_sharding_util::IsShardingMoreSpecific(\n operand->sharding(), best_operand->sharding()))) {\n best_operand = operand;\n }\n }\n return best_operand;\n }\n case HloOpcode::kCustomCall: {\n if (IsPassthroughCustomOps(instruction)) {\n return instruction->operand(0);\n }\n return nullptr;\n }\n case HloOpcode::kAddDependency:\n case HloOpcode::kAfterAll:\n case HloOpcode::kAsyncStart:\n case HloOpcode::kAsyncUpdate:\n case HloOpcode::kAsyncDone:\n case HloOpcode::kAllGatherStart:\n case HloOpcode::kAllGatherDone:\n case HloOpcode::kAllReduceStart:\n case HloOpcode::kAllReduceDone:\n case HloOpcode::kBatchNormGrad:\n case HloOpcode::kBatchNormInference:\n case HloOpcode::kBatchNormTraining:\n case HloOpcode::kBitcast:\n case HloOpcode::kBroadcast:\n case HloOpcode::kCall:\n case HloOpcode::kCholesky:\n case HloOpcode::kCollectivePermuteDone:\n case HloOpcode::kCollectivePermuteStart:\n case HloOpcode::kConditional:\n case HloOpcode::kConstant:\n case HloOpcode::kConvolution:\n case HloOpcode::kCopyDone:\n case HloOpcode::kCopyStart:\n case HloOpcode::kDomain:\n case HloOpcode::kDot:\n case HloOpcode::kDynamicSlice:\n case HloOpcode::kDynamicUpdateSlice:\n case HloOpcode::kDynamicReshape:\n case HloOpcode::kFft:\n case HloOpcode::kFusion:\n case HloOpcode::kGather:\n case HloOpcode::kGetTupleElement:\n case HloOpcode::kInfeed:\n case HloOpcode::kIota:\n case HloOpcode::kOutfeed:\n case HloOpcode::kParameter:\n case HloOpcode::kPartitionId:\n case HloOpcode::kRecv:\n case HloOpcode::kRecvDone:\n case HloOpcode::kReduce:\n case HloOpcode::kReduceWindow:\n case HloOpcode::kReplicaId:\n case HloOpcode::kReshape:\n case HloOpcode::kRng:\n case HloOpcode::kRngGetAndUpdateState:\n case HloOpcode::kRngBitGenerator:\n case HloOpcode::kScatter:\n case HloOpcode::kSelectAndScatter:\n case HloOpcode::kSend:\n case HloOpcode::kSendDone:\n case HloOpcode::kTranspose:\n case HloOpcode::kTriangularSolve:\n case HloOpcode::kTuple:\n case HloOpcode::kGetDimensionSize:\n case HloOpcode::kSetDimensionSize:\n return nullptr;\n }\n}\nbool SupportSpatialPartitioning(\n const HloInstruction* instruction,\n const ShardingPropagation::ComputationMap& computation_map, bool is_spmd,\n bool allow_spmd_sharding_propagation_to_output,\n bool allow_spmd_sharding_propagation_to_parameters,\n const CustomCallShardingHelper* sharding_helper) {\n const bool is_entry_root = instruction->parent()\n ->parent()\n ->entry_computation()\n ->root_instruction() == instruction;\n if (instruction->parent()->root_instruction() == instruction &&\n computation_map.find(instruction->parent()) == computation_map.end() &&\n !(is_entry_root && allow_spmd_sharding_propagation_to_output)) {\n return false;\n }\n if (instruction->IsElementwise() &&\n (instruction->opcode() != HloOpcode::kRng || is_spmd)) {\n return true;\n }\n switch (instruction->opcode()) {\n case HloOpcode::kBroadcast:\n case 
HloOpcode::kConcatenate:\n case HloOpcode::kConditional:\n case HloOpcode::kConstant:\n case HloOpcode::kConvolution:\n case HloOpcode::kOptimizationBarrier:\n case HloOpcode::kDot:\n case HloOpcode::kDynamicSlice:\n case HloOpcode::kDynamicUpdateSlice:\n case HloOpcode::kGather:\n case HloOpcode::kGetTupleElement:\n case HloOpcode::kInfeed:\n case HloOpcode::kIota:\n case HloOpcode::kPad:\n case HloOpcode::kReduceWindow:\n case HloOpcode::kReshape:\n case HloOpcode::kScatter:\n case HloOpcode::kSelectAndScatter:\n case HloOpcode::kSlice:\n case HloOpcode::kSort:\n case HloOpcode::kTranspose:\n case HloOpcode::kTuple:\n case HloOpcode::kWhile:\n case HloOpcode::kReduce:\n case HloOpcode::kRngBitGenerator:\n case HloOpcode::kAllReduce:\n case HloOpcode::kReduceScatter:\n return true;\n case HloOpcode::kParameter:\n return allow_spmd_sharding_propagation_to_parameters ||\n computation_map.find(instruction->parent()) !=\n computation_map.end();\n case HloOpcode::kReverse:\n return is_spmd;\n case HloOpcode::kCustomCall:\n if (!is_spmd) {\n return false;\n }\n if (auto* partitioner =\n GetCustomCallPartitioner(instruction->custom_call_target())) {\n return partitioner->IsCustomCallShardable(instruction);\n }\n return (IsPassthroughCustomOps(instruction) ||\n sharding_helper->IsCustomCallShardable(instruction));\n default:\n return false;\n }\n}\nstd::optional LookaheadUserSharding(HloInstruction* instr,\n bool is_spmd,\n const CallGraph& call_graph) {\n if (instr->user_count() != 1) {\n return std::nullopt;\n }\n HloInstruction* current_user = instr->users()[0];\n std::optional sharding;\n std::vector users_chain = {instr, current_user};\n while (!current_user->has_sharding()) {\n if (current_user->users().size() != 1) {\n users_chain.clear();\n break;\n }\n current_user = current_user->users()[0];\n users_chain.push_back(current_user);\n }\n if (users_chain.empty()) {\n return std::nullopt;\n }\n for (int i = users_chain.size() - 1; i >= 1; --i) {\n HloInstruction* user = users_chain[i];\n HloInstruction* current = users_chain[i - 1];\n CHECK(user->has_sharding());\n sharding = ShardingPropagation::GetShardingFromUser(\n *current, *user, INT64_MAX, is_spmd, call_graph,\n nullptr);\n if (sharding.has_value() && i != 1) {\n current->set_sharding(*sharding);\n continue;\n }\n break;\n }\n for (int i = 1; i < users_chain.size() - 1; ++i) {\n users_chain[i]->clear_sharding();\n }\n return sharding;\n}\nbool InferGatherParallelShardingFromOperands(\n HloInstruction* instruction,\n const hlo_sharding_util::GatherScatterParallelDims& parallel_dims,\n bool may_combine_partial_sharding) {\n CHECK(DynCast(instruction));\n bool changed = false;\n auto output_parallel_dims = hlo_sharding_util::GetGatherParallelOutputDims(\n *instruction, parallel_dims);\n if (hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(0))) {\n changed |= MaybeImproveInstructionSharding(\n hlo_sharding_util::\n InferGatherScatterParallelShardingFromOperandSharding(\n instruction->operand(0)->sharding(), instruction->shape(),\n absl::MakeConstSpan(parallel_dims.operand_parallel_dims),\n absl::MakeConstSpan(output_parallel_dims)),\n instruction, may_combine_partial_sharding);\n }\n if (hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(1))) {\n changed |= MaybeImproveInstructionSharding(\n hlo_sharding_util::\n InferGatherScatterParallelShardingFromOperandSharding(\n instruction->operand(1)->sharding(), instruction->shape(),\n absl::MakeConstSpan(parallel_dims.indices_parallel_dims),\n 
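/* [standalone sketch with a toy Node type; not an XLA API]
   LookaheadUserSharding above walks a strict single-user chain until it
   reaches a user that already carries a sharding, then propagates that
   sharding back down the chain. The chain-collection step in isolation:

#include <vector>

struct Node {
  std::vector<Node*> users;
  bool has_sharding = false;
};

// Returns {start, ..., first_sharded_user}, or {} if any node fans out.
std::vector<Node*> CollectSingleUserChain(Node* start) {
  std::vector<Node*> chain = {start};
  Node* cur = start;
  while (!cur->has_sharding) {
    if (cur->users.size() != 1) return {};  // Ambiguous fan-out: give up.
    cur = cur->users[0];
    chain.push_back(cur);
  }
  return chain;
}
*/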
absl::MakeConstSpan(output_parallel_dims)),\n instruction, may_combine_partial_sharding);\n }\n return changed;\n}\nbool InferScatterParallelShardingFromOperands(\n HloInstruction* instruction,\n const hlo_sharding_util::GatherScatterParallelDims& parallel_dims,\n bool may_combine_partial_sharding) {\n HloScatterInstruction* scatter = DynCast(instruction);\n CHECK(scatter);\n const int64_t operand_count = scatter->scatter_operand_count();\n auto scatter_operands = scatter->scatter_operands();\n auto scatter_indices = scatter->scatter_indices();\n auto scatter_updates = scatter->scatter_updates();\n bool changed = false;\n auto update_parallel_dims = hlo_sharding_util::GetScatterParallelUpdateDims(\n *instruction, parallel_dims);\n Shape shape = operand_count == 1\n ? instruction->shape()\n : ShapeUtil::GetSubshape(instruction->shape(), {0});\n for (int64_t i = 0; i != operand_count; ++i) {\n if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_operands[i])) {\n changed |= MaybeImproveInstructionSubSharding(\n hlo_sharding_util::\n InferGatherScatterParallelShardingFromOperandSharding(\n scatter_operands[i]->sharding(), shape,\n absl::MakeConstSpan(parallel_dims.operand_parallel_dims),\n absl::MakeConstSpan(parallel_dims.operand_parallel_dims)),\n instruction, {i}, may_combine_partial_sharding);\n }\n }\n if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_indices)) {\n auto parallel_sharding_from_indices = hlo_sharding_util::\n InferGatherScatterParallelShardingFromOperandSharding(\n scatter_indices->sharding(), shape,\n absl::MakeConstSpan(parallel_dims.indices_parallel_dims),\n absl::MakeConstSpan(parallel_dims.operand_parallel_dims));\n for (int64_t i = 0; i != operand_count; ++i) {\n changed |= MaybeImproveInstructionSubSharding(\n parallel_sharding_from_indices, instruction, {i},\n may_combine_partial_sharding);\n }\n }\n for (int64_t i = 0; i != operand_count; ++i) {\n if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_updates[i])) {\n changed |= MaybeImproveInstructionSubSharding(\n hlo_sharding_util::\n InferGatherScatterParallelShardingFromOperandSharding(\n scatter_updates[i]->sharding(), shape,\n absl::MakeConstSpan(update_parallel_dims),\n absl::MakeConstSpan(parallel_dims.operand_parallel_dims)),\n instruction, {i}, may_combine_partial_sharding);\n }\n }\n return changed;\n}\nbool CanPropagateThroughAtAggressiveLevel(const HloInstruction& inst,\n int64_t aggressiveness) {\n if (aggressiveness < 1 &&\n !(inst.IsElementwise() || inst.IsCustomCall(\"Sharding\")) &&\n inst.opcode() != HloOpcode::kTranspose &&\n inst.opcode() != HloOpcode::kReshape &&\n inst.opcode() != HloOpcode::kTuple &&\n inst.opcode() != HloOpcode::kGetTupleElement &&\n inst.opcode() != HloOpcode::kWhile &&\n inst.opcode() != HloOpcode::kDynamicSlice &&\n inst.opcode() != HloOpcode::kDynamicUpdateSlice &&\n inst.opcode() != HloOpcode::kOptimizationBarrier &&\n inst.opcode() != HloOpcode::kConcatenate &&\n inst.opcode() != HloOpcode::kCall && inst.opcode() != HloOpcode::kCopy) {\n return false;\n }\n if (aggressiveness < 2 && inst.opcode() == HloOpcode::kBroadcast) {\n return false;\n }\n return true;\n}\nbool SameShardingMetadata(const HloSharding& a, const HloSharding& b) {\n DCHECK_EQ(a, b);\n auto same_metadata = [](absl::Span a,\n absl::Span b) {\n if (a.size() != b.size()) return false;\n for (int i = 0, e = a.size(); i < e; ++i) {\n if (!protobuf_util::ProtobufEquals(a[i], b[i])) {\n return false;\n }\n }\n return true;\n };\n if (a.IsTuple()) {\n for (int i = 0, e = a.tuple_elements().size(); i < 
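/* [standalone sketch; OpClass and MayPropagate are hypothetical names] The
   aggressiveness gate in CanPropagateThroughAtAggressiveLevel above reads as
   a small policy table: level 0 only allows elementwise/sharding ops plus a
   short list of shape-ish ops; level 1 unlocks everything except broadcast;
   level 2 and above unlocks broadcast too.

#include <cstdint>

enum class OpClass { kElementwiseOrShapeOp, kBroadcast, kOther };

bool MayPropagate(OpClass op, int64_t aggressiveness) {
  switch (op) {
    case OpClass::kElementwiseOrShapeOp:
      return true;                 // Allowed at every level.
    case OpClass::kBroadcast:
      return aggressiveness >= 2;  // Gated hardest.
    case OpClass::kOther:
      return aggressiveness >= 1;
  }
  return false;
}
*/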
e; ++i) {\n if (!same_metadata(a.tuple_elements()[i].metadata(),\n b.tuple_elements()[i].metadata())) {\n return false;\n }\n }\n return true;\n } else {\n return same_metadata(a.metadata(), b.metadata());\n }\n}\nbool AssignShardingMetadata(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n bool changed = false;\n for (HloComputation* computation : module->computations(execution_threads)) {\n for (HloInstruction* instruction : computation->instructions()) {\n const auto& metadata = instruction->metadata();\n if (!instruction->has_sharding() || metadata.ByteSizeLong() == 0) {\n continue;\n }\n HloSharding sharding_with_metadata =\n instruction->sharding().WithMetadata({metadata},\n false);\n if (!SameShardingMetadata(instruction->sharding(),\n sharding_with_metadata)) {\n instruction->set_sharding(std::move(sharding_with_metadata));\n changed = true;\n }\n }\n }\n return changed;\n}\nbool RemoveShardingMetadata(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n bool changed = false;\n for (HloComputation* computation : module->computations(execution_threads)) {\n for (HloInstruction* instruction : computation->instructions()) {\n if (!instruction->has_sharding()) {\n continue;\n }\n HloSharding sharding_no_metadata =\n instruction->sharding().WithoutMetadata();\n if (!SameShardingMetadata(instruction->sharding(),\n sharding_no_metadata)) {\n instruction->set_sharding(std::move(sharding_no_metadata));\n changed = true;\n }\n }\n }\n return changed;\n}\nabsl::Status CheckAndUpdateDeviceAssignmentsInWhileBody(\n HloInstruction* while_instruction) {\n auto bad_status = [](HloInstruction* instruction, int64_t device,\n HloInstruction* channel_instruction,\n int64_t correct_device) {\n return FailedPrecondition(\n \"Instruction: %s is on device: %d, which conflicts with device: %d \"\n \"of channel instruction: %s\",\n instruction->name(), device, correct_device,\n channel_instruction->name());\n };\n CHECK_EQ(while_instruction->opcode(), HloOpcode::kWhile);\n HloComputation* while_body = while_instruction->while_body();\n std::map devices_to_instructions;\n std::optional unique_device = std::nullopt;\n HloInstruction* channel_instruction = nullptr;\n for (HloInstruction* instruction : while_body->instructions()) {\n if (instruction->sharding_unique_device()) {\n auto opcode = instruction->opcode();\n int64_t device = *instruction->sharding_unique_device();\n if (unique_device.has_value()) {\n if (*unique_device != device) {\n return bad_status(instruction, device, channel_instruction,\n *unique_device);\n }\n } else if (((opcode == HloOpcode::kSend || opcode == HloOpcode::kRecv) &&\n !Cast(instruction)\n ->is_host_transfer())\n || ((opcode == HloOpcode::kAllReduce ||\n opcode == HloOpcode::kReduceScatter) &&\n instruction->channel_id())) {\n channel_instruction = instruction;\n unique_device = device;\n if (!devices_to_instructions.empty()) {\n for (auto it = devices_to_instructions.begin();\n it != devices_to_instructions.end(); ++it) {\n if (*unique_device != it->first) {\n return bad_status(it->second, it->first, channel_instruction,\n *unique_device);\n }\n }\n }\n } else {\n devices_to_instructions[device] = instruction;\n }\n }\n }\n if (unique_device.has_value()) {\n auto while_device = while_instruction->sharding_unique_device();\n if (while_device.has_value() && *unique_device != *while_device) {\n return bad_status(while_instruction, *while_device, channel_instruction,\n *unique_device);\n }\n auto body_root = 
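/* [standalone sketch with toy types; not an XLA API] The while-body check in
   CheckAndUpdateDeviceAssignmentsInWhileBody above enforces one rule: once a
   channel instruction pins a unique device, every other device-assigned
   instruction in the body must agree, including ones seen earlier. The same
   bookkeeping in isolation:

#include <cstdint>
#include <map>
#include <optional>
#include <string>

struct DeviceConsistency {
  std::optional<int64_t> pinned_device;
  std::map<int64_t, std::string> seen;  // device -> first instruction on it

  // Returns the name of a conflicting instruction, if any.
  std::optional<std::string> Observe(int64_t device, const std::string& name,
                                     bool is_channel_op) {
    if (pinned_device.has_value() && *pinned_device != device) return name;
    if (is_channel_op && !pinned_device.has_value()) {
      pinned_device = device;
      for (const auto& [d, n] : seen) {
        if (d != device) return n;  // Retroactive conflict.
      }
    } else if (!is_channel_op) {
      seen.emplace(device, name);
    }
    return std::nullopt;
  }
};
*/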
while_body->root_instruction();\n auto root_device = body_root->sharding_unique_device();\n if (!root_device.has_value()) {\n body_root->set_device_sharding(*unique_device);\n } else if (*unique_device != *root_device) {\n return bad_status(body_root, *root_device, channel_instruction,\n *unique_device);\n }\n }\n return absl::OkStatus();\n}\nbool RefineManualAutoShardingFromAuto(\n const HloSharding& to_merge, absl::Span unspecified_dims,\n HloSharding* auto_sharding, HloSharding* manual_sharding) {\n if (!manual_sharding->IsManualSubgroup() ||\n auto_sharding->IsManualSubgroup() ||\n !manual_sharding->HasPartialReplication() ||\n manual_sharding->subgroup_types().size() != 2) {\n return false;\n }\n HloSharding partial_rep =\n hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(\n to_merge, unspecified_dims);\n if (partial_rep.IsTileMaximal()) {\n return false;\n }\n if (!hlo_sharding_util::MergeShardingIfCompatible(partial_rep,\n auto_sharding)) {\n return false;\n }\n const int64_t data_rank = partial_rep.TiledDataRank();\n std::vector partial_manual_shape(\n partial_rep.tile_assignment().dimensions().begin(),\n partial_rep.tile_assignment().dimensions().end());\n partial_manual_shape.insert(partial_manual_shape.begin() + data_rank, 1);\n auto partial_tiling_for_manual =\n partial_rep.tile_assignment().Reshape(partial_manual_shape);\n HloSharding partial_rep_for_manual = HloSharding::PartialTile(\n partial_tiling_for_manual, partial_rep.metadata());\n auto man_tiling = manual_sharding->tile_assignment();\n if (manual_sharding->subgroup_types().back() != OpSharding::REPLICATED) {\n std::vector transposed_dims(man_tiling.num_dimensions());\n absl::c_iota(transposed_dims, 0);\n std::swap(transposed_dims.back(), transposed_dims[data_rank]);\n man_tiling = man_tiling.Transpose(transposed_dims);\n }\n HloSharding tmp_sharding_for_merging = HloSharding::PartialTile(\n std::move(man_tiling), manual_sharding->metadata());\n if (!hlo_sharding_util::MergeShardingIfCompatible(\n partial_rep_for_manual, &tmp_sharding_for_merging)) {\n return false;\n }\n std::vector subgroup_types;\n subgroup_types.push_back(OpSharding::MANUAL);\n if (tmp_sharding_for_merging.HasPartialReplication()) {\n subgroup_types.push_back(OpSharding::REPLICATED);\n }\n *manual_sharding = HloSharding::Subgroup(\n tmp_sharding_for_merging.tile_assignment(), subgroup_types,\n tmp_sharding_for_merging.metadata());\n return true;\n}\nbool RefineManualAutoShardingFromManual(\n const HloSharding& to_merge, absl::Span unspecified_dims,\n HloSharding* auto_sharding, HloSharding* manual_sharding) {\n if (!to_merge.IsManualSubgroup() || !manual_sharding->IsManualSubgroup() ||\n !manual_sharding->HasPartialReplication() ||\n auto_sharding->IsManualSubgroup() ||\n manual_sharding->subgroup_types().size() != 2) {\n return false;\n }\n HloSharding partial_rep =\n hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(\n to_merge, unspecified_dims);\n if (partial_rep.IsTileMaximal()) {\n return false;\n }\n if (!hlo_sharding_util::MergeShardingIfCompatible(partial_rep,\n manual_sharding)) {\n return false;\n }\n HloSharding partial_rep_for_auto = HloSharding::Subgroup(\n partial_rep.tile_assignment(),\n std::vector(partial_rep.subgroup_types().size(),\n OpSharding::REPLICATED),\n partial_rep.metadata());\n if (!hlo_sharding_util::MergeShardingIfCompatible(partial_rep_for_auto,\n auto_sharding)) {\n return false;\n }\n return true;\n}\nbool InferUnspecifiedDimsFromOperand(HloInstruction* annotate_op,\n absl::Span 
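/* [standalone sketch; SwapLastWith is a hypothetical name]
   RefineManualAutoShardingFromAuto above moves the MANUAL subgroup dim into
   the trailing position by transposing the tile assignment with an identity
   permutation whose last entry is swapped with the data rank. Building that
   permutation in isolation:

#include <cstdint>
#include <numeric>
#include <utility>
#include <vector>

std::vector<int64_t> SwapLastWith(int64_t num_dims, int64_t data_rank) {
  std::vector<int64_t> perm(num_dims);
  std::iota(perm.begin(), perm.end(), int64_t{0});
  std::swap(perm.back(), perm[data_rank]);
  return perm;
}
// SwapLastWith(4, 2) == {0, 1, 3, 2}
*/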
unspecified_dims,\n HloInstruction** man_conversion_op_after) {\n CHECK(annotate_op->IsCustomCall(\"Sharding\") ||\n annotate_op->opcode() == HloOpcode::kCopy);\n if (!hlo_sharding_util::IsSpatiallyPartitioned(annotate_op->operand(0))) {\n return false;\n }\n const HloSharding& operand_sharding = annotate_op->operand(0)->sharding();\n if (!operand_sharding.IsTiled()) {\n return false;\n }\n HloInstruction* man_conversion_op = nullptr;\n if (annotate_op->user_count() == 1) {\n HloInstruction* user = annotate_op->users()[0];\n if (user->IsCustomCall(\"SPMDFullToShardShape\") ||\n user->IsCustomCall(\"SPMDShardToFullShape\")) {\n std::vector user_unspec_dims;\n if (!sharding_op_util::ParseAttributes(\n Cast(user)->opaque(),\n &user_unspec_dims)\n .ok()) {\n return false;\n }\n absl::c_sort(user_unspec_dims);\n if (unspecified_dims != user_unspec_dims) {\n return false;\n }\n man_conversion_op = user;\n }\n }\n *man_conversion_op_after = man_conversion_op;\n if (man_conversion_op == nullptr) {\n HloSharding partial_replicated =\n hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(\n operand_sharding, unspecified_dims);\n HloSharding sharding = annotate_op->sharding();\n if (!hlo_sharding_util::MergeShardingIfCompatible(partial_replicated,\n &sharding)) {\n return false;\n }\n annotate_op->set_sharding(sharding);\n return true;\n }\n if (man_conversion_op->IsCustomCall(\"SPMDFullToShardShape\")) {\n HloSharding auto_sharding = annotate_op->sharding();\n HloSharding manual_sharding = man_conversion_op->sharding();\n if (!RefineManualAutoShardingFromAuto(operand_sharding, unspecified_dims,\n &auto_sharding, &manual_sharding)) {\n return false;\n }\n annotate_op->set_sharding(auto_sharding);\n man_conversion_op->set_sharding(manual_sharding);\n return true;\n }\n CHECK(man_conversion_op->IsCustomCall(\"SPMDShardToFullShape\"));\n HloSharding manual_sharding = annotate_op->sharding();\n HloSharding auto_sharding = man_conversion_op->sharding();\n if (!RefineManualAutoShardingFromManual(operand_sharding, unspecified_dims,\n &auto_sharding, &manual_sharding)) {\n return false;\n }\n annotate_op->set_sharding(manual_sharding);\n man_conversion_op->set_sharding(auto_sharding);\n return true;\n}\nbool InferUnspecifiedDimsFromOneUser(HloInstruction* annotate_op,\n const HloInstruction* user,\n int64_t aggressiveness, bool is_spmd,\n absl::Span unspecified_dims,\n HloInstruction* man_conversion_op,\n const CallGraph& call_graph) {\n CHECK(annotate_op->IsCustomCall(\"Sharding\") ||\n annotate_op->opcode() == HloOpcode::kCopy);\n if (!user->has_sharding() || !user->sharding().IsTiled()) {\n return false;\n }\n std::optional user_sharding =\n ShardingPropagation::GetShardingFromUser(\n man_conversion_op == nullptr ? 
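/* [standalone sketch over a toy per-dim partition-count vector; the real
   helper also folds the freed partitions into a replication subgroup, which
   this sketch omits] PartiallyReplicateTiledShardingOnAllDimsExcept, used
   throughout the unspecified-dims flow above, keeps the tiling only on the
   listed dims and replicates everything else:

#include <cstdint>
#include <vector>

std::vector<int64_t> KeepTilingOnlyOn(std::vector<int64_t> tile_dims,
                                      const std::vector<int64_t>& keep) {
  std::vector<bool> kept(tile_dims.size(), false);
  for (int64_t d : keep) kept[d] = true;
  for (size_t i = 0; i < tile_dims.size(); ++i) {
    if (!kept[i]) tile_dims[i] = 1;  // Replicated along this dim.
  }
  return tile_dims;
}
// KeepTilingOnlyOn({4, 2, 1}, {0}) == {4, 1, 1}
*/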
*annotate_op : *man_conversion_op,\n *user, aggressiveness, is_spmd, call_graph,\n nullptr);\n if (!user_sharding.has_value() || user_sharding->IsTileMaximal()) {\n return false;\n }\n if (man_conversion_op == nullptr) {\n HloSharding partial_replicated =\n hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(\n *user_sharding, unspecified_dims);\n HloSharding sharding = annotate_op->sharding();\n if (!hlo_sharding_util::MergeShardingIfCompatible(partial_replicated,\n &sharding)) {\n return false;\n }\n annotate_op->set_sharding(sharding);\n return true;\n }\n if (man_conversion_op->IsCustomCall(\"SPMDFullToShardShape\")) {\n HloSharding auto_sharding = annotate_op->sharding();\n HloSharding manual_sharding = man_conversion_op->sharding();\n if (!RefineManualAutoShardingFromManual(*user_sharding, unspecified_dims,\n &auto_sharding, &manual_sharding)) {\n return false;\n }\n annotate_op->set_sharding(auto_sharding);\n man_conversion_op->set_sharding(manual_sharding);\n return true;\n }\n CHECK(man_conversion_op->IsCustomCall(\"SPMDShardToFullShape\"));\n HloSharding manual_sharding = annotate_op->sharding();\n HloSharding auto_sharding = man_conversion_op->sharding();\n if (!RefineManualAutoShardingFromAuto(*user_sharding, unspecified_dims,\n &auto_sharding, &manual_sharding)) {\n return false;\n }\n annotate_op->set_sharding(manual_sharding);\n man_conversion_op->set_sharding(auto_sharding);\n return true;\n}\nbool InferUnspecifiedDimsFromUsers(HloInstruction* annotate_op,\n absl::Span unspecified_dims,\n int64_t aggressiveness, bool is_spmd,\n HloInstruction** man_conversion_op_after,\n const CallGraph& call_graph) {\n HloInstruction* man_conversion_op = nullptr;\n if (annotate_op->user_count() == 1) {\n HloInstruction* user = annotate_op->users()[0];\n if (user->IsCustomCall(\"SPMDFullToShardShape\") ||\n user->IsCustomCall(\"SPMDShardToFullShape\")) {\n std::vector user_unspec_dims;\n absl::c_sort(user_unspec_dims);\n if (!sharding_op_util::ParseAttributes(\n Cast(user)->opaque(),\n &user_unspec_dims)\n .ok() ||\n unspecified_dims != user_unspec_dims) {\n return false;\n }\n man_conversion_op = user;\n }\n }\n *man_conversion_op_after = man_conversion_op;\n HloInstruction* op_for_users =\n man_conversion_op == nullptr ? 
annotate_op : man_conversion_op;\n bool changed = false;\n for (HloInstruction* user : op_for_users->users()) {\n changed |= InferUnspecifiedDimsFromOneUser(\n annotate_op, user, aggressiveness, is_spmd, unspecified_dims,\n man_conversion_op, call_graph);\n }\n return changed;\n}\nbool InferUnspecifiedDimsFromShardGroup(\n HloInstruction* annotate_op, absl::Span unspecified_dims,\n const absl::flat_hash_set& shard_group) {\n CHECK(annotate_op->IsCustomCall(\"Sharding\") ||\n annotate_op->opcode() == HloOpcode::kCopy);\n if (annotate_op->IsCustomCall(spmd::kShardBarrierTo)) {\n return false;\n }\n bool changed = false;\n for (const HloInstruction* member : shard_group) {\n if (member == annotate_op) {\n continue;\n }\n if (member->IsCustomCall(spmd::kShardBarrierFrom)) {\n continue;\n }\n if (!hlo_sharding_util::IsSpatiallyPartitioned(member)) {\n continue;\n }\n const HloSharding& member_sharding = member->sharding();\n if (!member_sharding.IsTiled()) {\n continue;\n }\n HloSharding partial_replicated =\n hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(\n member_sharding, unspecified_dims);\n HloSharding sharding = annotate_op->sharding();\n if (!hlo_sharding_util::MergeShardingIfCompatible(partial_replicated,\n &sharding)) {\n continue;\n }\n annotate_op->set_sharding(sharding);\n changed |= true;\n }\n return changed;\n}\nbool IsCSEPreventionTarget(const HloInstruction* instruction) {\n return instruction->opcode() == HloOpcode::kBroadcast &&\n instruction->operand(0)->shape().rank() == 0;\n}\nHloSharding SetCSEPreventionSharding(const HloSharding& sharding) {\n OpMetadata metadata;\n metadata.set_op_name(\"_sharding_propagation_cse_prevention\");\n return sharding.WithMetadata({metadata}, true);\n}\nbool IsCSEPreventionSharding(const HloSharding& sharding) {\n if (sharding.metadata().size() != 1) {\n return false;\n }\n return sharding.metadata()[0].op_name() ==\n \"_sharding_propagation_cse_prevention\";\n}\n} \nbool InferDotShardingFromOperands(\n HloInstruction* instruction, const CallGraph& call_graph,\n const dot_as_convolution_util::DotConvolutionDimsInfo& dnums,\n bool may_combine_partial_sharding, bool is_spmd) {\n auto from_operand = [&](int64_t operand_index) {\n auto operand = instruction->operand(operand_index);\n const HloSharding& operand_sharding = operand->sharding();\n if (operand_sharding.IsTileMaximal()) {\n return operand_sharding;\n }\n std::vector contracting_dims;\n contracting_dims.reserve(dnums.contracting_dims.size());\n for (const auto& dim : dnums.contracting_dims) {\n contracting_dims.push_back(operand_index == 0 ? dim.lhs : dim.rhs);\n }\n for (const auto& dim : operand_index == 0\n ? dnums.rhs_non_contracting_dims\n : dnums.lhs_non_contracting_dims) {\n int64_t d = operand_index == 0 ? dim.lhs : dim.rhs;\n if (d >= 0) {\n contracting_dims.push_back(d);\n }\n }\n auto replicate_contracting_dims =\n hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(\n operand_sharding, contracting_dims);\n std::vector out_dims_to_op_perm(instruction->shape().rank(), -1);\n std::vector op_dims_to_output_perm(operand->shape().rank(), -1);\n for (const auto& dim : dnums.batch_dims) {\n out_dims_to_op_perm[dim.output] = operand_index == 0 ? dim.lhs : dim.rhs;\n op_dims_to_output_perm[operand_index == 0 ? dim.lhs : dim.rhs] =\n dim.output;\n }\n for (const auto& dim : operand_index == 0\n ? dnums.lhs_non_contracting_dims\n : dnums.rhs_non_contracting_dims) {\n out_dims_to_op_perm[dim.output] = operand_index == 0 ? 
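/* [standalone sketch with toy types] Sharding propagation marks shardings it
   invented purely to prevent CSE with a sentinel op_name in the sharding
   metadata (SetCSEPreventionSharding / IsCSEPreventionSharding above). The
   same tag-and-test pattern on a toy metadata struct:

#include <string>
#include <vector>

struct Meta { std::string op_name; };
struct Sharding { std::vector<Meta> metadata; };

constexpr char kCsePrevention[] = "_sharding_propagation_cse_prevention";

Sharding Tag(Sharding s) {
  s.metadata = {Meta{kCsePrevention}};  // Overwrite, as WithMetadata(..., true) does.
  return s;
}

bool IsTagged(const Sharding& s) {
  return s.metadata.size() == 1 && s.metadata[0].op_name == kCsePrevention;
}
*/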
dim.lhs : dim.rhs;\n op_dims_to_output_perm[operand_index == 0 ? dim.lhs : dim.rhs] =\n dim.output;\n }\n return *hlo_sharding_util::TransposeShardingWithCollapsedDims(\n replicate_contracting_dims, op_dims_to_output_perm,\n out_dims_to_op_perm);\n };\n std::optional improved_operand_0;\n std::optional improved_operand_1;\n if (hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(0))) {\n improved_operand_0 = ReturnImprovedSharding(\n from_operand(0), instruction, may_combine_partial_sharding,\n false);\n }\n if (hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(1))) {\n improved_operand_1 = ReturnImprovedSharding(\n from_operand(1), instruction, may_combine_partial_sharding,\n false);\n }\n if (!improved_operand_0.has_value() && !improved_operand_1.has_value()) {\n return false;\n }\n if (improved_operand_0.has_value() && !improved_operand_1.has_value()) {\n instruction->set_sharding(*improved_operand_0);\n return true;\n }\n if (!improved_operand_0.has_value() && improved_operand_1.has_value()) {\n instruction->set_sharding(*improved_operand_1);\n return true;\n }\n CHECK(improved_operand_0.has_value() && improved_operand_1.has_value());\n std::optional lookahead_sharding =\n LookaheadUserSharding(instruction, is_spmd, call_graph);\n std::array sharding_priority = {*improved_operand_0,\n *improved_operand_1};\n bool priority_defined_with_lookahead = false;\n if (lookahead_sharding.has_value()) {\n const bool operand_0_is_lookahead_subtiling =\n hlo_sharding_util::IsSubTilingOrEqualSharding(\n instruction->shape(), *lookahead_sharding, *improved_operand_0);\n const bool operand_1_is_lookahead_subtiling =\n hlo_sharding_util::IsSubTilingOrEqualSharding(\n instruction->shape(), *lookahead_sharding, *improved_operand_1);\n if (operand_0_is_lookahead_subtiling && !operand_1_is_lookahead_subtiling) {\n priority_defined_with_lookahead = true;\n }\n if (!operand_0_is_lookahead_subtiling && operand_1_is_lookahead_subtiling) {\n instruction->set_sharding(*improved_operand_1);\n std::swap(sharding_priority[0], sharding_priority[1]);\n priority_defined_with_lookahead = true;\n }\n }\n if (!priority_defined_with_lookahead &&\n ShapeUtil::ByteSizeOf(instruction->operand(0)->shape()) <\n ShapeUtil::ByteSizeOf(instruction->operand(1)->shape())) {\n std::swap(sharding_priority[0], sharding_priority[1]);\n }\n instruction->set_sharding(sharding_priority[0]);\n MaybeImproveInstructionSharding(sharding_priority[1], instruction,\n may_combine_partial_sharding);\n return true;\n}\nbool InferConvolutionShardingFromOperands(HloInstruction* instruction,\n const CallGraph& call_graph,\n int64_t aggressiveness,\n bool may_combine_partial_sharding,\n bool is_spmd) {\n auto get_partitions_for_dims =\n [&](const HloInstruction* inst,\n absl::Span<\n const dot_as_convolution_util::DotConvolutionDimsInfo::DimNums>\n dims,\n int lhs_or_rhs) {\n int64_t partitions = 1;\n if (!inst->has_sharding()) {\n return partitions;\n }\n const auto& sharding = inst->sharding();\n if (sharding.IsTileMaximal()) {\n return partitions;\n }\n for (const auto& dim : dims) {\n if (lhs_or_rhs == 0) {\n partitions *= sharding.tile_assignment().dim(dim.lhs);\n } else {\n CHECK_EQ(lhs_or_rhs, 1);\n partitions *= sharding.tile_assignment().dim(dim.rhs);\n }\n }\n return partitions;\n };\n auto dot_dims =\n dot_as_convolution_util::ParseConvolutionDimsInfo(instruction);\n const int64_t lhs_conv_spatial_partitions = get_partitions_for_dims(\n instruction->operand(0), dot_dims.conv_spatial_dims, 0);\n const int64_t 
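/* [standalone sketch; PartitionsOnDims is a hypothetical name]
   get_partitions_for_dims above multiplies the tile-assignment extents over
   a chosen set of dims to decide whether the convolution's spatial dims are
   partitioned at all. The core product in isolation:

#include <cstdint>
#include <vector>

int64_t PartitionsOnDims(const std::vector<int64_t>& tile_dims,
                         const std::vector<int64_t>& dims) {
  int64_t partitions = 1;
  for (int64_t d : dims) partitions *= tile_dims[d];
  return partitions;
}
// PartitionsOnDims({2, 1, 4}, {0, 2}) == 8
*/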
rhs_conv_spatial_partitions = get_partitions_for_dims(\n instruction->operand(1), dot_dims.conv_spatial_dims, 1);\n if (dot_dims.conv_spatial_dims.empty() ||\n (lhs_conv_spatial_partitions == 1 && rhs_conv_spatial_partitions == 1 &&\n instruction->batch_group_count() == 1 &&\n instruction->feature_group_count() == 1)) {\n return InferDotShardingFromOperands(instruction, call_graph, dot_dims,\n may_combine_partial_sharding, is_spmd);\n }\n const auto& dnums = instruction->convolution_dimension_numbers();\n const HloInstruction* lhs = instruction->operand(0);\n auto get_tiled_sharding_based_on_lhs = [&] {\n CHECK(!lhs->sharding().IsTileMaximal());\n std::vector output_to_lhs_indices(instruction->shape().rank());\n output_to_lhs_indices[dnums.output_batch_dimension()] =\n dnums.input_batch_dimension();\n output_to_lhs_indices[dnums.output_feature_dimension()] =\n dnums.input_feature_dimension();\n for (int64_t i = 0; i < dnums.input_spatial_dimensions_size(); ++i) {\n output_to_lhs_indices[dnums.output_spatial_dimensions(i)] =\n dnums.input_spatial_dimensions(i);\n }\n return hlo_sharding_util::TransposeSharding(lhs->sharding(),\n output_to_lhs_indices);\n };\n if (!hlo_sharding_util::IsSpatiallyPartitioned(lhs)) {\n return false;\n }\n if (lhs->sharding().IsTileMaximal()) {\n return MaybeImproveInstructionSharding(lhs->sharding(), instruction,\n may_combine_partial_sharding);\n }\n if (IsConvolutionKernelSmall(instruction)) {\n const auto& tile_assignment = lhs->sharding().tile_assignment();\n if (tile_assignment.dim(dnums.input_feature_dimension()) > 1) {\n return false;\n }\n return MaybeImproveInstructionSharding(get_tiled_sharding_based_on_lhs(),\n instruction,\n may_combine_partial_sharding);\n }\n return MaybeImproveInstructionSharding(\n hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(\n lhs->sharding(), {dnums.input_batch_dimension()}),\n instruction, may_combine_partial_sharding);\n}\nstd::optional InferBroadcastOperandSharding(\n const HloInstruction& instruction, bool is_spmd) {\n if (instruction.sharding().IsReplicated() ||\n instruction.sharding().IsManual()) {\n return instruction.sharding();\n }\n std::vector dims_to_replicate;\n bool needs_replication = false;\n for (int64_t i = 0; i < instruction.shape().rank(); ++i) {\n if (absl::c_count(instruction.dimensions(), i) == 0) {\n dims_to_replicate.push_back(i);\n if (instruction.sharding().tile_assignment().dim(i) > 1) {\n needs_replication = true;\n }\n }\n }\n if (!is_spmd && needs_replication) {\n return std::nullopt;\n }\n return hlo_sharding_util::RemoveShapeDimensions(\n hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(\n instruction.sharding(), dims_to_replicate),\n dims_to_replicate);\n}\nbool InferReduceShardingFromOperand(HloInstruction* instruction,\n bool may_combine_partial_sharding,\n bool is_spmd) {\n auto get_maybe_tuple_sharding = [&](HloSharding sharding) {\n if (instruction->shape().IsArray()) {\n return sharding;\n }\n std::vector tuple(instruction->shape().tuple_shapes_size(),\n std::move(sharding));\n return HloSharding::Tuple(instruction->shape(), tuple);\n };\n auto* reduce = Cast(instruction);\n bool changed = false;\n for (int64_t i = 0; i != reduce->inputs().size(); ++i) {\n HloInstruction* operand = reduce->inputs()[i];\n if (!hlo_sharding_util::IsSpatiallyPartitioned(operand)) {\n continue;\n }\n if (operand->sharding().IsManual()) {\n changed |= MaybeImproveInstructionSubSharding(\n operand->sharding(), reduce, {i}, may_combine_partial_sharding,\n 
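/* [standalone sketch; BroadcastDimsToReplicate is a hypothetical name] For a
   broadcast, InferBroadcastOperandSharding above derives the operand
   sharding by replicating the output sharding along every output dim that is
   NOT mapped from the operand (not listed in the broadcast dimensions), then
   deleting those dims. Picking dims_to_replicate in isolation:

#include <algorithm>
#include <cstdint>
#include <vector>

std::vector<int64_t> BroadcastDimsToReplicate(
    int64_t output_rank, const std::vector<int64_t>& broadcast_dims) {
  std::vector<int64_t> dims_to_replicate;
  for (int64_t i = 0; i < output_rank; ++i) {
    if (std::count(broadcast_dims.begin(), broadcast_dims.end(), i) == 0) {
      dims_to_replicate.push_back(i);  // New dim created by the broadcast.
    }
  }
  return dims_to_replicate;
}
// BroadcastDimsToReplicate(3, {1}) == {0, 2}
*/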
ComputeNonRootUsers(instruction) == 1);\n continue;\n }\n if (operand->sharding().IsReplicated() ||\n (!is_spmd &&\n absl::c_any_of(instruction->dimensions(), [operand](int64_t dim) {\n return operand->sharding().tile_assignment().dim(dim) > 1;\n }))) {\n changed |= MaybeImproveInstructionSharding(\n get_maybe_tuple_sharding(\n hlo_sharding_util::ReplicateAllDataDims(operand->sharding())),\n reduce, may_combine_partial_sharding,\n ComputeNonRootUsers(instruction) == 1);\n continue;\n }\n auto after_partial_replication =\n operand->sharding().IsReplicated()\n ? operand->sharding()\n : hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(\n operand->sharding(), reduce->dimensions());\n if (after_partial_replication.IsReplicated()) {\n changed |= MaybeImproveInstructionSharding(\n get_maybe_tuple_sharding(after_partial_replication), reduce,\n may_combine_partial_sharding,\n ComputeNonRootUsers(instruction) == 1);\n continue;\n }\n HloSharding new_sharding =\n get_maybe_tuple_sharding(hlo_sharding_util::RemoveShapeDimensions(\n after_partial_replication, reduce->dimensions()));\n changed |= MaybeImproveInstructionSharding(\n std::move(new_sharding), reduce, may_combine_partial_sharding,\n ComputeNonRootUsers(reduce) == 1);\n }\n return changed;\n}\nabsl::StatusOr ProcessShardingInstruction(\n HloModule* module,\n const absl::flat_hash_set& execution_threads,\n bool replace_sharding_with_copy,\n absl::flat_hash_map>*\n unspecified_dims,\n std::vector* saved_root_shardings,\n absl::flat_hash_map* saved_parameter_shardings,\n absl::flat_hash_map*\n instruction_to_shard_group_id,\n absl::flat_hash_map>*\n shard_group_id_to_shard_as_group,\n absl::flat_hash_map>*\n shard_group_id_to_shard_like_group,\n const std::vector*\n allow_spmd_sharding_propagation_to_parameters_vector,\n bool remove_unknown_shardings) {\n bool changed = false;\n const bool use_shard_group = instruction_to_shard_group_id &&\n shard_group_id_to_shard_as_group &&\n shard_group_id_to_shard_like_group;\n auto process_shard_group_instruction =\n [&](HloInstruction* instruction,\n bool replaced_with_copy) -> absl::StatusOr {\n if (replace_sharding_with_copy) {\n if (use_shard_group && instruction->has_sharding() &&\n instruction->sharding().IsShardGroup()) {\n if (instruction->IsCustomCall(\"Sharding\")) {\n CHECK(instruction->operand(0)->opcode() != HloOpcode::kParameter ||\n (allow_spmd_sharding_propagation_to_parameters_vector &&\n allow_spmd_sharding_propagation_to_parameters_vector->size() ==\n module->entry_computation()->num_parameters() &&\n allow_spmd_sharding_propagation_to_parameters_vector->at(\n instruction->operand(0)->parameter_number())));\n }\n if (instruction->IsCustomCall(\"Sharding\") && !replaced_with_copy) {\n HloSharding operand_sharding =\n instruction->operand(0)->has_sharding()\n ? 
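/* [standalone sketch with toy types] The get_maybe_tuple_sharding helper
   used above broadcasts one result sharding across all results of a
   variadic reduce, so multi-output reduces receive a uniform tuple sharding:

#include <cstdint>
#include <string>
#include <vector>

struct TupleSharding { std::vector<std::string> elements; };

TupleSharding MaybeTuple(const std::string& sharding, int64_t num_results) {
  return TupleSharding{
      std::vector<std::string>(static_cast<size_t>(num_results), sharding)};
}
// MaybeTuple("{devices=[2,1]0,1}", 2) yields two identical element shardings.
*/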
instruction->operand(0)->sharding()\n : HloSharding::Unknown();\n operand_sharding.SetShardGroup(\n instruction->sharding().GetShardGroup());\n instruction->mutable_operand(0)->set_sharding(\n std::move(operand_sharding));\n return true;\n } else {\n const int64_t shard_group_id =\n instruction->sharding().GetShardGroup().shard_group_id;\n (*instruction_to_shard_group_id)[instruction] = shard_group_id;\n if (instruction->sharding().IsShardAs()) {\n auto& shard_as_group =\n (*shard_group_id_to_shard_as_group)[shard_group_id];\n if (!shard_as_group.empty()) {\n CHECK(ShapeUtil::SameDimensions(\n instruction->shape(), (*shard_as_group.begin())->shape()))\n << \"Instruction: \" << instruction->ToString()\n << \" has different shape from the shapes of the other \"\n \"instructions within the same shard_as group: \"\n << (*shard_as_group.begin())->shape().ToString();\n }\n shard_as_group.insert(instruction);\n } else {\n auto& shard_like_group =\n (*shard_group_id_to_shard_like_group)[shard_group_id];\n if (!shard_like_group.empty()) {\n CHECK(ShapeUtil::SameDimensions(\n instruction->shape(), (*shard_like_group.begin())->shape()))\n << \"Instruction: \" << instruction->ToString()\n << \" has different shape from the shapes of the other \"\n \"instructions within the same shard_like group: \"\n << (*shard_like_group.begin())->shape().ToString();\n }\n shard_like_group.insert(instruction);\n }\n HloSharding sharding = instruction->sharding();\n sharding.ClearShardGroup();\n instruction->set_sharding(std::move(sharding));\n }\n }\n }\n return false;\n };\n for (HloComputation* computation : module->computations(execution_threads)) {\n auto instructions = computation->MakeInstructionPostOrder();\n for (auto it = instructions.rbegin(); it != instructions.rend(); ++it) {\n HloInstruction* instruction = *it;\n if (instruction->IsCustomCall(\"Sharding\")) {\n TF_RET_CHECK(instruction->has_sharding())\n << \"Sharding instruction must have a sharding attribute\";\n VLOG(3) << \"ProcessShardingInstruction: \" << instruction->ToString();\n HloSharding original_sharding = instruction->sharding();\n std::vector unspec_dims;\n TF_RETURN_IF_ERROR(sharding_op_util::ParseAttributes(\n Cast(instruction)->opaque(),\n &unspec_dims));\n bool replaced_with_copy =\n replace_sharding_with_copy &&\n (!original_sharding.IsUnknown() || remove_unknown_shardings ||\n instruction->operand(0)->opcode() == HloOpcode::kParameter);\n if (replaced_with_copy) {\n auto copy = computation->AddInstruction(HloInstruction::CreateUnary(\n instruction->shape(), HloOpcode::kCopy,\n instruction->mutable_operand(0)));\n TF_ASSIGN_OR_RETURN(\n std::ignore, computation->ReplaceInstruction(\n instruction, copy, false,\n false,\n false));\n copy->set_sharding(std::move(original_sharding));\n instruction = copy;\n changed = true;\n }\n TF_ASSIGN_OR_RETURN(\n bool shard_group_remove_instruction,\n process_shard_group_instruction(instruction, replaced_with_copy));\n if (!unspec_dims.empty()) {\n absl::c_sort(unspec_dims);\n unspecified_dims->emplace(instruction, std::move(unspec_dims));\n } else if (!instruction->operand(0)->has_sharding()) {\n instruction->mutable_operand(0)->set_sharding(\n instruction->sharding());\n }\n if (shard_group_remove_instruction) {\n TF_ASSIGN_OR_RETURN(std::ignore,\n computation->ReplaceInstruction(\n instruction, instruction->mutable_operand(0),\n false,\n false,\n false));\n }\n } else {\n TF_ASSIGN_OR_RETURN(std::ignore,\n process_shard_group_instruction(\n instruction, false));\n }\n }\n }\n HloInstruction* 
root_instr = module->entry_computation()->root_instruction();\n if (saved_root_shardings != nullptr && root_instr->shape().IsTuple() &&\n root_instr->has_sharding()) {\n saved_root_shardings->reserve(\n root_instr->sharding().tuple_elements().size());\n for (const HloSharding& sharding :\n root_instr->sharding().tuple_elements()) {\n saved_root_shardings->push_back(sharding);\n }\n }\n if (saved_parameter_shardings != nullptr) {\n auto params = module->entry_computation()->parameter_instructions();\n for (int64_t i = 0; i < params.size(); ++i) {\n if (params[i]->has_sharding()) {\n saved_parameter_shardings->insert({i, params[i]->sharding()});\n }\n }\n }\n return changed;\n}\nint64_t ComputeNonRootUsers(const HloInstruction* instr) {\n int64_t non_root_users = instr->users().size();\n for (int i = 0; i < instr->users().size(); ++i) {\n if (instr->users()[i] == instr->parent()->root_instruction()) {\n --non_root_users;\n }\n }\n return non_root_users;\n}\n absl::Status ShardingPropagation::NormalizeDomain(\n const DomainMetadata::Domain& domain, const DomainMetadata* metadata) {\n if (metadata != nullptr) {\n TF_ASSIGN_OR_RETURN(const auto& sharding_metadata,\n ShardingMetadata::ToShardingMetadata(metadata));\n const auto& sharding = sharding_metadata->sharding();\n if (sharding != nullptr) {\n bool is_spatially_partitioned = !sharding->HasUniqueDevice();\n if (sharding->IsTuple()) {\n is_spatially_partitioned = absl::c_any_of(\n sharding->tuple_elements(),\n [](const HloSharding& s) { return !s.HasUniqueDevice(); });\n }\n if (is_spatially_partitioned) {\n for (HloInstruction* d : domain.exit_domains) {\n HloInstruction* operand = d->mutable_operand(0);\n if (!operand->has_sharding() || operand->sharding() != *sharding) {\n HloSharding operand_sharding = *sharding;\n if (operand->shape().IsTuple() && !sharding->IsTuple()) {\n operand_sharding =\n HloSharding::SingleTuple(operand->shape(), *sharding);\n }\n operand->set_sharding(std::move(operand_sharding));\n }\n }\n return absl::OkStatus();\n }\n }\n }\n return ShardingMetadata::NormalizeShardingDomain(domain, metadata);\n}\nstd::optional ShardingPropagation::GetShardingFromUser(\n const HloInstruction& instruction, const HloInstruction& user,\n int64_t aggressiveness, bool is_spmd, const CallGraph& call_graph,\n const CustomCallShardingHelper* sharding_helper) {\n if (!CanPropagateThroughAtAggressiveLevel(user, aggressiveness)) {\n return std::nullopt;\n }\n if (!hlo_sharding_util::IsSpatiallyPartitioned(&user)) {\n return std::nullopt;\n }\n const bool may_combine_partial_sharding = is_spmd && aggressiveness > 0;\n switch (user.opcode()) {\n case HloOpcode::kBroadcast: {\n return InferBroadcastOperandSharding(user, is_spmd);\n }\n case HloOpcode::kConcatenate: {\n if (aggressiveness == 0) {\n return std::nullopt;\n }\n if (user.sharding().IsReplicated()) {\n return user.sharding();\n }\n const int64_t cdim = user.concatenate_dimension();\n auto& tile_assignment = user.sharding().tile_assignment();\n if (tile_assignment.dim(cdim) == 1) {\n return user.sharding();\n }\n if (is_spmd) {\n return user.sharding();\n }\n int64_t start_offset = 0;\n for (HloInstruction* op : user.operands()) {\n if (op == &instruction) {\n break;\n }\n start_offset += op->shape().dimensions(cdim);\n }\n const int64_t tile_shape = CeilOfRatio(\n user.shape().dimensions(cdim), tile_assignment.dimensions()[cdim]);\n std::vector start_indices(tile_assignment.num_dimensions());\n std::vector end_indices(tile_assignment.dimensions().begin(),\n 
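/* [standalone sketch; toy ids instead of HloInstruction pointers]
   ComputeNonRootUsers above subtracts every use that is the computation's
   root from the user count; several call sites propagate more aggressively
   when a value is consumed only by non-root users. The same count in
   isolation:

#include <cstdint>
#include <vector>

int64_t NonRootUsers(const std::vector<int>& user_ids, int root_id) {
  int64_t n = static_cast<int64_t>(user_ids.size());
  for (int id : user_ids) {
    if (id == root_id) --n;  // A root use does not count.
  }
  return n;
}
*/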
tile_assignment.dimensions().end());\n start_indices[cdim] = start_offset / tile_shape;\n end_indices[cdim] = CeilOfRatio(\n start_offset + instruction.shape().dimensions(cdim), tile_shape);\n auto new_tile_assignment =\n tile_assignment.array().Slice(start_indices, end_indices);\n if (new_tile_assignment.num_elements() == 1) {\n return HloSharding::AssignDevice(*new_tile_assignment.begin(),\n user.sharding().metadata());\n }\n return HloSharding::Tile(std::move(new_tile_assignment),\n user.sharding().metadata());\n }\n case HloOpcode::kConvolution: {\n auto dot_dims = dot_as_convolution_util::ParseConvolutionDimsInfo(&user);\n if (dot_dims.conv_spatial_dims.empty()) {\n int64_t op_idx = user.operand_index(&instruction);\n return hlo_sharding_util::InferDotOperandSharding(\n &user, op_idx, dot_dims, true,\n may_combine_partial_sharding);\n }\n return std::nullopt;\n }\n case HloOpcode::kDynamicSlice:\n case HloOpcode::kDynamicUpdateSlice: {\n if (aggressiveness == 0) {\n return std::nullopt;\n }\n if (user.sharding().IsReplicated()) {\n return user.sharding();\n }\n if (user.opcode() == HloOpcode::kDynamicUpdateSlice &&\n &instruction == user.operand(0)) {\n return user.sharding();\n }\n const HloInstruction* operand = user.opcode() == HloOpcode::kDynamicSlice\n ? user.operand(0)\n : user.operand(1);\n if (&instruction != operand) {\n return std::nullopt;\n }\n std::vector slice_dims;\n for (int64_t i = 0; i < user.shape().rank(); ++i) {\n if (user.shape().dimensions(i) != operand->shape().dimensions(i)) {\n slice_dims.push_back(i);\n }\n }\n return hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(\n user.sharding(), slice_dims);\n }\n case HloOpcode::kReduceWindow: {\n auto* reduce_window = Cast(&user);\n if (!absl::c_linear_search(reduce_window->inputs(), &instruction)) {\n return std::nullopt;\n }\n if (reduce_window->shape().IsTuple()) {\n auto sub_sharding = reduce_window->sharding().GetSubSharding(\n reduce_window->shape(),\n {reduce_window->operand_index(&instruction)});\n return sub_sharding;\n }\n return reduce_window->sharding();\n }\n case HloOpcode::kReshape: {\n return hlo_sharding_util::PropagateShardingThroughReshape(\n user.shape(), instruction.shape(), user.sharding());\n }\n case HloOpcode::kPad: {\n if (&instruction != user.operand(0)) {\n return std::nullopt;\n }\n return user.sharding();\n }\n case HloOpcode::kSlice: {\n return user.sharding();\n }\n case HloOpcode::kTranspose: {\n std::vector reverse_dimensions(user.dimensions().size());\n for (int64_t i = 0; i < user.dimensions().size(); ++i) {\n reverse_dimensions[user.dimensions(i)] = i;\n }\n return hlo_sharding_util::TransposeSharding(user.sharding(),\n reverse_dimensions);\n }\n case HloOpcode::kTuple: {\n auto sub_sharding = user.sharding().GetSubSharding(\n user.shape(), {user.operand_index(&instruction)});\n for (int64_t i = 0; i < user.shape().tuple_shapes_size(); ++i) {\n if (user.operand(i) == &instruction) {\n HloSharding alternative_sub_sharding =\n user.sharding().GetSubSharding(user.shape(), {i});\n if (hlo_sharding_util::IsShardingMoreSpecific(\n alternative_sub_sharding, sub_sharding)) {\n sub_sharding = alternative_sub_sharding;\n }\n }\n }\n return sub_sharding;\n }\n case HloOpcode::kGetTupleElement: {\n int64_t sharding_index = 0;\n for (int i = 0; i < instruction.shape().tuple_shapes_size(); ++i) {\n if (i == user.tuple_index()) {\n break;\n }\n if (instruction.shape().tuple_shapes(i).IsArray()) {\n sharding_index += 1;\n } else {\n sharding_index +=\n 
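/* [standalone sketch; InversePermutation is a hypothetical name] Propagating
   a sharding from a transpose user back to its operand inverts the
   permutation, exactly what the reverse_dimensions loop above computes:

#include <cstdint>
#include <vector>

std::vector<int64_t> InversePermutation(const std::vector<int64_t>& perm) {
  std::vector<int64_t> inverse(perm.size());
  for (int64_t i = 0; i < static_cast<int64_t>(perm.size()); ++i) {
    inverse[perm[i]] = i;  // Output dim perm[i] came from input dim i.
  }
  return inverse;
}
// InversePermutation({2, 0, 1}) == {1, 2, 0}
*/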
ShapeUtil::GetLeafCount(instruction.shape().tuple_shapes(i));\n }\n }\n auto base_instruction_sharding = [&](const HloSharding& user_sharding) {\n if (instruction.has_sharding()) {\n return instruction.sharding();\n } else {\n std::vector shardings;\n ShapeUtil::ForEachSubshape(\n instruction.shape(),\n [&](const Shape& sub_shape, const ShapeIndex& index) {\n if (ShapeUtil::IsLeafIndex(instruction.shape(), index)) {\n shardings.push_back(hlo_sharding_util::ReplicateAllDataDims(\n user_sharding, sub_shape.dimensions_size()));\n }\n });\n return HloSharding::Tuple(instruction.shape(), shardings);\n }\n };\n if (user.shape().IsArray()) {\n HloSharding new_sharding = base_instruction_sharding(user.sharding());\n new_sharding.tuple_elements()[sharding_index] = user.sharding();\n return new_sharding;\n } else {\n if (user.sharding().tuple_elements().empty()) {\n return std::nullopt;\n }\n HloSharding new_sharding =\n base_instruction_sharding(user.sharding().tuple_elements()[0]);\n for (int64_t i = 0; i < user.sharding().tuple_elements().size(); ++i) {\n new_sharding.tuple_elements()[sharding_index + i] =\n user.sharding().tuple_elements()[i];\n }\n return new_sharding;\n }\n }\n case HloOpcode::kDot: {\n int64_t op_idx = user.operand_index(&instruction);\n auto dnums = dot_as_convolution_util::ParseDotGeneralFromDot(&user);\n return hlo_sharding_util::InferDotOperandSharding(\n &user, op_idx, dnums, true,\n may_combine_partial_sharding);\n }\n case HloOpcode::kReduce: {\n if (instruction.shape().rank() == 0) {\n return std::nullopt;\n }\n auto user_sharding =\n user.shape().IsTuple()\n ? user.sharding().GetSubSharding(\n user.shape(), {user.operand_index(&instruction)})\n : user.sharding();\n if (!user_sharding.IsTileMaximal()) {\n std::vector target_tile_assignment_dimensions(\n instruction.shape().rank() +\n (user_sharding.ReplicateOnLastTileDim() ? 1 : 0) +\n user_sharding.subgroup_types().size());\n const auto& dimensions = user.dimensions();\n int64_t next_output_dim = 0;\n for (int64_t i = 0; i < target_tile_assignment_dimensions.size(); ++i) {\n if (absl::c_find(dimensions, i) == dimensions.end()) {\n target_tile_assignment_dimensions[i] =\n user_sharding.tile_assignment().dim(next_output_dim++);\n } else {\n target_tile_assignment_dimensions[i] = 1;\n }\n }\n auto tile_assignment = user_sharding.tile_assignment().Reshape(\n target_tile_assignment_dimensions);\n user_sharding =\n user_sharding.ReplicateOnLastTileDim()\n ? 
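/* [standalone sketch; FlatShardingIndex is a hypothetical name] The
   GetTupleElement case above flattens a tuple sharding: the flat index of
   element k is the number of leaves in tuple elements 0..k-1, with an array
   counting as one leaf. Given per-element leaf counts, that is a prefix sum:

#include <cstdint>
#include <numeric>
#include <vector>

int64_t FlatShardingIndex(const std::vector<int64_t>& leaf_counts,
                          int64_t tuple_index) {
  return std::accumulate(leaf_counts.begin(),
                         leaf_counts.begin() + tuple_index, int64_t{0});
}
// Tuple (f32[8], (f32[4], f32[4]), f32[2]) has leaf_counts {1, 2, 1};
// FlatShardingIndex({1, 2, 1}, 2) == 3.
*/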
HloSharding::PartialTile(tile_assignment,\n user_sharding.metadata())\n : HloSharding::Subgroup(tile_assignment,\n user_sharding.subgroup_types(),\n user_sharding.metadata());\n }\n const auto* reduce = Cast(&user);\n for (const HloInstruction* operand : reduce->inputs()) {\n if (operand != &instruction && operand->has_sharding()) {\n hlo_sharding_util::MergeShardingIfCompatible(operand->sharding(),\n &user_sharding);\n }\n }\n return user_sharding;\n }\n case HloOpcode::kSort: {\n HloSharding user_sharding = user.sharding();\n if (user_sharding.IsTuple()) {\n return user_sharding.GetSubSharding(user.shape(),\n {user.operand_index(&instruction)});\n }\n return user_sharding;\n }\n case HloOpcode::kReverse: {\n return hlo_sharding_util::ReverseSharding(user.sharding(),\n user.dimensions());\n }\n case HloOpcode::kOutfeed: {\n if (&instruction != user.operand(0)) {\n return std::nullopt;\n }\n std::vector operand_shapes(user.operand_count());\n for (int i = 0; i < user.operand_count(); ++i) {\n operand_shapes[i] = user.operand(i)->shape();\n }\n return user.sharding().GetSubSharding(\n ShapeUtil::MakeTupleShape(operand_shapes), {0});\n }\n case HloOpcode::kGather: {\n if (&instruction == user.operand(1)) {\n return hlo_sharding_util::\n GatherIndexShardingFromOutputIndexPassthroughDimensions(\n user.sharding(), &user);\n }\n if (is_spmd) {\n return hlo_sharding_util::GatherOperandShardingFromOutput(\n user.sharding(), user, call_graph);\n }\n return std::nullopt;\n }\n case HloOpcode::kScatter: {\n auto& scatter_user = *Cast(&user);\n const int64_t operand_count = scatter_user.scatter_operand_count();\n auto scatter_operands = scatter_user.scatter_operands();\n auto scatter_indices = scatter_user.scatter_indices();\n auto scatter_updates = scatter_user.scatter_updates();\n const int64_t operand_index =\n absl::c_find(scatter_operands, &instruction) -\n scatter_operands.cbegin();\n if (operand_index < operand_count) {\n return user.sharding().IsTuple() ? user.sharding().GetSubSharding(\n user.shape(), {operand_index})\n : user.sharding();\n }\n if (&instruction == scatter_indices) {\n std::vector partitioned_updates;\n for (const HloInstruction* update : scatter_updates) {\n if (hlo_sharding_util::IsSpatiallyPartitioned(update)) {\n partitioned_updates.push_back(update);\n }\n }\n if (partitioned_updates.empty()) {\n return std::nullopt;\n }\n std::vector shardings;\n absl::c_transform(\n partitioned_updates, std::back_inserter(shardings),\n [&scatter_user](const HloInstruction* update) {\n return hlo_sharding_util::\n ScatterIndexShardingFromUpdateIndexPassthroughDimensions(\n update->sharding(), &scatter_user);\n });\n return hlo_sharding_util::FindCommonSharding(shardings);\n }\n const int64_t update_index = absl::c_find(scatter_updates, &instruction) -\n scatter_updates.cbegin();\n CHECK_LE(update_index, operand_count);\n auto from_indices =\n hlo_sharding_util::IsSpatiallyPartitioned(scatter_indices)\n ? hlo_sharding_util::\n ScatterUpdateShardingFromIndexIndexPassthroughDimensions(\n scatter_indices->sharding(), &scatter_user)\n : HloSharding::Replicate();\n if (is_spmd) {\n auto from_output = hlo_sharding_util::ScatterUpdateShardingFromOutput(\n user.sharding().IsTuple()\n ? 
user.sharding().GetSubSharding(user.shape(), {update_index})\n : user.sharding(),\n scatter_user, call_graph);\n if (from_output.has_value()) {\n hlo_sharding_util::MergeShardingIfCompatible(from_indices,\n &*from_output);\n if (!from_output->IsTileMaximal()) {\n return from_output;\n }\n }\n }\n if (!from_indices.IsTileMaximal()) {\n return from_indices;\n }\n return std::nullopt;\n }\n case HloOpcode::kCustomCall: {\n bool compatible_shapes = ShapeUtil::CompatibleIgnoringElementType(\n instruction.shape(), user.shape());\n if (!compatible_shapes) {\n return std::nullopt;\n }\n if (!sharding_helper) {\n return user.sharding();\n }\n if (sharding_helper->CanPropagateShardingToOperands(&user)) {\n return user.sharding();\n }\n return std::nullopt;\n }\n default: {\n if (ShapeUtil::CompatibleIgnoringElementType(instruction.shape(),\n user.shape())) {\n return user.sharding();\n }\n return std::nullopt;\n }\n }\n}\nbool AggressiveConcatOperandShardingCanPassThrough(\n const HloInstruction* concat_operand) {\n return (\n hlo_sharding_util::IsSpatiallyPartitioned(concat_operand) &&\n (concat_operand->has_sharding() &&\n concat_operand->sharding().NumTiles() > 1) &&\n concat_operand->opcode() == HloOpcode::kReshape &&\n (concat_operand->operand(0)->opcode() == HloOpcode::kParameter ||\n concat_operand->operand(0)->opcode() == HloOpcode::kGetTupleElement));\n}\nbool InferDynamicUpdateSliceShardingFromOperand1(\n HloInstruction* instruction, bool may_combine_partial_sharding) {\n CHECK(instruction->opcode() == HloOpcode::kDynamicSlice ||\n instruction->opcode() == HloOpcode::kDynamicUpdateSlice);\n const HloInstruction* operand =\n instruction->opcode() == HloOpcode::kDynamicSlice\n ? instruction->operand(0)\n : instruction->operand(1);\n if (!hlo_sharding_util::IsSpatiallyPartitioned(operand)) {\n return false;\n }\n CHECK(!operand->sharding().IsManual());\n std::vector slice_dims;\n for (int64_t i = 0; i < instruction->shape().rank(); ++i) {\n if (instruction->shape().dimensions(i) != operand->shape().dimensions(i)) {\n slice_dims.push_back(i);\n }\n }\n return MaybeImproveInstructionSharding(\n hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(\n operand->sharding(), slice_dims),\n instruction, may_combine_partial_sharding,\n ComputeNonRootUsers(instruction) == 1);\n}\nbool InferDynamicUpdateSliceShardingFromOperand0(\n HloInstruction* instruction, bool may_combine_partial_sharding) {\n CHECK_EQ(instruction->opcode(), HloOpcode::kDynamicUpdateSlice);\n if (!hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(0))) {\n return false;\n }\n return MaybeImproveInstructionSharding(instruction->operand(0)->sharding(),\n instruction,\n may_combine_partial_sharding);\n}\nbool ShardingPropagation::InferShardingFromShardGroup(\n HloInstruction* instruction, int64_t aggressiveness,\n const absl::flat_hash_set& shard_group) {\n if (!CanPropagateThroughAtAggressiveLevel(*instruction, aggressiveness)) {\n return false;\n }\n if (instruction->has_sharding() && instruction->sharding().IsManual()) {\n return false;\n }\n if (instruction->IsCustomCall(spmd::kShardBarrierTo)) {\n return false;\n }\n if (!instruction->has_sharding() || instruction->sharding().IsTileMaximal()) {\n for (const HloInstruction* member : shard_group) {\n if (!member->has_sharding() || !member->sharding().IsManual() ||\n member == instruction) {\n continue;\n }\n instruction->set_sharding(member->sharding());\n return true;\n }\n }\n const bool may_combine_partial_sharding = is_spmd_ && aggressiveness > 0;\n bool changed 
bool ShardingPropagation::InferShardingFromOperands(
    HloInstruction* instruction, const ComputationMap& computation_map,
    int64_t aggressiveness, const CallGraph& call_graph,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  if (!CanPropagateThroughAtAggressiveLevel(*instruction, aggressiveness)) {
    return false;
  }
  if (instruction->has_sharding() && instruction->sharding().IsManual()) {
    return false;
  }
  const bool custom_call_condition =
      instruction->opcode() == HloOpcode::kCustomCall &&
      instruction->shape().IsTuple();
  const bool async_instr_condition =
      instruction->IsAsynchronous() &&
      !HloInstruction::IsThreadIncluded(instruction->async_execution_thread(),
                                        execution_threads);
  if ((!instruction->has_sharding() ||
       instruction->sharding().IsTileMaximal()) &&
      (instruction->shape().IsArray() ||
       instruction->opcode() == HloOpcode::kReduce ||
       instruction->opcode() == HloOpcode::kSort ||
       instruction->opcode() == HloOpcode::kReduceWindow ||
       custom_call_condition || async_instr_condition)) {
    for (const HloInstruction* op : instruction->operands()) {
      if (!op->has_sharding() || !op->sharding().IsManual()) continue;
      if (instruction->IsCustomCall("SPMDShardToFullShape")) {
        return false;
      }
      if (aggressiveness == 0 &&
          (instruction->opcode() == HloOpcode::kConcatenate ||
           instruction->opcode() == HloOpcode::kDynamicSlice)) {
        return false;
      }
      instruction->set_sharding(
          HloSharding::Manual(op->sharding().metadata())
              .NormalizeTupleSharding(instruction->shape()));
      return true;
    }
  }
  const bool may_combine_partial_sharding = is_spmd_ && aggressiveness > 0;
  if (!SupportSpatialPartitioning(
          instruction, computation_map, is_spmd_,
          allow_spmd_sharding_propagation_to_output_,
          /*allow_spmd_sharding_propagation_to_parameters=*/false,
          sharding_helper_.get())) {
    if (instruction->shape().IsTuple() || instruction->operand_count() == 0 ||
        instruction == instruction->parent()->root_instruction() ||
        instruction->HasSideEffect()) {
      return false;
    }
    for (const HloInstruction* op : instruction->operands()) {
      if (op->has_sharding() && op->sharding().IsTileMaximal() &&
          !op->sharding().HasUniqueDevice()) {
        return MaybeImproveInstructionSharding(op->sharding(), instruction,
                                               may_combine_partial_sharding);
      }
    }
    return false;
  }
  auto get_maybe_tuple_sharding = [&](HloSharding sharding) {
    if (instruction->shape().IsArray()) {
      return sharding;
    }
    std::vector<HloSharding> tuple(instruction->shape().tuple_shapes_size(),
                                   std::move(sharding));
    return HloSharding::Tuple(instruction->shape(), tuple);
  };
  switch (instruction->opcode()) {
    case HloOpcode::kGetTupleElement: {
      const HloInstruction* operand = instruction->operand(0);
      if (!hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
        return false;
      }
      HloSharding new_sharding = operand->sharding().GetSubSharding(
          operand->shape(), {instruction->tuple_index()});
      if (new_sharding.IsManual()) {
        instruction->set_sharding(std::move(new_sharding));
        return true;
      }
      return MaybeImproveInstructionSharding(
          std::move(new_sharding), instruction, may_combine_partial_sharding,
          ComputeNonRootUsers(instruction) == 1);
    }
    case HloOpcode::kTuple: {
      if (absl::c_none_of(
              instruction->operands(), [](const HloInstruction* hlo) {
                return hlo_sharding_util::IsSpatiallyPartitioned(hlo);
              })) {
        return false;
      }
      const Shape& shape = instruction->shape();
      std::vector<HloSharding> sub_shardings;
      if (instruction->has_sharding()) {
        sub_shardings = instruction->sharding().tuple_elements();
      } else {
        sub_shardings.assign(HloSharding::RequiredLeaves(shape),
                             HloSharding::Replicate());
      }
      auto is_more_specific =
          [instruction](const HloSharding& operand_sharding,
                        const HloSharding& existing) {
            return !instruction->has_sharding() ||
                   hlo_sharding_util::IsShardingMoreSpecific(operand_sharding,
                                                             existing);
          };
      int64_t sub_sharding_index = 0;
      for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
        const HloInstruction* operand = instruction->operand(i);
        if (operand->has_sharding()) {
          if (operand->shape().IsTuple()) {
            for (int64_t j = 0, e = ShapeUtil::GetLeafCount(operand->shape());
                 j < e; ++j) {
              if (is_more_specific(operand->sharding().tuple_elements()[j],
                                   sub_shardings[sub_sharding_index + j])) {
                sub_shardings[sub_sharding_index + j] =
                    operand->sharding().tuple_elements()[j];
              }
            }
          } else {
            std::optional<HloSharding> op_sharding =
                hlo_sharding_util::GetOutputSharding(operand);
            CHECK(op_sharding.has_value())
                << "Expected sharding for " << operand->ToString();
            if (is_more_specific(op_sharding.value(),
                                 sub_shardings[sub_sharding_index])) {
              sub_shardings[sub_sharding_index] = op_sharding.value();
            }
          }
        }
        sub_sharding_index += ShapeUtil::GetLeafCount(operand->shape());
      }
      HloSharding new_sharding = HloSharding::Tuple(shape, sub_shardings);
      if (!instruction->has_sharding() ||
          new_sharding != instruction->sharding()) {
        instruction->set_sharding(std::move(new_sharding));
        return true;
      }
      return false;
    }
    case HloOpcode::kReduce: {
      return InferReduceShardingFromOperand(
          instruction, may_combine_partial_sharding, is_spmd_);
    }
    case HloOpcode::kBroadcast: {
      if (aggressiveness < 3) {
        return false;
      }
      const HloInstruction* op = instruction->operand(0);
      if (!hlo_sharding_util::IsSpatiallyPartitioned(op) ||
          op->sharding().IsReplicated()) {
        return false;
      }
      std::vector<int64_t> target_tile_assignment_dimensions;
      const auto& dimensions = instruction->dimensions();
      for (int64_t i = 0; i < instruction->shape().rank(); ++i) {
        auto it = absl::c_find(dimensions, i);
        if (it == dimensions.end()) {
          target_tile_assignment_dimensions.push_back(1);
        } else {
          const int64_t source_dim = std::distance(dimensions.begin(), it);
          target_tile_assignment_dimensions.push_back(
              op->sharding().tile_assignment().dim(source_dim));
        }
      }
      for (int64_t i = op->sharding().TiledDataRank();
           i < op->sharding().tile_assignment().num_dimensions(); ++i) {
        target_tile_assignment_dimensions.push_back(
            op->sharding().tile_assignment().dim(i));
      }
      auto new_tile_assignment = op->sharding().tile_assignment().Reshape(
          target_tile_assignment_dimensions);
      HloSharding new_sharding =
          op->sharding().ReplicateOnLastTileDim()
              ? HloSharding::PartialTile(new_tile_assignment,
                                         op->sharding().metadata())
              : HloSharding::Subgroup(new_tile_assignment,
                                      op->sharding().subgroup_types(),
                                      op->sharding().metadata());
      return MaybeImproveInstructionSharding(
          std::move(new_sharding), instruction, may_combine_partial_sharding,
          ComputeNonRootUsers(instruction) == 1);
    }
    case HloOpcode::kConcatenate: {
      const HloInstruction* operand = PickRepresentativeOperand(instruction);
      if (!operand || !hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
        return false;
      }
      if (aggressiveness == 0) {
        for (const HloInstruction* concat_operand : instruction->operands()) {
          if (!AggressiveConcatOperandShardingCanPassThrough(concat_operand)) {
            return false;
          }
          const auto& tile_assignment =
              concat_operand->sharding().tile_assignment();
          for (int64_t i = 0; i < instruction->shape().rank(); ++i) {
            if (absl::c_linear_search(instruction->dimensions(), i) &&
                tile_assignment.dim(i) > 1) {
              return false;
            }
          }
        }
      }
      return MaybeImproveInstructionSharding(
          operand->sharding(), instruction, may_combine_partial_sharding,
          ComputeNonRootUsers(instruction) == 1);
    }
    case HloOpcode::kConvolution:
      return InferConvolutionShardingFromOperands(
          instruction, call_graph, aggressiveness,
          may_combine_partial_sharding, is_spmd_);
    case HloOpcode::kTranspose: {
      const HloInstruction* input = instruction->operand(0);
      if (!hlo_sharding_util::IsSpatiallyPartitioned(input)) {
        return false;
      }
      HloSharding sharding = hlo_sharding_util::TransposeSharding(
          input->sharding(), instruction->dimensions());
      return MaybeImproveInstructionSharding(
          std::move(sharding), instruction, may_combine_partial_sharding,
          ComputeNonRootUsers(instruction) == 1);
    }
    case HloOpcode::kReduceWindow: {
      auto* reduce_window = Cast<HloReduceWindowInstruction>(instruction);
      auto has_dilation = [](const WindowDimension& dimensions) {
        return dimensions.base_dilation() > 1 ||
               dimensions.window_dilation() > 1;
      };
      if (absl::c_any_of(instruction->window().dimensions(), has_dilation)) {
        VLOG(2) << "Not applying sharding to reduce window because dilation "
                   "isn't supported yet: "
                << reduce_window->ToString();
        return false;
      }
      bool changed = false;
      for (HloInstruction* operand : reduce_window->inputs()) {
        if (!hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
          continue;
        }
        changed |= MaybeImproveInstructionSharding(
            get_maybe_tuple_sharding(operand->sharding()), reduce_window,
            may_combine_partial_sharding,
            ComputeNonRootUsers(instruction) == 1);
      }
      return changed;
    }
    case HloOpcode::kSelectAndScatter: {
      const HloInstruction* lhs = instruction->operand(0);
      if (!hlo_sharding_util::IsSpatiallyPartitioned(lhs)) {
        return false;
      }
      auto has_base_dilation = [](const WindowDimension& dimensions) {
        return dimensions.base_dilation() > 1;
      };
      if (absl::c_any_of(instruction->window().dimensions(),
                         has_base_dilation)) {
        VLOG(2) << "Not applying sharding to select-and-scatter because "
                   "base dilation isn't supported yet: "
                << instruction->ToString();
        return false;
      }
      return MaybeImproveInstructionSharding(
          lhs->sharding(), instruction, may_combine_partial_sharding,
          ComputeNonRootUsers(instruction) == 1);
    }
    case HloOpcode::kReshape: {
      if (!hlo_sharding_util::IsSpatiallyPartitioned(
              instruction->operand(0))) {
        return false;
      }
      HloSharding new_sharding =
          hlo_sharding_util::PropagateShardingThroughReshape(
              instruction->operand(0)->shape(), instruction->shape(),
              instruction->operand(0)->sharding());
      return MaybeImproveInstructionSharding(
          std::move(new_sharding), instruction, may_combine_partial_sharding,
          ComputeNonRootUsers(instruction) == 1);
    }
    case HloOpcode::kReverse: {
      const HloInstruction* operand = instruction->operand(0);
      if (!hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
        return false;
      }
      return MaybeImproveInstructionSharding(
          hlo_sharding_util::ReverseSharding(operand->sharding(),
                                             instruction->dimensions()),
          instruction, may_combine_partial_sharding,
          ComputeNonRootUsers(instruction) == 1);
    }
    case HloOpcode::kDot: {
      const auto& dnums =
          dot_as_convolution_util::ParseDotGeneralFromDot(instruction);
      return InferDotShardingFromOperands(instruction, call_graph, dnums,
                                          may_combine_partial_sharding,
                                          is_spmd_);
    }
    case HloOpcode::kParameter: {
      auto parent_it = computation_map.find(instruction->parent());
      if (parent_it == computation_map.end()) {
        return false;
      }
      const HloInstruction* parent = parent_it->second;
      switch (parent->opcode()) {
        case HloOpcode::kConditional: {
          for (int64_t i = 1; i < parent->operand_count(); ++i) {
            if (parent->called_computations()[i - 1] ==
                instruction->parent()) {
              if (parent->operand(i)->has_sharding()) {
                return MaybeImproveInstructionSharding(
                    parent->operand(i)->sharding(), instruction,
                    may_combine_partial_sharding);
              }
              return false;
            }
          }
          return false;
        }
        case HloOpcode::kCall: {
          int64_t i = instruction->parameter_number();
          if (parent->operand(i)->has_sharding()) {
            return MaybeImproveInstructionSharding(
                parent->operand(i)->sharding(), instruction,
                may_combine_partial_sharding);
          }
          return false;
        }
        default:
          return false;
      }
    }
    case HloOpcode::kSort: {
      const HloInstruction* operand = PickRepresentativeOperand(instruction);
      if (!operand || !hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
        return false;
      }
      HloSortInstruction* sort = DynCast<HloSortInstruction>(instruction);
      CHECK(sort);
      const int64_t sort_dim = sort->sort_dimension();
      if (!operand->sharding().IsTileMaximal() &&
          operand->sharding().tile_assignment().dim(sort_dim) != 1) {
        if (!hlo_sharding_util::IsSortOperandShardingMovable(operand,
                                                             sort_dim)) {
          return false;
        }
      }
      if (instruction->shape().IsTuple()) {
        return MaybeImproveInstructionSharding(
            HloSharding::SingleTuple(instruction->shape(),
                                     operand->sharding()),
            instruction, may_combine_partial_sharding,
            ComputeNonRootUsers(instruction) == 1);
      } else {
        return MaybeImproveInstructionSharding(
            operand->sharding(), instruction, may_combine_partial_sharding,
            ComputeNonRootUsers(instruction) == 1);
      }
    }
    case HloOpcode::kDynamicSlice: {
      return InferDynamicUpdateSliceShardingFromOperand1(
          instruction, may_combine_partial_sharding);
    }
    case HloOpcode::kDynamicUpdateSlice: {
      bool changed = InferDynamicUpdateSliceShardingFromOperand1(
          instruction, may_combine_partial_sharding);
      changed |= InferDynamicUpdateSliceShardingFromOperand0(
          instruction, may_combine_partial_sharding);
      return changed;
    }
    case HloOpcode::kGather: {
      bool changed = false;
      const GatherDimensionNumbers& dnums =
          instruction->gather_dimension_numbers();
      if (!dnums.operand_batching_dims().empty()) {
        hlo_sharding_util::GatherScatterParallelDims explicit_batch_dims;
        explicit_batch_dims.operand_parallel_dims.assign(
            dnums.operand_batching_dims().begin(),
            dnums.operand_batching_dims().end());
        explicit_batch_dims.indices_parallel_dims.assign(
            dnums.start_indices_batching_dims().begin(),
            dnums.start_indices_batching_dims().end());
        changed |= InferGatherParallelShardingFromOperands(
            instruction, explicit_batch_dims, may_combine_partial_sharding);
      }
      if (hlo_sharding_util::IsSpatiallyPartitioned(
              instruction->operand(1))) {
        HloSharding new_sharding = hlo_sharding_util::
            GatherOutputShardingFromIndexIndexPassthroughDimensions(
                instruction->operand(1)->sharding(), instruction);
        changed |= MaybeImproveInstructionSharding(
            std::move(new_sharding), instruction,
            may_combine_partial_sharding);
      }
      if (is_spmd_) {
        auto gather_parallel_dims =
            hlo_sharding_util::GetGatherParallelBatchDims(*instruction,
                                                          call_graph);
        if (gather_parallel_dims) {
          changed |= InferGatherParallelShardingFromOperands(
              instruction, *gather_parallel_dims,
              may_combine_partial_sharding);
        }
        if (hlo_sharding_util::IsSpatiallyPartitioned(
                instruction->operand(0))) {
          absl::Span<const int64_t> operand_parallel_dims;
          if (gather_parallel_dims) {
            operand_parallel_dims = absl::MakeConstSpan(
                gather_parallel_dims->operand_parallel_dims);
          }
          HloSharding filtered_operand_sharding =
              hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
                  instruction->operand(0)->sharding(), operand_parallel_dims);
          auto maybe_from_data = hlo_sharding_util::
              GatherOutputShardingFromOperandOperandPassthroughDimensions(
                  filtered_operand_sharding, *instruction);
          if (maybe_from_data) {
            changed |= MaybeImproveInstructionSharding(
                std::move(*maybe_from_data), instruction,
                may_combine_partial_sharding);
          }
        }
      }
      return changed;
    }
    case HloOpcode::kScatter: {
      auto& scatter = *Cast<HloScatterInstruction>(instruction);
      bool changed = false;
      const ScatterDimensionNumbers& dnums =
          instruction->scatter_dimension_numbers();
      if (!dnums.input_batching_dims().empty()) {
        hlo_sharding_util::GatherScatterParallelDims explicit_batch_dims;
        explicit_batch_dims.operand_parallel_dims.assign(
            dnums.input_batching_dims().begin(),
            dnums.input_batching_dims().end());
        explicit_batch_dims.indices_parallel_dims.assign(
            dnums.scatter_indices_batching_dims().begin(),
            dnums.scatter_indices_batching_dims().end());
        changed |= InferScatterParallelShardingFromOperands(
            instruction, explicit_batch_dims, may_combine_partial_sharding);
      }
      const int64_t operand_count = scatter.scatter_operand_count();
      auto scatter_operands = scatter.scatter_operands();
      auto scatter_indices = scatter.scatter_indices();
      auto scatter_updates = scatter.scatter_updates();
      if (is_spmd_) {
        for (int64_t i = 0; i != operand_count; ++i) {
          if (hlo_sharding_util::IsSpatiallyPartitioned(
                  scatter_operands[i])) {
            changed |= MaybeImproveInstructionSubSharding(
                scatter_operands[i]->sharding(), instruction, {i},
                may_combine_partial_sharding);
          }
        }
        if (!hlo_sharding_util::IsSpatiallyPartitioned(scatter_indices) &&
            absl::c_none_of(scatter_updates,
                            [](const HloInstruction* update) {
                              return hlo_sharding_util::
                                  IsSpatiallyPartitioned(update);
                            })) {
          return changed;
        }
        if (auto scatter_parallel_dims =
                hlo_sharding_util::GetScatterParallelBatchDims(*instruction,
                                                               call_graph)) {
          changed |= InferScatterParallelShardingFromOperands(
              instruction, *scatter_parallel_dims,
              may_combine_partial_sharding);
        }
        for (int64_t i = 0; i != operand_count; ++i) {
          if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_updates[i])) {
            auto maybe_from_update =
                hlo_sharding_util::ScatterOutputShardingFromUpdate(
                    scatter_updates[i]->sharding(), scatter);
            if (maybe_from_update) {
              changed |= MaybeImproveInstructionSubSharding(
                  std::move(*maybe_from_update), instruction, {i},
                  may_combine_partial_sharding);
            }
          }
        }
      } else {
        for (int64_t i = 0; i != operand_count; ++i) {
          changed |= MaybeImproveInstructionSubSharding(
              HloSharding::Replicate(), instruction, {i},
              may_combine_partial_sharding);
        }
      }
      return changed;
    }
    case HloOpcode::kWhile: {
      if (!instruction->operand(0)->has_sharding()) {
        return false;
      }
      auto sharding = instruction->operand(0)->sharding();
      if (instruction->has_sharding()) {
        hlo_sharding_util::MergeSharding(instruction->sharding(), &sharding,
                                         may_combine_partial_sharding);
      }
      return MaybeImproveInstructionSharding(std::move(sharding), instruction,
                                             may_combine_partial_sharding);
    }
    case HloOpcode::kCustomCall: {
      HloSharding inferred_operand_sharding = HloSharding::Replicate();
      if (auto* partitioner =
              GetCustomCallPartitioner(instruction->custom_call_target());
          partitioner && partitioner->IsCustomCallShardable(instruction)) {
        if (auto sharding =
                partitioner->InferShardingFromOperands(instruction)) {
          inferred_operand_sharding = *sharding;
        } else {
          return false;
        }
      } else if (sharding_helper_->IsCustomCallShardable(instruction)) {
        if (auto sharding =
                sharding_helper_->InferShardingFromOperands(instruction)) {
          inferred_operand_sharding = *sharding;
        } else {
          return false;
        }
      } else {
        const HloInstruction* operand =
            PickRepresentativeOperand(instruction);
        if (!operand || !hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
          return false;
        }
        inferred_operand_sharding = operand->sharding();
      }
      return MaybeImproveInstructionSharding(
          inferred_operand_sharding, instruction,
          may_combine_partial_sharding,
          ComputeNonRootUsers(instruction) == 1);
    }
    default: {
      if (instruction->IsElementwise() && may_combine_partial_sharding) {
        bool changed = false;
        for (auto operand : instruction->operands()) {
          if (hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
            if (instruction->opcode() == HloOpcode::kRng) {
              changed |= MaybeImproveInstructionSharding(
                  hlo_sharding_util::ReplicateAllDataDims(
                      operand->sharding(), instruction->shape().rank()),
                  instruction, may_combine_partial_sharding,
                  ComputeNonRootUsers(instruction) == 1);
              continue;
            }
            changed |= MaybeImproveInstructionSharding(
                operand->sharding(), instruction,
                may_combine_partial_sharding,
                instruction->operands().size() == 1 &&
                    ComputeNonRootUsers(instruction) == 1);
          }
        }
        return changed;
      }
      const HloInstruction* operand = PickRepresentativeOperand(instruction);
      if (!operand || !hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
        return false;
      }
      return MaybeImproveInstructionSharding(
          operand->sharding(), instruction, may_combine_partial_sharding,
          ComputeNonRootUsers(instruction) == 1);
    }
  }
  return false;
}
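// InferShardingFromOperands above is the forward direction of propagation;
// the function below is its backward counterpart, pulling shardings from an
// instruction's users back onto the instruction. The two directions are
// alternated by RunToFixPoint until neither makes progress. Note the
// asymmetry for broadcasts: backward propagation through a broadcast is only
// attempted at aggressiveness >= 2, since it tends to over-constrain the
// smaller operand.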
bool ShardingPropagation::InferShardingFromUsers(
    HloInstruction* instruction,
    const ShardingPropagation::ComputationMap& computation_map,
    int64_t aggressiveness, bool is_spmd,
    const CustomCallShardingHelper* sharding_helper,
    const CallGraph& call_graph) {
  if (aggressiveness < 2 && instruction->opcode() == HloOpcode::kBroadcast) {
    return false;
  }
  if (instruction->has_sharding() && instruction->sharding().IsManual()) {
    return false;
  }
  if (!instruction->has_sharding() ||
      instruction->sharding().IsTileMaximal()) {
    for (const HloInstruction* user : instruction->users()) {
      if (!user->has_sharding() ||
          user->IsCustomCall("SPMDFullToShardShape")) {
        continue;
      }
      if (instruction->shape().IsArray() && user->sharding().IsManual()) {
        instruction->set_sharding(
            HloSharding::Manual(user->sharding().metadata()));
        return true;
      } else {
        std::optional<HloSharding> user_sharding =
            ShardingPropagation::GetShardingFromUser(
                *instruction, *user, aggressiveness, is_spmd, call_graph,
                sharding_helper);
        if (user_sharding && user_sharding->IsManual()) {
          instruction->set_sharding(std::move(*user_sharding));
          return true;
        }
      }
    }
  }
  if (!SupportSpatialPartitioning(
          instruction, computation_map, is_spmd,
          /*allow_spmd_sharding_propagation_to_output=*/false,
          allow_spmd_sharding_propagation_to_parameters_, sharding_helper)) {
    return false;
  }
  bool improved_sharding = false;
  const bool may_combine_partial_sharding = is_spmd && aggressiveness > 0;
  for (const HloInstruction* user : instruction->users()) {
    if (user->opcode() == HloOpcode::kRngBitGenerator) {
      instruction->set_sharding(HloSharding::Replicate());
      return true;
    }
    std::optional<HloSharding> user_sharding =
        ShardingPropagation::GetShardingFromUser(*instruction, *user,
                                                 aggressiveness, is_spmd,
                                                 call_graph, sharding_helper);
    if (user_sharding && instruction->opcode() == HloOpcode::kCustomCall) {
      if (auto* partitioner =
              GetCustomCallPartitioner(instruction->custom_call_target())) {
        if (partitioner->IsCustomCallShardable(instruction)) {
          user_sharding = partitioner->PropagateUserSharding(instruction, user,
                                                             *user_sharding);
        }
      } else if (sharding_helper->IsCustomCallShardable(instruction)) {
        user_sharding = sharding_helper->PropagateUserSharding(
            instruction, user, *user_sharding);
      }
    }
    if (user_sharding) {
      improved_sharding |= MaybeImproveInstructionSharding(
          std::move(*user_sharding), instruction,
          may_combine_partial_sharding);
    }
  }
  return improved_sharding;
}

void ShardingPropagation::MaybeComputationPropagation(
    const ComputationMap& computation_map,
    const absl::flat_hash_set<const HloInstruction*>& provided_shardings,
    HloInstruction* instruction,
    absl::flat_hash_set<HloInstruction*>* changed) {
  auto propagate_to_instruction = [&](HloInstruction* search_inst) {
    auto related_instructions =
        GetRelatedInstructions(search_inst, computation_map);
    if (absl::c_count(related_instructions, instruction)) {
      for (HloInstruction* inst : related_instructions) {
        if ((!inst->has_sharding() ||
             inst->sharding() != instruction->sharding()) &&
            !provided_shardings.contains(inst)) {
          VLOG(2) << "Add computation sharding: " << inst->name() << " "
                  << instruction->sharding().ToString();
          inst->copy_sharding(instruction);
          changed->insert(inst);
          MaybeComputationPropagation(computation_map, provided_shardings,
                                      inst, changed);
        }
      }
    }
  };
  if (instruction->opcode() == HloOpcode::kConditional ||
      instruction->opcode() == HloOpcode::kWhile ||
      instruction->opcode() == HloOpcode::kCustomCall ||
      instruction->opcode() == HloOpcode::kCall) {
    propagate_to_instruction(instruction);
  }
  if (instruction->opcode() == HloOpcode::kParameter ||
      instruction->parent()->root_instruction() == instruction) {
    auto it = computation_map.find(instruction->parent());
    if (it != computation_map.end()) {
      propagate_to_instruction(it->second);
      if (instruction->opcode() == HloOpcode::kParameter &&
          (it->second->opcode() == HloOpcode::kConditional ||
           it->second->opcode() == HloOpcode::kCall)) {
        propagate_to_instruction(instruction);
      }
    }
  }
}
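// RunToFixPoint below repeatedly sweeps every computation in post-order:
// first the shard-group exchange, then a forward pass (operands ->
// instruction), then a backward pass over the reversed post-order (users ->
// instruction). The already_inferred_* sets memoize instructions whose
// inputs have not changed since they were last visited; clear_cache
// invalidates an instruction's neighbors whenever its sharding improves, so
// a single improvement can ripple outward without rescanning the whole
// module every iteration.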
absl::StatusOr<bool> ShardingPropagation::RunToFixPoint(
    int64_t aggressiveness, bool propagate_shard_group,
    const ComputationMap& computation_map,
    const absl::flat_hash_set<const HloInstruction*>& provided_shardings,
    const CallGraph& call_graph, HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads,
    absl::flat_hash_map<const HloInstruction*, std::vector<int64_t>>&
        unspecified_dims,
    absl::flat_hash_map<HloInstruction*, int64_t>&
        instruction_to_shard_group_id,
    absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>&
        shard_group_id_to_shard_as_group,
    absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>&
        shard_group_id_to_shard_like_group,
    int64_t& iterations) {
  bool changed = false;
  absl::flat_hash_set<const HloInstruction*>
      already_inferred_from_shard_group;
  absl::flat_hash_set<const HloInstruction*> already_inferred_from_operands;
  absl::flat_hash_set<const HloInstruction*> already_inferred_from_users;
  bool changed_last_iter = true;
  const bool may_merge_partial = is_spmd_ && aggressiveness > 0;
  while (changed_last_iter) {
    changed_last_iter = false;
    int64_t inferred_from_shard_group_counter = 0;
    int64_t inferred_from_operand_counter = 0;
    int64_t inferred_from_user_counter = 0;
    int64_t instruction_counter = 0;
    int64_t already_sharded_counter = 0;
    for (const HloComputation* computation :
         module->computations(execution_threads)) {
      VLOG(2) << "Consider computation: " << computation->name();
      std::vector<HloInstruction*> instructions =
          computation->MakeInstructionPostOrder();
      instruction_counter += instructions.size();
      already_sharded_counter += absl::c_count_if(
          instructions,
          [](const HloInstruction* inst) { return inst->has_sharding(); });
      auto clear_cache = [&](HloInstruction* hlo,
                             HloInstruction* hlo_for_users = nullptr) {
        for (auto operand : hlo->operands()) {
          already_inferred_from_users.erase(operand);
        }
        if (hlo_for_users == nullptr) {
          hlo_for_users = hlo;
        }
        for (auto user : hlo_for_users->users()) {
          already_inferred_from_operands.erase(user);
          for (auto c : user->called_computations()) {
            for (auto parameter : c->parameter_instructions()) {
              already_inferred_from_operands.erase(parameter);
            }
          }
        }
        if (instruction_to_shard_group_id.contains(hlo)) {
          const int64_t shard_group_id =
              instruction_to_shard_group_id.at(hlo);
          const absl::flat_hash_set<HloInstruction*>& shard_group =
              shard_group_id_to_shard_as_group.contains(shard_group_id)
                  ? shard_group_id_to_shard_as_group.at(shard_group_id)
                  : shard_group_id_to_shard_like_group.at(shard_group_id);
          for (HloInstruction* member : shard_group) {
            if (member != hlo) {
              already_inferred_from_shard_group.erase(member);
            }
          }
        }
      };
      if (propagate_shard_group) {
        for (HloInstruction* instruction : instructions) {
          if (already_inferred_from_shard_group.contains(instruction)) {
            continue;
          }
          if (!instruction_to_shard_group_id.contains(instruction)) {
            continue;
          }
          const int64_t shard_group_id =
              instruction_to_shard_group_id.at(instruction);
          const absl::flat_hash_set<HloInstruction*>& shard_group =
              shard_group_id_to_shard_as_group.contains(shard_group_id)
                  ? shard_group_id_to_shard_as_group.at(shard_group_id)
                  : shard_group_id_to_shard_like_group.at(shard_group_id);
          if (provided_shardings.contains(instruction)) {
            if (!may_merge_partial) {
              continue;
            }
            auto it = unspecified_dims.find(instruction);
            if (it != unspecified_dims.end() &&
                InferUnspecifiedDimsFromShardGroup(instruction, it->second,
                                                   shard_group)) {
              ++inferred_from_shard_group_counter;
              VLOG(2) << "Refined partial sharding (shard group): "
                      << instruction->ToString();
              clear_cache(instruction);
              already_inferred_from_shard_group.insert(instruction);
              changed_last_iter = true;
            }
            continue;
          }
          already_inferred_from_shard_group.insert(instruction);
          if (InferShardingFromShardGroup(instruction, aggressiveness,
                                          shard_group)) {
            ++inferred_from_shard_group_counter;
            changed = true;
            VLOG(2) << "Add sharding (shard group): "
                    << instruction->ToString();
            absl::flat_hash_set<HloInstruction*> changed_in_comp_prop;
            MaybeComputationPropagation(computation_map, provided_shardings,
                                        instruction, &changed_in_comp_prop);
            clear_cache(instruction);
            for (auto hlo : changed_in_comp_prop) {
              clear_cache(hlo);
            }
            changed_last_iter = true;
          }
        }
      }
      for (HloInstruction* instruction : instructions) {
        if (already_inferred_from_operands.contains(instruction)) {
          continue;
        }
        if (provided_shardings.contains(instruction)) {
          if (!may_merge_partial) {
            continue;
          }
          auto it = unspecified_dims.find(instruction);
          HloInstruction* man_conversion_op_after;
          if (it != unspecified_dims.end() &&
              InferUnspecifiedDimsFromOperand(instruction, it->second,
                                              &man_conversion_op_after)) {
            ++inferred_from_operand_counter;
            VLOG(2) << "Refined partial sharding (forward-pass): "
                    << instruction->ToString();
            clear_cache(instruction, man_conversion_op_after);
            already_inferred_from_operands.insert(instruction);
            changed_last_iter = true;
          }
          continue;
        }
        already_inferred_from_operands.insert(instruction);
        if (InferShardingFromOperands(instruction, computation_map,
                                      aggressiveness, call_graph,
                                      execution_threads)) {
          ++inferred_from_operand_counter;
          changed = true;
          VLOG(2) << "Add sharding (forward-pass): "
                  << instruction->ToString();
          absl::flat_hash_set<HloInstruction*> changed_in_comp_prop;
          MaybeComputationPropagation(computation_map, provided_shardings,
                                      instruction, &changed_in_comp_prop);
          clear_cache(instruction);
          for (auto hlo : changed_in_comp_prop) {
            clear_cache(hlo);
          }
          changed_last_iter = true;
        }
      }
      for (auto it = instructions.rbegin(); it != instructions.rend(); ++it) {
        if ((*it)->IsCustomCall("SPMDFullToShardShape") ||
            (*it)->IsCustomCall("SPMDShardToFullShape")) {
          if (!already_inferred_from_users.contains(*it)) {
            already_inferred_from_users.erase((*it)->operand(0));
          }
        }
        if (already_inferred_from_users.contains(*it)) {
          continue;
        }
        if (provided_shardings.contains(*it)) {
          if (!may_merge_partial) {
            continue;
          }
          auto uit = unspecified_dims.find(*it);
          HloInstruction* man_conversion_op_after;
          if (uit != unspecified_dims.end() &&
              InferUnspecifiedDimsFromUsers(*it, uit->second, aggressiveness,
                                            is_spmd_,
                                            &man_conversion_op_after,
                                            call_graph)) {
            ++inferred_from_user_counter;
            VLOG(2) << "Refined partial sharding (backward-pass): "
                    << (*it)->ToString();
            clear_cache(*it, man_conversion_op_after);
            already_inferred_from_users.insert(*it);
            if (man_conversion_op_after != nullptr) {
              already_inferred_from_users.insert(man_conversion_op_after);
            }
            changed_last_iter = true;
          }
          continue;
        }
        already_inferred_from_users.insert(*it);
        if (InferShardingFromUsers(*it, computation_map, aggressiveness,
                                   is_spmd_, sharding_helper_.get(),
                                   call_graph)) {
          ++inferred_from_user_counter;
          changed = true;
          VLOG(2) << "Add sharding (backward-pass): " << (*it)->ToString();
          absl::flat_hash_set<HloInstruction*> changed_in_comp_prop;
          MaybeComputationPropagation(computation_map, provided_shardings,
                                      *it, &changed_in_comp_prop);
          clear_cache(*it);
          for (auto hlo : changed_in_comp_prop) {
            clear_cache(hlo);
          }
          changed_last_iter = true;
        }
      }
    }
    VLOG(1) << "Sharding propagation iteration " << iterations << ";"
            << "\n  total instructions: " << instruction_counter
            << "\n  instructions already sharded: " << already_sharded_counter
            << "\n  shardings inferred from shard group: "
            << inferred_from_shard_group_counter
            << "\n  shardings inferred from operands: "
            << inferred_from_operand_counter
            << "\n  shardings inferred from users: "
            << inferred_from_user_counter
            << "\n  aggressiveness: " << aggressiveness;
    ++iterations;
  }
  return changed;
}

std::vector<HloInstruction*> ShardingPropagation::GetRelatedInstructions(
    HloInstruction* inst, const ComputationMap& computation_map) {
  if (inst->opcode() == HloOpcode::kWhile) {
    return std::vector<HloInstruction*>{
        inst, inst->while_body()->root_instruction(),
        inst->while_body()->parameter_instruction(0),
        inst->while_condition()->parameter_instruction(0)};
  } else if (inst->opcode() == HloOpcode::kConditional) {
    const auto& called_computations = inst->called_computations();
    std::vector<HloInstruction*> comps;
    comps.reserve(called_computations.size() + 1);
    comps.push_back(inst);
    for (HloComputation* c : called_computations) {
      comps.push_back(c->root_instruction());
    }
    return comps;
  } else if (inst->opcode() == HloOpcode::kCustomCall) {
    if (sharding_helper_ && sharding_helper_->IsCustomCallShardable(inst)) {
      return sharding_helper_->GetRelatedInstructions(inst);
    } else {
      return std::vector<HloInstruction*>{};
    }
  } else if (inst->opcode() == HloOpcode::kCall) {
    HloComputation* callee = inst->called_computations().front();
    return std::vector<HloInstruction*>{inst, callee->root_instruction()};
  } else if (inst->opcode() == HloOpcode::kParameter) {
    auto it = computation_map.find(inst->parent());
    if (it != computation_map.end()) {
      if (it->second->opcode() == HloOpcode::kConditional) {
        HloInstruction* cond = it->second;
        for (int64_t i = 1; i < cond->operand_count(); ++i) {
          if (cond->called_computations()[i - 1] == inst->parent()) {
            return std::vector<HloInstruction*>{inst,
                                                cond->mutable_operand(i)};
          }
        }
      }
      if (it->second->opcode() == HloOpcode::kCall) {
        HloInstruction* call = it->second;
        int64_t operand_index = inst->parameter_number();
        CHECK_LT(operand_index, call->operand_count());
        return std::vector<HloInstruction*>{
            inst, call->mutable_operand(operand_index)};
      }
    }
    return std::vector<HloInstruction*>{};
  } else {
    CHECK(false);
  }
}
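// Minimal usage sketch (mirroring the unit tests further below): parse an
// HLO module, run the pass, and inspect the shardings it inferred. The
// hlo_string variable and the ParseAndReturnVerifiedModule/FindInstruction
// helpers come from the HloTestBase test fixture; only the pass construction
// and the Run call are the point here.
//
//   TF_ASSERT_OK_AND_ASSIGN(auto module,
//                           ParseAndReturnVerifiedModule(hlo_string));
//   TF_ASSERT_OK_AND_ASSIGN(
//       bool changed,
//       ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
//           .Run(module.get()));
//   // After the pass, previously unsharded instructions may carry
//   // shardings inferred from their operands and users.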
absl::StatusOr<bool> ShardingPropagation::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  ABSL_CONST_INIT static absl::once_flag did_registration;
  absl::call_once(did_registration, [] {
    RegisterCustomCallPartitioner(
        spmd::kShardBarrierFrom,
        std::make_unique<spmd::ShardBarrierFromPartitioner>());
    RegisterCustomCallPartitioner(
        spmd::kShardBarrierTo,
        std::make_unique<spmd::ShardBarrierToPartitioner>());
  });
  std::optional<absl::flat_hash_map<const HloInstruction*, HloSharding>>
      original_sharding;
  bool any_changed = false;
  if (cse_prevention_only_) {
    original_sharding.emplace();
    for (auto computation : module->computations(execution_threads)) {
      for (auto instruction : computation->instructions()) {
        if (instruction->has_sharding()) {
          original_sharding->emplace(instruction, instruction->sharding());
        }
      }
    }
  } else {
    for (auto computation : module->computations(execution_threads)) {
      for (auto instruction : computation->instructions()) {
        if (instruction->has_sharding() &&
            IsCSEPreventionSharding(instruction->sharding())) {
          instruction->clear_sharding();
          any_changed = true;
        }
      }
    }
  }
  any_changed |= propagate_metadata_
                     ? AssignShardingMetadata(module, execution_threads)
                     : RemoveShardingMetadata(module, execution_threads);
  absl::flat_hash_map<const HloInstruction*, std::vector<int64_t>>
      unspecified_dims;
  std::vector<HloSharding> saved_root_shardings;
  absl::flat_hash_map<int64_t, HloSharding> saved_parameter_shardings;
  absl::flat_hash_map<HloInstruction*, int64_t>
      instruction_to_shard_group_id;
  absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>
      shard_group_id_to_shard_as_group;
  absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>
      shard_group_id_to_shard_like_group;
  TF_ASSIGN_OR_RETURN(
      bool changed,
      ProcessShardingInstruction(
          module, execution_threads, !cse_prevention_only_,
          &unspecified_dims,
          allow_spmd_sharding_propagation_to_output_ ? &saved_root_shardings
                                                     : nullptr,
          allow_spmd_sharding_propagation_to_parameters_
              ? &saved_parameter_shardings
              : nullptr,
          &instruction_to_shard_group_id, &shard_group_id_to_shard_as_group,
          &shard_group_id_to_shard_like_group,
          &allow_spmd_sharding_propagation_to_parameters_vector_));
  any_changed |= changed;
  for (const auto& [shard_group_id, shard_as_group] :
       shard_group_id_to_shard_as_group) {
    VLOG(5) << "Shard-As group " << shard_group_id << " contains:";
    for (auto instruction : shard_as_group) {
      VLOG(5) << "  " << instruction->ToString();
    }
  }
  for (const auto& [shard_group_id, shard_like_group] :
       shard_group_id_to_shard_like_group) {
    VLOG(5) << "Shard-Like group " << shard_group_id << " contains:";
    for (auto instruction : shard_like_group) {
      VLOG(5) << "  " << instruction->ToString();
    }
  }
  if (allow_spmd_sharding_propagation_to_output_) {
    CHECK(!module->entry_computation()->root_instruction()->has_sharding() ||
          allow_spmd_sharding_propagation_to_output_vector_.size() == 1 ||
          module->entry_computation()
                  ->root_instruction()
                  ->sharding()
                  .tuple_elements()
                  .size() ==
              allow_spmd_sharding_propagation_to_output_vector_.size())
        << "allow-spmd-sharding-propagation-to-output-vector's size can be "
           "either 1 or the number of elements in the root tuple of entry "
           "computation.";
  }
  if (allow_spmd_sharding_propagation_to_parameters_) {
    auto is_same_sized_tuple = [](HloModule* module, int64_t size) {
      if (module->entry_computation()->num_parameters() != 1) {
        return false;
      }
      HloInstruction* param =
          module->entry_computation()->parameter_instruction(0);
      return param->shape().IsTuple() &&
             size == param->shape().tuple_shapes_size();
    };
    auto size = allow_spmd_sharding_propagation_to_parameters_vector_.size();
    CHECK(size == 1 ||
          size == module->entry_computation()->num_parameters() ||
          is_same_sized_tuple(module, size))
        << "allow-spmd-sharding-propagation-to-parameters-vector's size can "
           "be either 1 or the number of parameters in the entry "
           "computation.";
  }
  ComputationMap computation_map;
  absl::flat_hash_set<const HloInstruction*> provided_shardings;
  for (auto computation : module->computations(execution_threads)) {
    for (auto instruction : computation->instructions()) {
      if (instruction->opcode() == HloOpcode::kWhile) {
        TF_RETURN_IF_ERROR(
            CheckAndUpdateDeviceAssignmentsInWhileBody(instruction));
      }
    }
  }
  for (auto computation : module->computations(execution_threads)) {
    for (auto instruction : computation->instructions()) {
      if (instruction->opcode() == HloOpcode::kWhile ||
          instruction->opcode() == HloOpcode::kConditional ||
          instruction->opcode() == HloOpcode::kCall) {
        const HloInstruction* sharded_inst = nullptr;
        auto related_instructions =
            GetRelatedInstructions(instruction, computation_map);
        for (auto inst : related_instructions) {
          if (inst->has_sharding()) {
            sharded_inst = inst;
            break;
          }
        }
        if (sharded_inst != nullptr) {
          for (auto inst : related_instructions) {
            inst->copy_sharding(sharded_inst);
          }
        }
        if (instruction->opcode() == HloOpcode::kWhile) {
          computation_map[instruction->while_body()] = instruction;
          computation_map[instruction->while_condition()] = instruction;
        } else {
          for (HloComputation* c : instruction->called_computations()) {
            computation_map[c] = instruction;
          }
        }
      }
    }
  }
  for (const HloComputation* computation :
       module->computations(execution_threads)) {
    for (const HloInstruction* inst : computation->instructions()) {
      if (inst->has_sharding() &&
          inst != module->entry_computation()->root_instruction() &&
          inst->opcode() != HloOpcode::kParameter &&
          !inst->sharding().IsUnknown()) {
        provided_shardings.insert(inst);
      }
    }
  }
  HloInstruction* entry_root =
      module->entry_computation()->root_instruction();
  if (!allow_spmd_sharding_propagation_to_output_ &&
      (!entry_root->has_sharding() || !entry_root->sharding().IsUnknown())) {
    if (entry_root->opcode() == HloOpcode::kWhile) {
      HloInstruction* copy = module->entry_computation()->AddInstruction(
          HloInstruction::CreateUnary(entry_root->shape(), HloOpcode::kCopy,
                                      entry_root));
      if (entry_root->has_sharding()) {
        copy->set_sharding(entry_root->sharding());
      }
      module->entry_computation()->set_root_instruction(copy);
      entry_root = copy;
      any_changed = true;
    }
    provided_shardings.insert(entry_root);
  }
  if (!allow_spmd_sharding_propagation_to_parameters_) {
    for (auto param :
         module->entry_computation()->parameter_instructions()) {
      if (param->has_sharding() && !param->sharding().IsUnknown()) {
        provided_shardings.insert(param);
      }
    }
  }
  for (HloComputation* computation :
       module->computations(execution_threads)) {
    auto instructions = computation->MakeInstructionPostOrder();
    for (auto it = instructions.rbegin(); it != instructions.rend(); ++it) {
      HloInstruction* instruction = *it;
      if (instruction->has_sharding() &&
          instruction->sharding().IsUnknown()) {
        instruction->set_sharding(
            HloSharding::Replicate(instruction->sharding().metadata()));
      }
    }
  }
  int64_t iterations = 0;
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
  for (int64_t aggressiveness = 0; aggressiveness < 4; ++aggressiveness) {
    TF_ASSIGN_OR_RETURN(
        bool changed,
        RunToFixPoint(aggressiveness, /*propagate_shard_group=*/true,
                      computation_map, provided_shardings, *call_graph,
                      module, execution_threads, unspecified_dims,
                      instruction_to_shard_group_id,
                      shard_group_id_to_shard_as_group,
                      shard_group_id_to_shard_like_group, iterations));
    any_changed = any_changed || changed;
  }
  for (const auto& [shard_as_group_id, shard_as_group] :
       shard_group_id_to_shard_as_group) {
    HloSharding default_sharding = HloSharding::Replicate();
    std::vector<HloSharding> shardings;
    for (HloInstruction* instruction : shard_as_group) {
      if (instruction->has_sharding()) {
        shardings.push_back(instruction->sharding());
        if (!instruction->IsCustomCall(spmd::kShardBarrierFrom) &&
            default_sharding.IsReplicated()) {
          default_sharding = instruction->sharding();
        }
      }
    }
    HloSharding common_sharding =
        shardings.empty() ? default_sharding
                          : hlo_sharding_util::FindCommonSharding(
                                shardings, default_sharding);
    VLOG(2) << "Aligning shard group: " << shard_as_group_id
            << " to sharding: " << common_sharding.ToString();
    for (HloInstruction* member : shard_as_group) {
      if (member->IsCustomCall(spmd::kShardBarrierTo)) {
        continue;
      }
      if (provided_shardings.contains(member)) {
        auto it = unspecified_dims.find(member);
        if (it != unspecified_dims.end()) {
          HloSharding partial_replicated = hlo_sharding_util::
              PartiallyReplicateTiledShardingOnAllDimsExcept(common_sharding,
                                                             it->second);
          HloSharding sharding = member->sharding();
          if (hlo_sharding_util::MergeShardingIfCompatible(
                  partial_replicated, &sharding)) {
            member->set_sharding(sharding);
          }
        }
      }
      member->set_sharding(common_sharding);
    }
  }
  for (HloComputation* computation :
       module->computations(execution_threads)) {
    for (HloInstruction* instruction : computation->instructions()) {
      if (instruction->IsCustomCall(spmd::kShardBarrierFrom) &&
          instruction_to_shard_group_id.contains(instruction) &&
          shard_group_id_to_shard_as_group.contains(
              instruction_to_shard_group_id.at(instruction))) {
        HloSharding sharding = instruction->sharding();
        hlo_sharding_util::MergeShardingIfCompatible(
            instruction->mutable_operand(0)->sharding(), sharding.NumTiles(),
            &sharding);
        instruction->mutable_operand(0)->set_sharding(std::move(sharding));
      }
    }
  }
  {
    TF_ASSIGN_OR_RETURN(
        bool changed,
        RunToFixPoint(/*aggressiveness=*/3, /*propagate_shard_group=*/true,
                      computation_map, provided_shardings, *call_graph,
                      module, execution_threads, unspecified_dims,
                      instruction_to_shard_group_id,
                      shard_group_id_to_shard_as_group,
                      shard_group_id_to_shard_like_group, iterations));
    any_changed = any_changed || changed;
  }
  for (HloComputation* computation :
       module->computations(execution_threads)) {
    for (HloInstruction* instruction : computation->instructions()) {
      if (instruction->IsCustomCall(spmd::kShardBarrierFrom) &&
          instruction_to_shard_group_id.contains(instruction) &&
          shard_group_id_to_shard_as_group.contains(
              instruction_to_shard_group_id.at(instruction))) {
        HloSharding sharding = instruction->sharding();
        hlo_sharding_util::MergeShardingIfCompatible(
            instruction->mutable_operand(0)->sharding(), sharding.NumTiles(),
            &sharding);
        instruction->mutable_operand(0)->set_sharding(std::move(sharding));
      }
      if (instruction->IsCustomCall(spmd::kShardBarrierFrom) ||
          instruction->IsCustomCall(spmd::kShardBarrierTo)) {
        TF_ASSIGN_OR_RETURN(std::ignore,
                            computation->ReplaceInstruction(
                                instruction, instruction->mutable_operand(0),
                                /*preserve_sharding=*/false,
                                /*relay_control_dependency=*/false,
                                /*remove_unused_operands=*/false));
      }
    }
  }
  if (cse_prevention_only_) {
    for (auto computation : module->computations(execution_threads)) {
      for (auto instruction : computation->instructions()) {
        if (!instruction->has_sharding()) {
          continue;
        }
        if (IsCSEPreventionTarget(instruction) &&
            instruction->has_sharding()) {
          if (!(*original_sharding).contains(instruction)) {
            instruction->set_sharding(
                SetCSEPreventionSharding(instruction->sharding()));
          }
          continue;
        }
        auto it = (*original_sharding).find(instruction);
        if (it != (*original_sharding).end()) {
          instruction->set_sharding(it->second);
        } else {
          instruction->clear_sharding();
        }
      }
    }
  }
  HloInstruction* root_instruction =
      module->entry_computation()->root_instruction();
  if (saved_root_shardings.size() ==
          allow_spmd_sharding_propagation_to_output_vector_.size() &&
      root_instruction->has_sharding()) {
    HloSharding root_sharding = root_instruction->sharding();
    for (int i = 0; i < saved_root_shardings.size(); ++i) {
      if (!allow_spmd_sharding_propagation_to_output_vector_[i] &&
          !saved_root_shardings[i].IsUnknown()) {
        root_sharding.tuple_elements()[i] = saved_root_shardings[i];
      }
    }
    root_instruction->set_sharding(std::move(root_sharding));
  }
  auto params = module->entry_computation()->parameter_instructions();
  if (allow_spmd_sharding_propagation_to_parameters_) {
    if (allow_spmd_sharding_propagation_to_parameters_vector_.size() ==
        params.size()) {
      for (int64_t i = 0; i < params.size(); ++i) {
        if (!allow_spmd_sharding_propagation_to_parameters_vector_[i]) {
          if (saved_parameter_shardings.contains(i) &&
              !saved_parameter_shardings.at(i).IsUnknown()) {
            params[i]->set_sharding(saved_parameter_shardings.at(i));
          } else {
            params[i]->clear_sharding();
          }
        }
      }
    } else if (params.size() == 1 && saved_parameter_shardings.size() == 1 &&
               params[0]->shape().IsTuple() &&
               params[0]->shape().tuple_shapes_size() ==
                   allow_spmd_sharding_propagation_to_parameters_vector_
                       .size()) {
      HloSharding param_sharding = params[0]->sharding();
      for (int64_t i = 0; i < params[0]->shape().tuple_shapes_size(); ++i) {
        HloSharding saved_subsharding =
            saved_parameter_shardings.at(0).GetSubSharding(
                params[0]->shape(), {i});
        if (!allow_spmd_sharding_propagation_to_parameters_vector_[i] &&
            !saved_subsharding.IsUnknown()) {
          param_sharding.tuple_elements()[i] = saved_subsharding;
        }
      }
      params[0]->set_sharding(std::move(param_sharding));
    }
  }
  std::function<bool(const Shape&, const HloSharding&)> evenly_partitions =
      [&evenly_partitions](const Shape& shape,
                           const HloSharding& sharding) -> bool {
    if (!sharding.IsTiled()) {
      return true;
    }
    if (sharding.IsTileMaximal()) {
      return sharding.IsReplicated();
    }
    if (sharding.IsTuple()) {
      for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
        if (!evenly_partitions(ShapeUtil::GetTupleElementShape(shape, i),
                               sharding.GetSubSharding(shape, {i}))) {
          return false;
        }
      }
    }
    for (int64_t i = 0; i < shape.dimensions_size(); ++i) {
      if (shape.dimensions(i) % sharding.tile_assignment().dim(i) != 0) {
        return false;
      }
    }
    return true;
  };
  if (allow_spmd_sharding_propagation_to_output_ &&
      root_instruction->has_sharding()) {
    if (root_instruction->shape().IsTuple() &&
        allow_spmd_sharding_propagation_to_output_vector_.size() ==
            root_instruction->shape().tuple_shapes_size()) {
      HloSharding root_sharding = root_instruction->sharding();
      for (int64_t i = 0; i < root_instruction->shape().tuple_shapes_size();
           ++i) {
        if (allow_spmd_sharding_propagation_to_output_vector_[i] &&
            !evenly_partitions(root_instruction->shape().tuple_shapes(i),
                               root_sharding.tuple_elements()[i])) {
          root_sharding.tuple_elements()[i] = HloSharding::Replicate();
        }
      }
      root_instruction->set_sharding(std::move(root_sharding));
    } else if (!root_instruction->shape().IsTuple()) {
      if (!evenly_partitions(root_instruction->shape(),
                             root_instruction->sharding())) {
        root_instruction->set_sharding(HloSharding::Replicate());
      }
    }
  }
  if (allow_spmd_sharding_propagation_to_parameters_) {
    if (allow_spmd_sharding_propagation_to_parameters_vector_.size() ==
        params.size()) {
      for (int64_t i = 0; i < params.size(); ++i) {
        if (params[i]->has_sharding() &&
            allow_spmd_sharding_propagation_to_parameters_vector_[i] &&
            !evenly_partitions(params[i]->shape(), params[i]->sharding())) {
          params[i]->set_sharding(HloSharding::Replicate());
        }
      }
    } else if (params.size() == 1 && params[0]->shape().IsTuple() &&
               params[0]->has_sharding() &&
               params[0]->shape().tuple_shapes_size() ==
                   allow_spmd_sharding_propagation_to_parameters_vector_
                       .size()) {
      HloSharding param_sharding = params[0]->sharding();
      for (int64_t i = 0; i < params[0]->shape().tuple_shapes_size(); ++i) {
        if (allow_spmd_sharding_propagation_to_parameters_vector_[i] &&
            !evenly_partitions(
                ShapeUtil::GetSubshapeOneIndex(params[0]->shape(), i),
                params[0]->sharding().GetSubSharding(params[0]->shape(),
                                                     {i}))) {
          param_sharding.tuple_elements()[i] = HloSharding::Replicate();
        }
      }
      params[0]->set_sharding(std::move(param_sharding));
    }
  }
  TF_RETURN_IF_ERROR(
      hlo_sharding_util::CanonicalizeLayoutAfterShardingPropagation(
          module, allow_spmd_sharding_propagation_to_output_,
          allow_spmd_sharding_propagation_to_parameters_));
  VLOG(1) << "Sharding propagation completed after " << iterations
          << " iterations";
  return any_changed;
}
}  // namespace xla

// ==========================================================================
// Unit Test (Ground Truth): sharding_propagation_test.cc
// ==========================================================================

#include "xla/service/sharding_propagation.h"

#include <ostream>
#include <string>
#include <vector>

#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_op_metadata.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/transforms/hlo_constant_splitter.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/protobuf_util.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"

namespace op = xla::testing::opcode_matchers;

namespace xla {
namespace {

using ShardingPropagationTest = HloTestBase;

void ClearMetadata(HloModule* module) {
  for (HloComputation* computation : module->computations()) {
    for (HloInstruction* instruction : computation->instructions()) {
      if (instruction->metadata().ByteSizeLong() != 0) {
        instruction->set_metadata(OpMetadata());
      }
      if (!instruction->has_sharding()) {
        continue;
      }
      instruction->set_sharding(instruction->sharding().WithoutMetadata());
    }
  }
}

struct MetadataTestParameter {
  explicit MetadataTestParameter(bool propagate_metadata,
                                 bool clear_metadata)
      : propagate_metadata(propagate_metadata),
        clear_metadata(clear_metadata) {}
  bool propagate_metadata = false;
  bool clear_metadata = false;
};

struct MetadataTestParameterWithOutput {
  explicit MetadataTestParameterWithOutput(
      bool propagate_metadata, bool clear_metadata,
      bool allow_root_sharding_propagation)
      : propagate_metadata(propagate_metadata),
        clear_metadata(clear_metadata),
        allow_root_sharding_propagation(allow_root_sharding_propagation) {}
  bool propagate_metadata = false;
  bool clear_metadata = false;
  bool allow_root_sharding_propagation = false;
};

class ParameterizedMetadataTest
    : public HloTestBase,
      public ::testing::WithParamInterface<MetadataTestParameter> {};
class ParameterizedMetadataTestWithOutput
    : public HloTestBase,
      public ::testing::WithParamInterface<MetadataTestParameterWithOutput> {
};
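// The tests below are instantiated over every combination of
// propagate_metadata x clear_metadata (and, for the *WithOutput variants,
// allow_root_sharding_propagation), so each TEST_P body checks both that a
// sharding propagates to the expected instruction and that the sharding
// metadata travels with it only when metadata propagation is requested.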
metadata_strings.reserve(metadata.size());\n for (const OpMetadata& element : metadata) {\n metadata_strings.push_back(\n absl::StrCat(\"{\", OpMetadataToString(element), \"}\"));\n }\n return absl::StrCat(\"{\", absl::StrJoin(metadata_strings, \", \"), \"}\");\n}\nclass HloShardingMetadataMatcher\n : public ::testing::MatcherInterface {\n public:\n explicit HloShardingMetadataMatcher(absl::Span metadata)\n : metadata_(metadata.begin(), metadata.end()) {}\n bool MatchAndExplain(\n const HloSharding& sharding,\n ::testing::MatchResultListener* listener) const override {\n if (sharding.metadata().size() != metadata_.size()) {\n *listener << sharding.ToString(true)\n << \" has incorrect sharding metadata (expected: \"\n << OpMetadataListToString(metadata_) << \")\";\n return false;\n }\n for (int i = 0, e = metadata_.size(); i < e; ++i) {\n if (!protobuf_util::ProtobufEquals(sharding.metadata()[i],\n metadata_[i])) {\n *listener << sharding.ToString(true)\n << \" has incorrect sharding metadata (expected: \"\n << OpMetadataListToString(metadata_) << \")\";\n return false;\n }\n }\n return true;\n }\n void DescribeTo(std::ostream* os) const override {\n *os << OpMetadataListToString(metadata_);\n }\n private:\n std::vector metadata_;\n};\n::testing::Matcher ShardingMetadata(\n absl::Span metadata) {\n return ::testing::MakeMatcher(new HloShardingMetadataMatcher(metadata));\n}\nOpMetadata CreateMetadata(const std::string& op_name) {\n OpMetadata metadata;\n metadata.set_op_name(op_name);\n return metadata;\n}\nINSTANTIATE_TEST_SUITE_P(\n ShardingPropagation, ParameterizedMetadataTest,\n ::testing::Values(MetadataTestParameter(false,\n false),\n MetadataTestParameter(false,\n true),\n MetadataTestParameter(true,\n false),\n MetadataTestParameter(true,\n true)),\n [](const ::testing::TestParamInfo& info) {\n return absl::StrCat(info.param.propagate_metadata\n ? \"MetadataPropagation\"\n : \"NoMetadataPropagation\",\n \"_\",\n info.param.clear_metadata ? \"NoMetadataInModule\"\n : \"MetadataInModule\");\n });\nINSTANTIATE_TEST_SUITE_P(\n ShardingPropagation, ParameterizedMetadataTestWithOutput,\n ::testing::Values(MetadataTestParameterWithOutput(\n false,\n false,\n false),\n MetadataTestParameterWithOutput(\n false,\n true,\n false),\n MetadataTestParameterWithOutput(\n true,\n false,\n false),\n MetadataTestParameterWithOutput(\n true,\n true,\n false),\n MetadataTestParameterWithOutput(\n false,\n false,\n true),\n MetadataTestParameterWithOutput(\n false,\n true,\n true),\n MetadataTestParameterWithOutput(\n true,\n false,\n true),\n MetadataTestParameterWithOutput(\n true,\n true,\n true)),\n [](const ::testing::TestParamInfo& info) {\n return absl::StrCat(\n info.param.propagate_metadata ? \"MetadataPropagation\"\n : \"NoMetadataPropagation\",\n \"_\",\n info.param.clear_metadata ? \"NoMetadataInModule\" : \"MetadataInModule\",\n \"_\",\n info.param.allow_root_sharding_propagation ? 
\"PropagateToRoot\"\n : \"NoPropagateToRoot\");\n });\nTEST_P(ParameterizedMetadataTest, ShardingMetadataFromInstruction) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %elementwise {\n %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),\n sharding={devices=[1,2,2,1]0,1,2,3},\n metadata={op_name=\"test\"}\n ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n EXPECT_EQ(changed,\n GetParam().propagate_metadata && !GetParam().clear_metadata);\n auto* instruction = FindInstruction(module.get(), \"param0\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[1,2,2,1]0,1,2,3}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"test\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_F(ShardingPropagationTest, ShardingMetadataFromInstructionNoOverwrite) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %elementwise {\n %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),\n sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name=\"name\"}},\n metadata={op_name=\"test\"}\n ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n ShardingPropagation(false,\n true)\n .Run(module.get()));\n EXPECT_FALSE(changed);\n auto* instruction = FindInstruction(module.get(), \"param0\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[1,2,2,1]0,1,2,3}\"));\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"name\")}));\n}\nTEST_F(ShardingPropagationTest, ShardingMetadataFromInstructionNoMetadata) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %elementwise {\n %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),\n sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name=\"name\"}}\n ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n ShardingPropagation(false,\n true)\n .Run(module.get()));\n EXPECT_FALSE(changed);\n auto* instruction = FindInstruction(module.get(), \"param0\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[1,2,2,1]0,1,2,3}\"));\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"name\")}));\n}\nTEST_F(ShardingPropagationTest, ShardingNoMetadataAndInstructionNoMetadata) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %elementwise {\n %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),\n sharding={devices=[1,2,2,1]0,1,2,3}\n ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(bool changed,\n ShardingPropagation(false,\n true)\n .Run(module.get()));\n EXPECT_FALSE(changed);\n auto* instruction = FindInstruction(module.get(), \"param0\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[1,2,2,1]0,1,2,3}\"));\n EXPECT_THAT(instruction->sharding(), 
TEST_P(ParameterizedMetadataTest, ElementwiseOperationForwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
  %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
    sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
  %param1 = f32[5,7,11,13]{3,2,1,0} parameter(1)
  %add = f32[5,7,11,13]{3,2,1,0} add(%param0, %param1)
  ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "add");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ElementwiseOperationBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
  %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0)
  %param1 = f32[5,7,11,13]{3,2,1,0} parameter(1)
  %add = f32[5,7,11,13]{3,2,1,0} add(%param0, %param1)
  ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%add),
    sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "add");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTestWithOutput, BroadcastForwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
  %param0 = f32[3,2048,2048]{2,1,0} parameter(0),
    sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="a"}}
  %broadcast = f32[3,2048,2048,3]{3,2,1,0} broadcast(%param0), dimensions={0,1,2}
  ROOT %copy = f32[3,2048,2048,3]{3,2,1,0} copy(%broadcast)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata,
                          {GetParam().allow_root_sharding_propagation})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "broadcast");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
  if (GetParam().allow_root_sharding_propagation) {
    EXPECT_THAT(module->entry_computation()->root_instruction(),
                op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
  }
}
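// "ShardBarrierFrom" / "ShardBarrierTo" custom calls act as propagation
// barriers: the *WithBarrier tests verify that no sharding flows through
// them, in the forward or backward direction respectively.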
TEST_P(ParameterizedMetadataTestWithOutput, BroadcastForwardPassWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
  %param0 = f32[3,2048,2048]{2,1,0} parameter(0),
    sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="a"}}
  %shard-barrier-from = f32[3,2048,2048]{2,1,0} custom-call(%param0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %broadcast = f32[3,2048,2048,3]{3,2,1,0} broadcast(%shard-barrier-from), dimensions={0,1,2}
  ROOT %copy = f32[3,2048,2048,3]{3,2,1,0} copy(%broadcast)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata,
                          {GetParam().allow_root_sharding_propagation})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "broadcast");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}

TEST_P(ParameterizedMetadataTest, BroadcastBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
  %param0 = f32[13]{0} parameter(0)
  %broadcast = f32[5,7,11,13]{3,2,1,0} broadcast(%param0), dimensions={3}
  ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%broadcast),
    sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "broadcast");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, BroadcastBackwardPassWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
  %param0 = f32[13]{0} parameter(0)
  %param0_copy = f32[13]{0} copy(param0)
  %shard-barrier-to = f32[13]{0} custom-call(%param0_copy), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
  %broadcast = f32[5,7,11,13]{3,2,1,0} broadcast(%shard-barrier-to), dimensions={3}
  ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%broadcast),
    sharding={devices=[1,1,2,2]0,1,2,3 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "param0_copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{replicated}"));
}
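// When an instruction already carries a sharding compatible with what would
// be propagated, the pass reports no change, as in the 1D-broadcast case
// below.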
const hlo_string = R\"(\nHloModule module\nENTRY %broadcast {\n %param0 = s32[128]{0} parameter(0)\n %constant0 = s32[] constant(0), sharding={replicated}\n %broadcast = s32[128]{0} broadcast(%constant0), dimensions={}, sharding={replicated}\n ROOT %compare = pred[128]{0} compare(s32[128]{0} %param0, s32[128]{0} %broadcast),\n direction=NE, sharding={devices=[4]0,1,2,3}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n EXPECT_FALSE(changed);\n auto* instruction = FindInstruction(module.get(), \"broadcast\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{replicated}\"));\n}\nTEST_P(ParameterizedMetadataTestWithOutput, BroadcastForwardPartial) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %broadcast {\n %param0 = f32[3,2048]parameter(0),\n sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name=\"a\"}}\n %broadcast = f32[3,2048,3] broadcast(%param0), dimensions={0,1}\n ROOT %copy = f32[3,2048,3] copy(%broadcast)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata,\n {GetParam().allow_root_sharding_propagation})\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"broadcast\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(\n instruction,\n op::Sharding(\"{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n if (GetParam().allow_root_sharding_propagation) {\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Sharding(\"{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}\"));\n }\n}\nTEST_P(ParameterizedMetadataTest, BroadcastMerge) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %broadcast {\n %param0 = f32[3,2048]parameter(0),\n sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name=\"a\"}}\n %broadcast = f32[3,2048,3] broadcast(%param0), dimensions={0,1}\n ROOT %copy = f32[3,2048,3] copy(%broadcast),\n sharding={devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate metadata={op_name=\"b\"}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"broadcast\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[1,2,2]0,1,2,3}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\"), CreateMetadata(\"b\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, BroadcastUser) 
TEST_P(ParameterizedMetadataTest, BroadcastUser) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
  %param0 = f32[24,8]{0,1} parameter(0)
  %copy = f32[24,8]{0,1} copy(%param0)
  ROOT %broadcast = f32[4,24,6,8]{3,2,1,0} broadcast(%copy), dimensions={1,3},
    sharding={devices=[1,2,1,4]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,4]0,1,2,3,4,5,6,7}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTestWithOutput, BroadcastUserPartial) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
  %param0 = f32[24,8]{0,1} parameter(0)
  %copy = f32[24,8]{0,1} copy(%param0)
  ROOT %broadcast = f32[4,24,6,8] broadcast(%copy), dimensions={1,3},
    sharding={devices=[4,2,1,1]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata,
                          {GetParam().allow_root_sharding_propagation})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding("{devices=[2,1,4]0,2,4,6,1,3,5,7 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
  if (GetParam().allow_root_sharding_propagation) {
    EXPECT_THAT(module->entry_computation()->root_instruction(),
                op::Sharding("{devices=[4,2,1,1]0,1,2,3,4,5,6,7}"));
  }
}
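// Reduce tests: sharding on dimensions that survive the reduction is kept,
// while sharding on reduced dimensions is either dropped ({replicated}) or,
// in SPMD mode, folded into a last_tile_dim_replicate subgroup.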
TEST_P(ParameterizedMetadataTest, MaximalReduceForwardPass) {
  const char* const hlo_string = R"(
HloModule module
%add {
  %lhs = f32[] parameter(0)
  %rhs = f32[] parameter(1)
  ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
  %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
    sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
  %init = f32[] parameter(1)
  %reduce = f32[5,7]{1,0} reduce(%param0, %init), dimensions={2,3}, to_apply=%add
  ROOT %copy = f32[5,7]{0,1} copy(%reduce)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "reduce");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{replicated}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_F(ShardingPropagationTest, ManualTupleReduceForwardPass) {
  const char* const hlo_string = R"(
HloModule module
%minmax_func {
  %lhs_value = f32[] parameter(0)
  %rhs_value = f32[] parameter(2)
  %compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT
  %select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value)
  %lhs_index = s32[] parameter(1)
  %rhs_index = s32[] parameter(3)
  %select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index)
  ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5)
}
ENTRY %reduce {
  get-tuple-element.416 = f32[2,1,128]{2,1,0} parameter(0), sharding={manual}
  get-tuple-element.417 = s32[2,1,128]{2,1,0} parameter(1), sharding={manual}
  constant.3793 = f32[] constant(0)
  constant.3795 = s32[] constant(0)
  reduce.418 = (f32[2,1]{1,0}, s32[2,1]{1,0}) reduce(
    get-tuple-element.416, get-tuple-element.417, constant.3793, constant.3795),
    dimensions={2}, to_apply=minmax_func
  ROOT %copy = (f32[2,1]{1,0}, s32[2,1]{1,0}) copy(%reduce.418)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "reduce.418");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{{manual}, {manual}}"));
}

TEST_P(ParameterizedMetadataTest, ShardedReduceForwardPass) {
  const char* const hlo_string = R"(
HloModule module
%add {
  %lhs = f32[] parameter(0)
  %rhs = f32[] parameter(1)
  ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
  %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
    sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
  %init = f32[] parameter(1)
  %reduce = f32[7,11]{1,0} reduce(%param0, %init), dimensions={0,3}, to_apply=%add
  ROOT %copy = f32[7,11]{0,1} copy(f32[7,11]{1,0} %reduce)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "reduce");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
TEST_P(ParameterizedMetadataTest, ReduceForwardPassWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
%add {
  %lhs = f32[] parameter(0)
  %rhs = f32[] parameter(1)
  ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
  %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
    sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
  %init = f32[] parameter(1)
  %shard-barrier-from = f32[5,7,11,13]{3,2,1,0} custom-call(%param0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %reduce = f32[7,11]{1,0} reduce(%shard-barrier-from, %init), dimensions={0,3}, to_apply=%add
  ROOT %copy = f32[7,11]{0,1} copy(f32[7,11]{1,0} %reduce)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "reduce");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}

TEST_P(ParameterizedMetadataTest, ReducePartiallyOnTiledDims) {
  const char* const hlo_string = R"(
HloModule module
%add {
  %lhs = f32[] parameter(0)
  %rhs = f32[] parameter(1)
  ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
  %param0 = f32[8,8] parameter(0),
    sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}}
  %init = f32[] parameter(1)
  %reduce = f32[8] reduce(%param0, %init), dimensions={0}, to_apply=%add
  ROOT %copy = f32[8] copy(%reduce)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "reduce");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[2,2]0,2,1,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ReducePartiallyOnTiledDims2) {
  const char* const hlo_string = R"(
HloModule module
%add {
  %lhs = f32[] parameter(0)
  %rhs = f32[] parameter(1)
  ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
  %param0 = f32[8,8] parameter(0),
    sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}}
  %init = f32[] parameter(1)
  %reduce = f32[8] reduce(%param0, %init), dimensions={0}, to_apply=%add
  ROOT %copy = f32[8] copy(%reduce)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "reduce");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding("{devices=[2,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
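// ReducePartiallyBackward propagates a partially replicated sharding from
// the reduce result back onto its (copied) input.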
TEST_P(ParameterizedMetadataTest, ReducePartiallyBackward) {
  const char* const hlo_string = R"(
HloModule module
%add {
  %lhs = f32[] parameter(0)
  %rhs = f32[] parameter(1)
  ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
  %param0 = f32[8,8] parameter(0)
  %input = f32[8,8] copy(%param0)
  %init = f32[] parameter(1)
  %reduce = f32[8] reduce(%input, %init), dimensions={0}, to_apply=%add,
    sharding={devices=[2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
  ROOT %copy = f32[8] copy(%reduce)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "input");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ReduceBackwardWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
%add {
  %lhs = f32[] parameter(0)
  %rhs = f32[] parameter(1)
  ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
  %param0 = f32[8,8] parameter(0)
  %input = f32[8,8] copy(%param0)
  %init = f32[] parameter(1)
  %shard-barrier-to = f32[8,8] custom-call(%input), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
  %reduce = f32[8] reduce(%shard-barrier-to, %init), dimensions={0}, to_apply=%add,
    sharding={devices=[2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
  ROOT %copy = f32[8] copy(%reduce)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "input");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}

TEST_P(ParameterizedMetadataTestWithOutput,
       ShardedOnNonReduceDimTupleReduceForwardAndBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
%minmax_func {
  %lhs_value = f32[] parameter(0)
  %rhs_value = f32[] parameter(2)
  %compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT
  %select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value)
  %lhs_index = s32[] parameter(1)
  %rhs_index = s32[] parameter(3)
  %select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index)
  ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5)
}
ENTRY %main {
  %param0 = f32[28,10] parameter(0)
  %param1 = s32[28,10] parameter(1), sharding={devices=[2,1]0,1 metadata={op_name="a"}}
  %copy_param0 = f32[28,10] copy(%param0)
  %init0 = f32[] parameter(2)
  %init1 = s32[] parameter(3)
  %reduce = (f32[28], s32[28]) reduce(%copy_param0, %param1, %init0, %init1),
    dimensions={1}, to_apply=%minmax_func
  %gte0 = f32[28] get-tuple-element(%reduce), index=0
  %gte1 = s32[28] get-tuple-element(%reduce), index=1
  %copy0 = f32[28] copy(%gte0)
  %copy1 = s32[28] copy(%gte1)
  ROOT %tuple = (f32[28], s32[28]) tuple(%copy0, %copy1)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata,
                          {GetParam().allow_root_sharding_propagation})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* reduce = FindInstruction(module.get(), "reduce");
  ASSERT_NE(reduce, nullptr);
  EXPECT_THAT(reduce, op::Sharding("{{devices=[2]0,1},{devices=[2]0,1}}"));
  auto* copy_param0 = FindInstruction(module.get(), "copy_param0");
  ASSERT_NE(copy_param0, nullptr);
  EXPECT_THAT(copy_param0, op::Sharding("{devices=[2,1]0,1}"));
  for (const HloSharding& sharding :
       {copy_param0->sharding(), reduce->sharding().tuple_elements()[0],
        reduce->sharding().tuple_elements()[1]}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(sharding, ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(sharding, ShardingMetadata({}));
    }
  }
  if (GetParam().allow_root_sharding_propagation) {
    EXPECT_THAT(module->entry_computation()->root_instruction(),
                op::Sharding("{{devices=[2]0,1},{devices=[2]0,1}}"));
  }
}
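// Same tuple reduce, but sharded on the dimension being reduced: the device
// mesh survives only as a last_tile_dim_replicate subgroup on the results.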
TEST_P(ParameterizedMetadataTestWithOutput,
       ShardedOnReduceDimTupleReduceForwardAndBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
%minmax_func {
  %lhs_value = f32[] parameter(0)
  %rhs_value = f32[] parameter(2)
  %compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT
  %select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value)
  %lhs_index = s32[] parameter(1)
  %rhs_index = s32[] parameter(3)
  %select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index)
  ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5)
}
ENTRY %main {
  %param0 = f32[28,10] parameter(0)
  %param1 = s32[28,10] parameter(1), sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}}
  %copy_param0 = f32[28,10] copy(%param0)
  %init0 = f32[] parameter(2)
  %init1 = s32[] parameter(3)
  %reduce = (f32[28], s32[28]) reduce(%copy_param0, %param1, %init0, %init1),
    dimensions={1}, to_apply=%minmax_func
  %gte0 = f32[28] get-tuple-element(%reduce), index=0
  %gte1 = s32[28] get-tuple-element(%reduce), index=1
  %copy0 = f32[28] copy(%gte0)
  %copy1 = s32[28] copy(%gte1)
  ROOT %tuple = (f32[28], s32[28]) tuple(%copy0, %copy1)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata,
                          {GetParam().allow_root_sharding_propagation})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* reduce = FindInstruction(module.get(), "reduce");
  ASSERT_NE(reduce, nullptr);
  EXPECT_THAT(reduce, op::Sharding("{{devices=[2,2]0,1,2,3 "
                                   "last_tile_dim_replicate},{devices=[2,2]0,1,"
                                   "2,3 last_tile_dim_replicate}}"));
  auto* copy_param0 = FindInstruction(module.get(), "copy_param0");
  ASSERT_NE(copy_param0, nullptr);
  EXPECT_THAT(copy_param0, op::Sharding("{devices=[2,2]0,1,2,3}"));
  for (const HloSharding& sharding :
       {copy_param0->sharding(), reduce->sharding().tuple_elements()[0],
        reduce->sharding().tuple_elements()[1]}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(sharding, ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(sharding, ShardingMetadata({}));
    }
  }
  if (GetParam().allow_root_sharding_propagation) {
    EXPECT_THAT(module->entry_computation()->root_instruction(),
                op::Sharding("{{devices=[2,2]0,1,2,3 "
                             "last_tile_dim_replicate},{devices=[2,2]0,1,2,3 "
                             "last_tile_dim_replicate}}"));
  }
}
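// GetTupleElement tests: a (possibly nested) tuple sharding is split up so
// that each get-tuple-element receives the sub-sharding, and the metadata,
// of the element it extracts.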
R\"(\nHloModule module\nENTRY %gte {\n %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0)\n %tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple(\n %param0, %param0)\n %tuple.1 = (f32[5,7,11,13]{3,2,1,0},\n (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0})) tuple(\n %param0, %tuple),\n sharding={{devices=[1,2,2,1]0,1,2,3 metadata={op_name=\"a\"}},\n {replicated metadata={op_name=\"b\"}},\n {devices=[1,2,2,1]0,1,2,3 metadata={op_name=\"c\"}}}\n %gte = f32[5,7,11,13]{3,2,1,0} get-tuple-element(%tuple.1), index=0\n %gte.1 = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) get-tuple-element(\n %tuple.1), index=1\n %gte.2 = f32[5,7,11,13]{3,2,1,0} get-tuple-element(%gte.1), index=0\n ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%gte.2)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata,\n {GetParam().allow_root_sharding_propagation})\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* gte = FindInstruction(module.get(), \"gte\");\n ASSERT_NE(gte, nullptr);\n EXPECT_THAT(gte, op::Sharding(\"{devices=[1,2,2,1]0,1,2,3}\"));\n auto* gte1 = FindInstruction(module.get(), \"gte.1\");\n ASSERT_NE(gte1, nullptr);\n EXPECT_THAT(gte1, op::Sharding(\"{{replicated}, {devices=[1,2,2,1]0,1,2,3}}\"));\n auto* gte2 = FindInstruction(module.get(), \"gte.2\");\n ASSERT_NE(gte2, nullptr);\n EXPECT_THAT(gte2, op::Sharding(\"{replicated}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(gte->sharding(), ShardingMetadata({CreateMetadata(\"a\")}));\n EXPECT_THAT(gte1->sharding().tuple_elements()[0],\n ShardingMetadata({CreateMetadata(\"b\")}));\n EXPECT_THAT(gte1->sharding().tuple_elements()[1],\n ShardingMetadata({CreateMetadata(\"c\")}));\n EXPECT_THAT(gte2->sharding(), ShardingMetadata({CreateMetadata(\"b\")}));\n } else {\n for (const HloSharding& sharding :\n {gte->sharding(), gte1->sharding().tuple_elements()[0],\n gte1->sharding().tuple_elements()[1], gte2->sharding()}) {\n EXPECT_THAT(sharding, ShardingMetadata({}));\n }\n }\n if (GetParam().allow_root_sharding_propagation) {\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Sharding(\"{replicated}\"));\n }\n}\nTEST_P(ParameterizedMetadataTestWithOutput,\n GetTupleElementForwardPassWithBarrier) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %gte {\n %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0)\n %tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple(\n %param0, %param0), sharding={{devices=[1,2,2,1]0,1,2,3 metadata={op_name=\"a\"}},\n {replicated metadata={op_name=\"b\"}}}\n %shard-barrier-from = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) custom-call(%tuple), custom_call_target=\"ShardBarrierFrom\", custom_call_has_side_effect=true\n %gte = f32[5,7,11,13]{3,2,1,0} get-tuple-element(%shard-barrier-from), index=0\n ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%gte)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n std::ignore,\n ShardingPropagation(false, GetParam().propagate_metadata,\n {GetParam().allow_root_sharding_propagation})\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n auto* gte = FindInstruction(module.get(), \"gte\");\n ASSERT_NE(gte, 
TEST_P(ParameterizedMetadataTest, TupleForwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %tuple {
  %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %param1 = f32[5,7,11,13]{3,2,1,0} parameter(1),
    sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="b"}}
  %param2 = f32[5,7,11,13]{3,2,1,0} parameter(2)
  %tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple(
    %param1, %param2)
  %tuple.1 = (f32[5,7,11,13]{3,2,1,0},
    (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0})) tuple(
    %param0, %tuple)
  ROOT %copy = (f32[5,7,11,13]{3,2,1,0},
    (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0})) copy(
    %tuple.1)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* tuple = FindInstruction(module.get(), "tuple");
  ASSERT_NE(tuple, nullptr);
  EXPECT_THAT(tuple, op::Sharding("{{devices=[1,2,2,1]0,1,2,3},"
                                  " {replicated}}"));
  auto* tuple1 = FindInstruction(module.get(), "tuple.1");
  ASSERT_NE(tuple1, nullptr);
  EXPECT_THAT(tuple1, op::Sharding("{{replicated},"
                                   " {devices=[1,2,2,1]0,1,2,3},"
                                   " {replicated}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(tuple->sharding().tuple_elements()[0],
                ShardingMetadata({CreateMetadata("b")}));
    EXPECT_THAT(tuple->sharding().tuple_elements()[1], ShardingMetadata({}));
    EXPECT_THAT(tuple1->sharding().tuple_elements()[0],
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(tuple1->sharding().tuple_elements()[1],
                ShardingMetadata({CreateMetadata("b")}));
    EXPECT_THAT(tuple1->sharding().tuple_elements()[2], ShardingMetadata({}));
  } else {
    for (const HloSharding& tuple_sharding :
         {tuple->sharding(), tuple1->sharding()}) {
      for (const HloSharding& sub_sharding : tuple_sharding.tuple_elements()) {
        EXPECT_THAT(sub_sharding, ShardingMetadata({}));
      }
    }
  }
}
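// TupleForwardPass_SplatBug is a regression test for tuple shardings where
// one element uses a manual subgroup (last_tile_dims={manual}).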
ShardingMetadata({CreateMetadata(\"b\")}));\n EXPECT_THAT(tuple->sharding().tuple_elements()[1], ShardingMetadata({}));\n } else {\n for (const HloSharding& sub_sharding : tuple->sharding().tuple_elements()) {\n EXPECT_THAT(sub_sharding, ShardingMetadata({}));\n }\n }\n}\nTEST_P(ParameterizedMetadataTest, TupleForwardPassAndBackWardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %tuple {\n %param0 = f32[256,2]{1,0} parameter(0),\n sharding={manual metadata={op_name=\"a\"}}\n %param1 = f32[256,2]{1,0} parameter(1),\n sharding={devices=[1,2]0,1 metadata={op_name=\"b\"}}\n %constant = s32[1,2]{1,0} constant({{0,1}})\n %gather = f32[1,32,2]{2,1,0} gather(param0, constant), offset_dims={1,2}, collapsed_slice_dims={}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={32,2}\n %tuple = (f32[1,32,2]{2,1,0}, f32[256,2]{1,0}) tuple(\n %gather, %param1)\n ROOT %copy = (f32[1,32,2]{2,1,0}, f32[256,2]{1,0}) copy(%tuple)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* tuple = FindInstruction(module.get(), \"tuple\");\n ASSERT_NE(tuple, nullptr);\n EXPECT_THAT(tuple, op::Sharding(\"{{manual}, {devices=[1,2]0,1}}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(tuple->sharding().tuple_elements()[0],\n ShardingMetadata({CreateMetadata(\"a\")}));\n EXPECT_THAT(tuple->sharding().tuple_elements()[1],\n ShardingMetadata({CreateMetadata(\"b\")}));\n } else {\n for (const HloSharding& sub_sharding : tuple->sharding().tuple_elements()) {\n EXPECT_THAT(sub_sharding, ShardingMetadata({}));\n }\n }\n}\nTEST_P(ParameterizedMetadataTest, TupleShapedBackWardPass) {\n const char* const hlo_string = R\"(\nHloModule module\n%cond {\n %vars.cond = (u32[], f32[]) parameter(0)\n %count.cond = u32[] get-tuple-element(%vars.cond), index=0\n %limit = u32[] constant(10)\n ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT\n}\n%body {\n %param = (u32[], f32[]) parameter(0)\n %count = u32[] get-tuple-element(%param), index=0\n %after-all = token[] after-all()\n %recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1\n %recv-done = (f32[], token[]) recv-done(%recv), channel_id=1\n %data = f32[] get-tuple-element(%recv-done), index=0\n ROOT %tuple = (u32[], f32[]) tuple(%count, %data)\n}\nENTRY %entry {\n %zero = u32[] constant(0), sharding={replicated metadata={op_name=\"a\"}}\n %p0 = f32[] parameter(0), sharding={manual metadata={op_name=\"b\"}}\n %tuple = (u32[], f32[]) tuple(%zero, %p0)\n %while = (u32[], f32[]) while(%tuple), body=%body, condition=%cond,\n sharding={{manual metadata={op_name=\"c\"}},\n {manual metadata={op_name=\"d\"}}}\n ROOT %result = f32[] get-tuple-element(%while), index=1\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* tuple = FindInstruction(module.get(), \"tuple\");\n ASSERT_NE(tuple, nullptr);\n EXPECT_THAT(tuple, op::Sharding(\"{{manual}, {manual}}\"));\n if (GetParam().propagate_metadata && 
TEST_P(ParameterizedMetadataTest,
       PartiallyManualTupleWithRepeatedOperandsBackWardPass) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (s32[], s32[], s32[]) parameter(0)
  %count.cond = s32[] get-tuple-element(%vars.cond), index=0
  %limit = s32[] constant(10)
  ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
  %param = (s32[], s32[], s32[]) parameter(0)
  %count = s32[] get-tuple-element(%param), index=0
  %lhs = s32[] get-tuple-element(%param), index=1
  %rhs = s32[] get-tuple-element(%param), index=2
  %add = s32[] add(%lhs, %rhs)
  ROOT %tuple = (s32[], s32[], s32[]) tuple(%count, %lhs, %add)
}
ENTRY %entry {
  %zero = s32[] constant(0)
  %p0 = s32[] parameter(0), sharding={manual metadata={op_name="a"}}
  %tuple = (s32[], s32[], s32[]) tuple(%zero, %zero, %p0)
  %while = (s32[], s32[], s32[]) while(%tuple), body=%body, condition=%cond
  ROOT %copy = (s32[], s32[], s32[]) copy(%while)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* tuple = module->entry_computation()->root_instruction()->operand(0);
  ASSERT_NE(tuple, nullptr);
  EXPECT_THAT(tuple, op::Sharding("{{manual}, {manual}, {manual}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(tuple->sharding().tuple_elements()[0],
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(tuple->sharding().tuple_elements()[1],
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(tuple->sharding().tuple_elements()[2],
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    for (const HloSharding& sub_sharding : tuple->sharding().tuple_elements()) {
      EXPECT_THAT(sub_sharding, ShardingMetadata({}));
    }
  }
}
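// Convolution tests: with a small kernel, the batch/spatial tiling of the
// LHS transfers directly to the convolution result.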
ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ForwardConvolutionLargeDilationForwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %conv {\n %lhs = f32[8,64,2]{2,1,0} parameter(0),\n sharding={devices=[1,4,1]0,1,2,3 metadata={op_name=\"a\"}}\n %rhs = f32[3,2,2]{2,1,0} parameter(1)\n %convolution = f32[8,32,2]{2,1,0} convolution(%lhs, %rhs),\n window={size=3 rhs_dilate=16}, dim_labels=b0f_0io->b0f\n ROOT %copy = f32[8,32,2]{2,1,0} copy(%convolution)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"convolution\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[1,4,1]0,1,2,3}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ForwardConvolution3DSmallKernel) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %conv {\n %lhs = bf16[32,32,8,7,128]{4,3,2,1,0} parameter(0),\n sharding={devices=[1,4,1,1,1]0,1,2,3 metadata={op_name=\"a\"}}\n %rhs = bf16[3,3,3,128,256]{4,3,2,1,0} parameter(1)\n %convolution = bf16[16,16,8,3,256]{4,3,2,1,0}\n convolution(bf16[32,32,8,7,128]{4,3,2,1,0} %lhs,\n bf16[3,3,3,128,256]{4,3,2,1,0} %rhs),\n window={size=3x3x3 stride=2x2x2 pad=1_1x1_1x0_0},\n dim_labels=01b2f_012io->01b2f\n ROOT %copy = bf16[16,16,8,3,256]{4,3,2,1,0} copy(%convolution)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"convolution\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[1,4,1,1,1]0,1,2,3}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, TransposeForwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %transpose {\n %param = f32[7,11,13]{2,1,0} parameter(0),\n sharding={devices=[2,1,2]0,1,2,3 metadata={op_name=\"a\"}}\n %transpose = f32[11,13,7]{2,1,0} transpose(%param), dimensions={1,2,0}\n ROOT %copy = f32[11,13,7]{2,1,0} copy(%transpose)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"transpose\");\n ASSERT_NE(instruction, nullptr);\n 
EXPECT_THAT(instruction, op::Sharding(\"{devices=[1,2,2]0,2,1,3}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, TransposeForwardPassWithBarrier) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %transpose {\n %param = f32[7,11,13]{2,1,0} parameter(0),\n sharding={devices=[2,1,2]0,1,2,3 metadata={op_name=\"a\"}}\n %shard-barrier-from = f32[7,11,13]{2,1,0} custom-call(%param), custom_call_target=\"ShardBarrierFrom\", custom_call_has_side_effect=true\n %transpose = f32[11,13,7]{2,1,0} transpose(%shard-barrier-from), dimensions={1,2,0}\n ROOT %copy = f32[11,13,7]{2,1,0} copy(%transpose)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n std::ignore,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n auto* instruction = FindInstruction(module.get(), \"transpose\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_FALSE(instruction->has_sharding());\n}\nTEST_P(ParameterizedMetadataTest, TransposeBackwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %transpose {\n %param = f32[7,11,13]{2,1,0} parameter(0)\n %copy = f32[7,11,13]{2,1,0} copy(%param)\n ROOT %transpose = f32[11,13,7]{2,1,0} transpose(%copy), dimensions={1,2,0},\n sharding={devices=[1,2,2]0,1,2,3 metadata={op_name=\"a\"}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"copy\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[2,1,2]0,2,1,3}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, TransposeBackwardPassWithBarrier) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %transpose {\n %param = f32[7,11,13]{2,1,0} parameter(0)\n %copy = f32[7,11,13]{2,1,0} copy(%param)\n %shard-barrier-to = f32[7,11,13]{2,1,0} custom-call(%copy), custom_call_target=\"ShardBarrierTo\", custom_call_has_side_effect=true\n ROOT %transpose = f32[11,13,7]{2,1,0} transpose(%shard-barrier-to), dimensions={1,2,0},\n sharding={devices=[1,2,2]0,1,2,3 metadata={op_name=\"a\"}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n std::ignore,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n auto* instruction = FindInstruction(module.get(), \"copy\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_FALSE(instruction->has_sharding());\n}\nTEST_P(ParameterizedMetadataTest, ReshapeForwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %reshape {\n 
TEST_P(ParameterizedMetadataTest, ReshapeForwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[1430,1]{1,0} parameter(0),
    sharding={devices=[2,1]0,1 metadata={op_name="a"}}
  %reshape = f32[10,11,13]{2,1,0} reshape(%param0)
  ROOT %copy = f32[10,11,13]{2,1,0} copy(%reshape)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "reshape");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ReshapeForwardPassWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[1430,1]{1,0} parameter(0),
    sharding={devices=[2,1]0,1 metadata={op_name="a"}}
  %shard-barrier-from = f32[1430,1]{1,0} custom-call(%param0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %reshape = f32[10,11,13]{2,1,0} reshape(%shard-barrier-from)
  ROOT %copy = f32[10,11,13]{2,1,0} copy(%reshape)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "reshape");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}

TEST_P(ParameterizedMetadataTest, ReshapeForwardPassPartialMatch) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[14,32] parameter(0),
    sharding={devices=[4,4]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 metadata={op_name="a"}}
  %reshape = f32[7,2,2,16] reshape(%param0)
  ROOT %copy = f32[7,2,2,16] copy(%reshape)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "reshape");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[1,1,2,2,4]0,4,8,12,1,5,9,13,2,6,10,14,3,"
                           "7,11,15 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ReshapeForwardPassPartialMatch2) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[12,8] parameter(0),
    sharding={devices=[2,4]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
  %reshape = f32[8,12] reshape(%param0)
  ROOT %copy = f32[8,12] copy(%reshape)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "reshape");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding("{devices=[2,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
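// One source sharding, six different reshapes: each target shape recovers as
// much of the {devices=[6,2,1]<=[12]} tiling as its dimensions allow, down
// to {replicated} when nothing is recoverable.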
TEST_P(ParameterizedMetadataTest, ReshapeForwardPassTranspose) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[6,4,5] parameter(0), sharding={devices=[6,2,1]<=[12] metadata={op_name="a"}}
  %reshape.1 = f32[2,3,20] reshape(%param0)
  %reshape.2 = f32[2,4,3,5] reshape(%param0)
  %reshape.3 = f32[20,6] reshape(%param0)
  %reshape.4 = f32[3,5,8] reshape(%param0)
  %reshape.5 = f32[10,4,3] reshape(%param0)
  %reshape.6 = f32[5,8,3] reshape(%param0)
  ROOT %tuple = tuple(%reshape.1, %reshape.2, %reshape.3, %reshape.4, %reshape.5, %reshape.6)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  std::vector<std::pair<std::string, std::string>> instruction_and_sharding = {
      {"reshape.1", "{devices=[2,3,2]<=[12]}"},
      {"reshape.2", "{devices=[2,1,1,1,6]<=[12] last_tile_dim_replicate}"},
      {"reshape.3", "{devices=[2,1,6]<=[12] last_tile_dim_replicate}"},
      {"reshape.4", "{devices=[3,1,1,4]<=[12] last_tile_dim_replicate}"},
      {"reshape.5", "{devices=[2,1,1,6]<=[12] last_tile_dim_replicate}"},
      {"reshape.6", "{replicated}"}};
  for (const auto& [name, sharding] : instruction_and_sharding) {
    auto* instruction = FindInstruction(module.get(), name);
    ASSERT_NE(instruction, nullptr);
    EXPECT_THAT(instruction, op::Sharding(sharding));
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, ReshapeBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[2002,1]{1,0} parameter(0)
  %copy = f32[2002,1]{1,0} copy(f32[2002,1]{1,0} %param0)
  ROOT %reshape = f32[14,11,13]{2,1,0} reshape(%copy),
    sharding={devices=[2,1,1]0,1 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
TEST_P(ParameterizedMetadataTest, ReshapeBackwardPassWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[2002,1]{1,0} parameter(0)
  %copy = f32[2002,1]{1,0} copy(f32[2002,1]{1,0} %param0)
  %shard-barrier-to = f32[2002,1]{1,0} custom-call(%copy), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
  ROOT %reshape = f32[14,11,13]{2,1,0} reshape(%shard-barrier-to),
    sharding={devices=[2,1,1]0,1 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}

TEST_P(ParameterizedMetadataTest, PadForwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %pad {
  %input = f32[11,17]{1,0} parameter(0),
    sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}}
  %pad_value = f32[] parameter(1)
  %pad = f32[27,51]{1,0} pad(%input, %pad_value), padding=2_4_1x1_1_2
  ROOT %copy = f32[27,51]{1,0} copy(%pad)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "pad");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, PadBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %pad {
  %input = f32[11,17]{1,0} parameter(0)
  %copy = f32[11,17]{1,0} copy(%input)
  %pad_value = f32[] parameter(1)
  %pad = f32[27,51]{1,0} pad(%copy, %pad_value), padding=2_4_1x1_1_2,
    sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}}
  ROOT %result = f32[27,51]{1,0} copy(%pad)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
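// Pad with a partially replicated input: the last_tile_dim_replicate
// subgroup is preserved across the padded dimensions.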
metadata={op_name=\"a\"}}\n %pad_value = f32[] parameter(1)\n %pad = f32[27,51]{1,0} pad(%input, %pad_value), padding=2_4_1x1_1_2\n ROOT %copy = f32[27,51]{1,0} copy(%pad)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"pad\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(\n instruction,\n op::Sharding(\"{devices=[2,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ShardedPreferredOverReplicated) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %replicated {\n %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),\n sharding={replicated metadata={op_name=\"a\"}}\n %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0)\n %param1 = f32[5,7,11,13]{3,2,1,0} parameter(1),\n sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name=\"b\"}}\n %copy.1 = f32[5,7,11,13]{3,2,1,0} copy(%param1)\n %add = f32[5,7,11,13]{3,2,1,0} add(%copy, %copy.1)\n ROOT %copy.2 = f32[5,7,11,13]{3,2,1,0} copy(%add)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* copy = FindInstruction(module.get(), \"copy\");\n ASSERT_NE(copy, nullptr);\n EXPECT_THAT(copy, op::Sharding(\"{devices=[1,2,2,1]0,1,2,3}\"));\n auto* copy1 = FindInstruction(module.get(), \"copy.1\");\n ASSERT_NE(copy1, nullptr);\n EXPECT_THAT(copy1, op::Sharding(\"{devices=[1,2,2,1]0,1,2,3}\"));\n auto* add = FindInstruction(module.get(), \"add\");\n ASSERT_NE(add, nullptr);\n EXPECT_THAT(add, op::Sharding(\"{devices=[1,2,2,1]0,1,2,3}\"));\n for (const HloSharding& sharding :\n {copy->sharding(), copy1->sharding(), add->sharding()}) {\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(sharding, ShardingMetadata({CreateMetadata(\"b\")}));\n } else {\n EXPECT_THAT(sharding, ShardingMetadata({}));\n }\n }\n}\nTEST_P(ParameterizedMetadataTest, PartialReplicateReshapeForwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %reshape {\n %param0 = f32[1430,1]{1,0} parameter(0),\n sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name=\"a\"}}\n %reshape = f32[10,11,13]{2,1,0} reshape(%param0)\n ROOT %copy = f32[10,11,13]{2,1,0} copy(%reshape)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"reshape\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(\n instruction,\n op::Sharding(\"{devices=[2,1,1,2]0,1,2,3 
last_tile_dim_replicate}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, PartialReplicateReshapeBackwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %reshape {\n %param0 = f32[2002,1]{1,0} parameter(0)\n %copy = f32[2002,1]{1,0} copy(f32[2002,1]{1,0} %param0)\n ROOT %reshape = f32[14,11,13]{2,1,0} reshape(%copy),\n sharding={devices=[2,1,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name=\"a\"}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"copy\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction,\n op::Sharding(\"{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, DontShardTuplesIfAllInputIsMaximal) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %tuple {\n %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),\n sharding={maximal device=0 metadata={op_name=\"a\"}}\n %param1 = f32[5,7,11,13]{3,2,1,0} parameter(1),\n sharding={maximal device=1 metadata={op_name=\"b\"}}\n %tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple(\n %param0, %param1)\n ROOT %copy = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) copy(%tuple)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n EXPECT_EQ(changed,\n !GetParam().propagate_metadata && !GetParam().clear_metadata);\n auto* instruction = FindInstruction(module.get(), \"tuple\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::NoSharding());\n}\nTEST_P(ParameterizedMetadataTest, ValidConvolution) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY conv {\n %lhs = f32[13,17,19]{2,1,0} parameter(0),\n sharding={devices=[1,2,1]0,1 metadata={op_name=\"a\"}}\n %rhs = f32[19,5,19]{2,1,0} parameter(1)\n %conv = f32[13,13,19]{2,1,0} convolution(%lhs, %rhs),\n window={size=5}, dim_labels=b0f_i0o->b0f\n ROOT %tuple = (f32[13,13,19]{2,1,0}) tuple(%conv)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"conv\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[1,2,1]0,1}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n 
ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, StridedSlice) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %slice {\n %param = f32[17,13]{1,0} parameter(0),\n sharding={devices=[2,1]0,1 metadata={op_name=\"a\"}}\n %slice = f32[7,5]{1,0} slice(%param), slice={[1:15:2], [5:10:1]}\n ROOT %tuple = (f32[7,5]{1,0}) tuple(%slice)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"slice\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[2,1]0,1}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, PartialReplicatedStridedSlice) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %slice {\n %param = f32[17,13]{1,0} parameter(0),\n sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name=\"a\"}}\n %slice = f32[7,5]{1,0} slice(%param), slice={[1:15:2], [5:10:1]}\n ROOT %tuple = (f32[7,5]{1,0}) tuple(%slice)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"slice\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction,\n op::Sharding(\"{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ReduceWindowBackwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\n%add (lhs: f32[], rhs: f32[]) -> f32[] {\n %lhs = f32[] parameter(0)\n %rhs = f32[] parameter(1)\n ROOT %add = f32[] add(%lhs, %rhs)\n}\nENTRY %reduce_window {\n %param = f32[13,17]{1,0} parameter(0)\n %param.copy = f32[13,17]{1,0} copy(%param)\n %init = f32[] parameter(1)\n ROOT %reduce-window = f32[7,17]{1,0} reduce-window(%param.copy, %init),\n window={size=3x2 stride=2x1 pad=1_1x0_1}, to_apply=%add,\n sharding={devices=[2,1]0,1 metadata={op_name=\"a\"}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* param_copy = FindInstruction(module.get(), \"param.copy\");\n ASSERT_NE(param_copy, nullptr);\n EXPECT_THAT(param_copy, op::Sharding(\"{devices=[2,1]0,1}\"));\n auto* reduce_window = FindInstruction(module.get(), \"reduce-window\");\n 
  ASSERT_NE(reduce_window, nullptr);
  EXPECT_THAT(reduce_window, op::Sharding("{devices=[2,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(param_copy->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(reduce_window->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(param_copy->sharding(), ShardingMetadata({}));
    EXPECT_THAT(reduce_window->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ReduceWindowBackwardPassWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
%add (lhs: f32[], rhs: f32[]) -> f32[] {
  %lhs = f32[] parameter(0)
  %rhs = f32[] parameter(1)
  ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce_window {
  %param = f32[13,17]{1,0} parameter(0)
  %param.copy = f32[13,17]{1,0} copy(%param)
  %init = f32[] parameter(1)
  %shard-barrier-to = f32[13,17]{1,0} custom-call(%param.copy), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
  ROOT %reduce-window = f32[7,17]{1,0} reduce-window(%shard-barrier-to, %init),
    window={size=3x2 stride=2x1 pad=1_1x0_1}, to_apply=%add,
    sharding={devices=[2,1]0,1 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* param_copy = FindInstruction(module.get(), "param.copy");
  ASSERT_NE(param_copy, nullptr);
  EXPECT_FALSE(param_copy->has_sharding());
}

TEST_P(ParameterizedMetadataTest, VariadicReduceWindowBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
%add (a: f32[], b: s32[], c: f32[], d: s32[]) -> (f32[], s32[]) {
  %a = f32[] parameter(0)
  %b = s32[] parameter(1)
  %c = f32[] parameter(2)
  %d = s32[] parameter(3)
  %add.0 = f32[] add(%a, %c)
  %add.1 = s32[] add(%b, %d)
  ROOT %t = tuple(%add.0, %add.1)
}
ENTRY %reduce_window {
  %param.0 = f32[13,17]{1,0} parameter(0)
  %param.0.copy = f32[13,17]{1,0} copy(%param.0)
  %param.1 = s32[13,17]{1,0} parameter(1)
  %param.1.copy = s32[13,17]{1,0} copy(%param.1)
  %init.0 = f32[] parameter(2)
  %init.1 = s32[] parameter(3)
  ROOT %reduce-window = (f32[7,17]{1,0}, s32[7,17]{1,0}) reduce-window(%param.0.copy, %param.1.copy, %init.0, %init.1),
    window={size=3x2 stride=2x1 pad=1_1x0_1}, to_apply=%add,
    sharding={{devices=[2,1]0,1 metadata={op_name="a"}}, {devices=[2,1]0,1 metadata={op_name="b"}}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* param_0_copy = FindInstruction(module.get(), "param.0.copy");
  ASSERT_NE(param_0_copy, nullptr);
  EXPECT_THAT(param_0_copy, op::Sharding("{devices=[2,1]0,1}"));
  auto* param_1_copy = FindInstruction(module.get(), "param.1.copy");
  ASSERT_NE(param_1_copy, nullptr);
  EXPECT_THAT(param_1_copy, op::Sharding("{devices=[2,1]0,1}"));
  auto* reduce_window = FindInstruction(module.get(), "reduce-window");
  ASSERT_NE(reduce_window, nullptr);
  EXPECT_THAT(reduce_window,
              op::Sharding("{{devices=[2,1]0,1}, {devices=[2,1]0,1}}"));
  if
      (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(param_0_copy->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(param_1_copy->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
    EXPECT_THAT(reduce_window->sharding().tuple_elements()[0],
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(reduce_window->sharding().tuple_elements()[1],
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(param_0_copy->sharding(), ShardingMetadata({}));
    EXPECT_THAT(param_1_copy->sharding(), ShardingMetadata({}));
    EXPECT_THAT(reduce_window->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ReplicatedConvolutionLhs) {
  const char* const hlo_string = R"(
HloModule module
ENTRY conv {
  %lhs = f32[3,2,3]{2,1,0} parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %rhs = f32[2,2,1]{2,1,0} parameter(1)
  %conv = f32[3,2,3]{2,1,0} convolution(%lhs, %rhs),
    window={size=1}, dim_labels=bf0_oi0->bf0
  ROOT %tuple = (f32[3,2,3]{2,1,0}) tuple(%conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* lhs = FindInstruction(module.get(), "lhs");
  ASSERT_NE(lhs, nullptr);
  EXPECT_THAT(lhs, op::Sharding("{replicated}"));
  auto* conv = FindInstruction(module.get(), "conv");
  ASSERT_NE(conv, nullptr);
  EXPECT_THAT(conv, op::Sharding("{replicated}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(lhs->sharding(), ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(conv->sharding(), ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(lhs->sharding(), ShardingMetadata({}));
    EXPECT_THAT(conv->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ConvolutionShardedFeature) {
  const char* const hlo_string = R"(
HloModule module
ENTRY conv {
  %lhs = f32[3,2,3]{2,1,0} parameter(0),
    sharding={devices=[1,2,1]0,1 metadata={op_name="a"}}
  %rhs = f32[2,2,1]{2,1,0} parameter(1)
  %conv = f32[3,2,3]{2,1,0} convolution(%lhs, %rhs),
    window={size=1}, dim_labels=bf0_oi0->bf0
  ROOT %tuple = (f32[3,2,3]{2,1,0}) tuple(%conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "conv");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{replicated}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ConvolutionDifferentDimensionNumbers) {
  const char* const hlo_string = R"(
HloModule module
ENTRY conv {
  %lhs = f32[8,16,512] parameter(0),
    sharding={devices=[1,2,1]0,1 metadata={op_name="a"}}
  %rhs = f32[8,2,512] parameter(1)
  %conv = f32[3,512,512] convolution(%lhs, %rhs),
    window={size=2 stride=5},
    dim_labels=f0b_i0o->0bf
  ROOT %tuple = (f32[3,512,512]) tuple(%conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "conv");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, Concatenate) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %concat {
  %param.0 = f32[5,7] parameter(0),
    sharding={devices=[2,1]0,1 metadata={op_name="a"}}
  %param.1 = f32[5,9] parameter(1),
    sharding={devices=[2,1]0,1 metadata={op_name="b"}}
  %concat = f32[5,16] concatenate(%param.0, %param.1),
    dimensions={1}
  ROOT %tuple = (f32[5,16]) tuple(%concat)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "concat");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ConcatenateForwardWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %concat {
  %param.0 = f32[5,7] parameter(0),
    sharding={devices=[2,1]0,1 metadata={op_name="a"}}
  %param.1 = f32[5,9] parameter(1),
    sharding={devices=[2,1]0,1 metadata={op_name="b"}}
  %shard-barrier-from.0 = f32[5,7] custom-call(%param.0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %shard-barrier-from.1 = f32[5,9] custom-call(%param.1), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %concat = f32[5,16] concatenate(%shard-barrier-from.0, %shard-barrier-from.1),
    dimensions={1}
  ROOT %tuple = (f32[5,16]) tuple(%concat)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "concat");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}

TEST_P(ParameterizedMetadataTest, ConcatenateBackwardWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %concat {
  %param.0 = f32[5,7] parameter(0)
  %copy.0 = f32[5,7] copy(%param.0)
  %param.1 = f32[5,9] parameter(1)
  %copy.1 = f32[5,9] copy(%param.1)
  %shard-barrier-to = f32[5,9] custom-call(%copy.1),
custom_call_target=\"ShardBarrierTo\", custom_call_has_side_effect=true\n %concat = f32[5,16] concatenate(%copy.0, %shard-barrier-to),\n dimensions={1}, sharding={devices=[2,1]0,1 metadata={op_name=\"a\"}}\n ROOT %tuple = (f32[5,16]) tuple(%concat)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n std::ignore,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n auto* instruction = FindInstruction(module.get(), \"copy.1\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_FALSE(instruction->has_sharding());\n}\nTEST_P(ParameterizedMetadataTest, TupleBackwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %tuple {\n %param.0 = f32[1] parameter(0)\n %param.1 = f32[3] parameter(1)\n %copy.0 = f32[1] copy(%param.0)\n %copy.1 = f32[3] copy(%param.1)\n ROOT %tuple = (f32[1], f32[3]) tuple(%copy.0, %copy.1),\n sharding={{replicated metadata={op_name=\"a\"}},\n {devices=[2]0,1 metadata={op_name=\"b\"}}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* copy0 = FindInstruction(module.get(), \"copy.0\");\n ASSERT_NE(copy0, nullptr);\n EXPECT_THAT(copy0, op::Sharding(\"{replicated}\"));\n auto* copy1 = FindInstruction(module.get(), \"copy.1\");\n ASSERT_NE(copy1, nullptr);\n EXPECT_THAT(copy1, op::Sharding(\"{devices=[2]0,1}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(copy0->sharding(), ShardingMetadata({CreateMetadata(\"a\")}));\n EXPECT_THAT(copy1->sharding(), ShardingMetadata({CreateMetadata(\"b\")}));\n } else {\n EXPECT_THAT(copy0->sharding(), ShardingMetadata({}));\n EXPECT_THAT(copy1->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, AllReduce) {\n const char* const hlo_string = R\"(\nHloModule module\n%add (lhs: f32[], rhs: f32[]) -> f32[] {\n %add_lhs = f32[] parameter(0)\n %add_rhs = f32[] parameter(1)\n ROOT %add = f32[] add(f32[] %add_lhs, f32[] %add_rhs)\n}\nENTRY %entry {\n %param.0 = f32[3] parameter(0)\n %param.1 = f32[3] parameter(1)\n %copy_f_t = f32[3] copy(%param.1),\n sharding={devices=[2]0,1 metadata={op_name=\"a\"}}\n %crs_f.tiled = f32[3] all-reduce(%copy_f_t), to_apply=%add\n %crs_f.none = f32[3] all-reduce(%copy_f_t), to_apply=%add,\n channel_id=1\n %crs_b.replicated = f32[3] all-reduce(%param.0), to_apply=%add\n %copy_b_r = f32[3] copy(%crs_b.replicated),\n sharding={replicated metadata={op_name=\"b\"}}\n ROOT %tuple = (f32[3], f32[3], f32[3]) tuple(\n %crs_f.tiled, crs_f.none, %copy_b_r)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* crs_f_tiled = FindInstruction(module.get(), \"crs_f.tiled\");\n ASSERT_NE(crs_f_tiled, nullptr);\n EXPECT_THAT(crs_f_tiled, op::Sharding(\"{devices=[2]0,1}\"));\n auto* crs_f_none = FindInstruction(module.get(), \"crs_f.none\");\n 
  ASSERT_NE(crs_f_none, nullptr);
  EXPECT_THAT(crs_f_none, op::Sharding("{devices=[2]0,1}"));
  auto* crs_b_replicated = FindInstruction(module.get(), "crs_b.replicated");
  ASSERT_NE(crs_b_replicated, nullptr);
  EXPECT_THAT(crs_b_replicated, op::Sharding("{replicated}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(crs_f_tiled->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(crs_b_replicated->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(crs_f_tiled->sharding(), ShardingMetadata({}));
    EXPECT_THAT(crs_b_replicated->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, While) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], f32[10,10]) parameter(0)
  %count.cond = u32[] get-tuple-element((u32[], f32[10,10]) %vars.cond), index=0
  %limit = u32[] constant(10)
  ROOT %lt = pred[] compare(u32[] %count.cond, u32[] %limit), direction=LT
}
%body {
  %vars = (u32[], f32[10,10]) parameter(0)
  %count = u32[] get-tuple-element(%vars), index=0
  %acc = f32[10,10] get-tuple-element((u32[], f32[10,10]) %vars), index=1
  %one = u32[] constant(1)
  %count.1 = u32[] add(u32[] %count, u32[] %one), sharding={replicated}
  %acc.1 = f32[10,10] add(f32[10,10] %acc, f32[10,10] %acc)
  ROOT %tuple = (u32[], f32[10,10]) tuple(u32[] %count.1, f32[10,10] %acc.1)
}
ENTRY %entry {
  %p0 = f32[10,10] parameter(0)
  %p0.copy = f32[10,10] copy(f32[10,10] %p0)
  %p1 = f32[10,10] parameter(1)
  %zero = u32[] constant(0)
  %init = (u32[], f32[10,10]) tuple(u32[] %zero, f32[10,10] %p0.copy)
  %while = (u32[], f32[10,10]) while((u32[], f32[10,10]) %init),
    body=%body, condition=%cond
  %res = f32[10,10] get-tuple-element((u32[], f32[10,10]) %while), index=1
  %prev = f32[10,10] get-tuple-element((u32[], f32[10,10]) %init), index=1
  %res.1 = f32[10,10] multiply(f32[10,10] %res, %prev)
  ROOT %res_tuple = (f32[10,10]) tuple(f32[10,10] %res.1)
})";
  auto while_is_sharded =
      [this](HloModule* module, const HloSharding& sharding,
             absl::Span<const std::vector<OpMetadata>> sharding_metadata) {
        if (GetParam().clear_metadata) {
          ClearMetadata(module);
        }
        TF_ASSERT_OK_AND_ASSIGN(
            bool changed,
            ShardingPropagation(true, GetParam().propagate_metadata)
                .Run(module));
        EXPECT_TRUE(changed);
        auto while_instr = FindInstruction(module, "while");
        EXPECT_NE(nullptr, while_instr);
        std::vector<HloInstruction*> instructions{
            while_instr, while_instr->while_body()->root_instruction(),
            while_instr->while_body()->parameter_instruction(0),
            while_instr->while_condition()->parameter_instruction(0)};
        for (auto instr : instructions) {
          ASSERT_TRUE(instr->has_sharding());
          EXPECT_EQ(sharding, instr->sharding());
          ASSERT_EQ(instr->sharding().tuple_elements().size(),
                    sharding_metadata.size());
          for (int i = 0, e = sharding_metadata.size(); i < e; ++i) {
            if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
              EXPECT_THAT(instr->sharding().tuple_elements()[i],
                          ShardingMetadata(sharding_metadata[i]));
            } else {
              EXPECT_THAT(instr->sharding().tuple_elements()[i],
                          ShardingMetadata({}));
            }
          }
        }
      };
  {
    TF_ASSERT_OK_AND_ASSIGN(auto module,
                            ParseAndReturnVerifiedModule(hlo_string));
    auto body_root = FindInstruction(module.get(), "tuple");
    EXPECT_NE(nullptr, body_root);
    auto sharding = ParseSharding(
                        "{{replicated metadata={op_name=\"b\"}}, "
                        "{devices=[2,1]0,1 metadata={op_name=\"c\"}}}")
                        .value();
    body_root->set_sharding(sharding);
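    // Annotating the loop-body root should stamp the same tuple sharding
    // (compared without metadata) onto the while instruction and both loop
    // parameters; per-element metadata is verified inside the helper.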
    while_is_sharded(module.get(), sharding.WithoutMetadata(),
                     {{CreateMetadata("b")}, {CreateMetadata("c")}});
  }
  {
    TF_ASSERT_OK_AND_ASSIGN(auto module,
                            ParseAndReturnVerifiedModule(hlo_string));
    auto acc_1 = FindInstruction(module.get(), "acc.1");
    EXPECT_NE(nullptr, acc_1);
    acc_1->set_sharding(
        ParseSharding("{devices=[2,1]0,1 metadata={op_name=\"b\"}}").value());
    while_is_sharded(
        module.get(),
        ParseSharding("{{replicated}, {devices=[2,1]0,1}}").value(),
        {{}, {CreateMetadata("b")}});
  }
  {
    TF_ASSERT_OK_AND_ASSIGN(auto module,
                            ParseAndReturnVerifiedModule(hlo_string));
    auto acc_1 = FindInstruction(module.get(), "acc.1");
    EXPECT_NE(nullptr, acc_1);
    acc_1->set_sharding(
        ParseSharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate "
                      "metadata={op_name=\"b\"}}")
            .value());
    auto p0 = FindInstruction(module.get(), "p0");
    p0->set_sharding(
        ParseSharding("{devices=[1,2,2]0,2,1,3 last_tile_dim_replicate "
                      "metadata={op_name=\"c\"}}")
            .value());
    while_is_sharded(module.get(),
                     ParseSharding("{{replicated}, "
                                   "{devices=[2,2]0,1,2,3}}")
                         .value(),
                     {{}, {CreateMetadata("c"), CreateMetadata("b")}});
  }
}

TEST_F(ShardingPropagationTest, PropagateShardingInWhileCondition) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], f32[]) parameter(0)
  %count.cond = u32[] get-tuple-element(%vars.cond), index=0
  %limit = u32[] constant(10)
  ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
  %vars = (u32[], f32[]) parameter(0)
  %count = u32[] get-tuple-element(%vars), index=0
  %acc = f32[] get-tuple-element(%vars), index=1
  %one = u32[] constant(1)
  %count.1 = u32[] add(u32[] %count, u32[] %one)
  %acc.1 = f32[] add(f32[] %acc, f32[] %acc)
  ROOT %tuple = (u32[], f32[]) tuple(%count.1, %acc.1)
}
ENTRY %entry {
  %p0 = f32[] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
  %zero = u32[] constant(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
  %init = (u32[], f32[]) tuple(%zero, %p0)
  ROOT %while = (u32[], f32[]) while(%init), body=%body, condition=%cond
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, false,
                          {true})
          .Run(module.get()));
  EXPECT_TRUE(changed);
  HloSharding single_sharding =
      ParseSharding("{devices=[2,2]<=[4] last_tile_dims={manual, replicated}}")
          .value();
  HloSharding tuple_sharding = HloSharding::SingleTuple(
      module->entry_computation()->root_instruction()->shape(),
      single_sharding);
  for (const HloComputation* computation : module->computations()) {
    for (const HloInstruction* instruction : computation->instructions()) {
      EXPECT_TRUE(instruction->has_sharding());
      EXPECT_EQ(instruction->sharding(), instruction->shape().IsTuple()
                                             ?
                                                 tuple_sharding
                                             : single_sharding);
    }
  }
}

TEST_P(ParameterizedMetadataTest, WhileGetShardingFromRecvInBody) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], f32[]) parameter(0)
  %count.cond = u32[] get-tuple-element(%vars.cond), index=0
  %limit = u32[] constant(10)
  ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
  %param = (u32[], f32[]) parameter(0)
  %count = u32[] get-tuple-element(%param), index=0
  %after-all = token[] after-all()
  %recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1,
    sharding={{maximal device=1 metadata={op_name="a"}},
    {maximal device=1}, {maximal device=1}}
  %recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
  %data = f32[] get-tuple-element(%recv-done), index=0
  ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
  %p0 = f32[] parameter(0)
  %zero = u32[] constant(0)
  %init = (u32[], f32[]) tuple(%zero, %p0)
  %while = (u32[], f32[]) while(%init), body=%body, condition=%cond
  ROOT %result = f32[] get-tuple-element(%while), index=1
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_EQ(changed,
            !GetParam().propagate_metadata && !GetParam().clear_metadata);
  auto sharding =
      ParseSharding("{{maximal device=1}, {maximal device=1}}").value();
  auto while_instr = FindInstruction(module.get(), "while");
  ASSERT_NE(nullptr, while_instr);
  std::vector<HloInstruction*> instructions{
      while_instr, while_instr->while_body()->root_instruction(),
      while_instr->while_body()->parameter_instruction(0),
      while_instr->while_condition()->parameter_instruction(0)};
  for (auto instr : instructions) {
    ASSERT_TRUE(instr->has_sharding());
    EXPECT_EQ(sharding, instr->sharding());
    for (const HloSharding& sub_sharding : instr->sharding().tuple_elements()) {
      EXPECT_THAT(sub_sharding, ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, WhileConflictingShardingInBodyBeforeRecv) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], f32[]) parameter(0)
  %count.cond = u32[] get-tuple-element(%vars.cond), index=0
  %limit = u32[] constant(10)
  ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
  %param = (u32[], f32[]) parameter(0)
  %count = u32[] get-tuple-element(%param), index=0,
    sharding={maximal device=0 metadata={op_name="a"}}
  %after-all = token[] after-all()
  %recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1,
    sharding={{maximal device=1 metadata={op_name="b"}},
    {maximal device=1}, {maximal device=1}}
  %recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
  %data = f32[] get-tuple-element(%recv-done), index=0
  ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
  %p0 = f32[] parameter(0)
  %zero = u32[] constant(0)
  %init = (u32[], f32[]) tuple(%zero, %p0)
  %while = (u32[], f32[]) while(%init), body=%body, condition=%cond
  ROOT %result = f32[] get-tuple-element(%while), index=1
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  auto result =
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get());
  EXPECT_THAT(result.status().message(),
              ::testing::HasSubstr(
                  "Instruction: count is on device: 0, which conflicts with "
                  "device: 1 of channel instruction: recv"));
}

TEST_P(ParameterizedMetadataTest, WhileConflictingShardingInBodyAfterRecv) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], f32[]) parameter(0)
  %count.cond = u32[] get-tuple-element(%vars.cond), index=0
  %limit = u32[] constant(10)
  ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
  %param = (u32[], f32[]) parameter(0)
  %count = u32[] get-tuple-element(%param), index=0
  %after-all = token[] after-all()
  %recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1,
    sharding={{maximal device=1 metadata={op_name="a"}},
    {maximal device=1}, {maximal device=1}}
  %recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
  %data = f32[] get-tuple-element(%recv-done), index=0,
    sharding={maximal device=0 metadata={op_name="b"}}
  ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
  %p0 = f32[] parameter(0)
  %zero = u32[] constant(0)
  %init = (u32[], f32[]) tuple(%zero, %p0)
  %while = (u32[], f32[]) while(%init), body=%body, condition=%cond
  ROOT %result = f32[] get-tuple-element(%while), index=1
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  auto result =
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get());
  EXPECT_THAT(result.status().message(),
              ::testing::HasSubstr(
                  "Instruction: data is on device: 0, which conflicts with "
                  "device: 1 of channel instruction: recv"));
}

TEST_P(ParameterizedMetadataTest, WhileConflictingShardingOnWhileInstruction) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], f32[]) parameter(0)
  %count.cond = u32[] get-tuple-element(%vars.cond), index=0
  %limit = u32[] constant(10)
  ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
  %param = (u32[], f32[]) parameter(0)
  %count = u32[] get-tuple-element(%param), index=0
  %after-all = token[] after-all()
  %recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1,
    sharding={{maximal device=1 metadata={op_name="a"}},
    {maximal device=1}, {maximal device=1}}
  %recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
  %data = f32[] get-tuple-element(%recv-done), index=0
  ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
  %p0 = f32[] parameter(0)
  %zero = u32[] constant(0)
  %init = (u32[], f32[]) tuple(%zero, %p0)
  %while = (u32[], f32[]) while(%init), body=%body, condition=%cond,
    sharding={{maximal device=0 metadata={op_name="b"}},{maximal device=0}}
  ROOT %result = f32[] get-tuple-element(%while), index=1
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  auto result =
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get());
  EXPECT_THAT(result.status().message(),
              ::testing::HasSubstr(
                  "Instruction: while is on device: 0, which conflicts with "
                  "device: 1 of channel instruction: recv"));
}

TEST_P(ParameterizedMetadataTest, WhileConv) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], bf16[2, 2048, 768], bf16[128,512,2048], bf16[128,512,768], s32[]) parameter(0)
  %count.cond = u32[] get-tuple-element(%vars.cond), index=0
  %limit =
    u32[] constant(2)
  ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
  %param = (u32[], bf16[2, 2048, 768], bf16[128,512,2048], bf16[128,512,768], s32[]) parameter(0)
  %i0 = s32[] constant(0)
  %count = u32[] get-tuple-element(%param), index=0
  %gte0 = bf16[2,2048,768]{2,1,0}
    get-tuple-element(%param), index=1
  %index = s32[] get-tuple-element(%param), index=4
  %dys = bf16[1,2048,768]{2,1,0} dynamic-slice(%gte0, s32[] %index, s32[] %i0, s32[] %i0),
    dynamic_slice_sizes={1,2048,768}
  %kernel = bf16[2048, 768]{1,0}
    reshape(%dys)
  %lhs = bf16[128,512,2048]{2,1,0}
    get-tuple-element(%param), index=2,
    sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
  %reshape = bf16[2048,768,1]{2,1,0} reshape(bf16[2048,768]{1,0} %kernel)
  %convolution = bf16[128,512,768]{2,1,0}
    convolution(bf16[128,512,2048]{2,1,0} %lhs,
    bf16[2048,768,1]{2,1,0} %reshape), window={size=1},
    dim_labels=0bf_io0->0bf, sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
  ROOT %tuple = (u32[], bf16[2,2048,768], bf16[128,512,2048], bf16[128,512,768], s32[]) tuple(%count, %gte0, %lhs, %convolution, index)
}
ENTRY %entry {
  %p0 = bf16[2048,768] parameter(0),
    sharding={devices=[2,1,8]0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15 last_tile_dim_replicate}
  %p1 = bf16[128,512,2048] parameter(1),
    sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
  %p2 = bf16[128,512,768] parameter(2)
  %reshape0 = bf16[1,2048,768] reshape(%p0)
  %concat0 = bf16[2,2048,768] concatenate(%reshape0, %reshape0), dimensions={0}
  %zero = u32[] constant(0)
  %p3 = s32[] parameter(3)
  %init = (u32[], bf16[2, 2048, 768], bf16[128,512,2048], bf16[128,512,768], s32[]) tuple(%zero, %concat0, %p1, %p2, %p3)
  %while = (u32[], bf16[2, 2048, 768], bf16[128,512,2048], bf16[128,512,768], s32[]) while(%init), body=%body, condition=%cond
  ROOT %result = bf16[128,512,768] get-tuple-element(%while), index=3, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* kernel = FindInstruction(module.get(), "kernel");
  ASSERT_NE(kernel, nullptr);
  EXPECT_THAT(kernel, op::Sharding("{devices=[2,1,8]0,2,4,6,8,10,12,14,1,3,5,"
                                   "7,9,11,13,15 last_tile_dim_replicate}"));
}

TEST_P(ParameterizedMetadataTest, DoNotPassThroughConcatAtFirstIteration) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = bf16[16,2048,768] parameter(0),
    sharding={devices=[2,1,1,8]0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15 last_tile_dim_replicate}
  %concat = bf16[32,2048,768] concatenate(%p0, %p0), dimensions={0}
  %add = bf16[32,2048,768] add(%concat, %concat),
    sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
  ROOT %result = bf16[32,2048,768] copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* kernel = FindInstruction(module.get(), "concat");
  ASSERT_NE(kernel, nullptr);
  EXPECT_THAT(kernel,
op::Sharding(\"{devices=[8,1,2]0,1,2,3,4,5,6,7,8,\"\n \"9,10,11,12,13,14,15}\"));\n}\nTEST_P(ParameterizedMetadataTest, DoNotPassThroughConcatAtFirstIteration2) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %entry {\n %p0 = bf16[16,2048,768] parameter(0),\n sharding={devices=[1,2,1,8]0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15 last_tile_dim_replicate}\n %concat = bf16[32,2048,768] concatenate(%p0, %p0), dimensions={0}\n %add = bf16[32,2048,768] add(%concat, %concat),\n sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}\n ROOT %result = bf16[32,2048,768] copy(%add)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* kernel = FindInstruction(module.get(), \"concat\");\n ASSERT_NE(kernel, nullptr);\n EXPECT_THAT(kernel, op::Sharding(\"{devices=[8,1,2]0,1,2,3,4,5,6,7,8,\"\n \"9,10,11,12,13,14,15}\"));\n}\nTEST_P(ParameterizedMetadataTest,\n DoNotPassThroughDynamicSliceAtFirstIteration) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %entry {\n %p0 = bf16[64,2048,768] parameter(0),\n sharding={devices=[2,1,1,8]0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15 last_tile_dim_replicate}\n %p1 = s32[] parameter(1)\n %i0 = s32[] constant(0)\n %dys = bf16[32,2048,768] dynamic-slice(%p0, s32[] %p1, s32[] %i0, s32[] %i0),\n dynamic_slice_sizes={32,2048,768}\n %add = bf16[32,2048,768] add(%dys, %dys),\n sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}\n ROOT %result = bf16[32,2048,768] copy(%add)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* kernel = FindInstruction(module.get(), \"dys\");\n ASSERT_NE(kernel, nullptr);\n EXPECT_THAT(kernel, op::Sharding(\"{devices=[8,1,2]0,1,2,3,4,5,6,7,8,\"\n \"9,10,11,12,13,14,15}\"));\n}\nTEST_P(ParameterizedMetadataTest, Dot) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %conv {\n %param.0 = f32[8,256,128] parameter(0)\n %param.1 = f32[8,128,512] parameter(1)\n %param.2 = f32[8,128] parameter(2)\n %p0_copy_0 = f32[8,256,128] copy(%param.0),\n sharding={devices=[1,4,1]0,1,2,3 metadata={op_name=\"a\"}}\n %p1_copy_0 = f32[8,128,512] copy(%param.1),\n sharding={devices=[1,1,4]0,1,2,3 metadata={op_name=\"b\"}}\n %p2_copy = f32[8,128] copy(%param.2)\n %dot_prop_rhs = f32[8,256,512] dot(%p0_copy_0, %p1_copy_0),\n lhs_batch_dims={0}, rhs_batch_dims={0},\n lhs_contracting_dims={2}, rhs_contracting_dims={1}\n %dot_prop_lhs = f32[8,512,256] dot(%p1_copy_0, %p0_copy_0),\n lhs_batch_dims={0}, rhs_batch_dims={0},\n lhs_contracting_dims={1}, rhs_contracting_dims={2}\n %dot_mat_vec = f32[8,256] dot(%p0_copy_0, %p2_copy),\n lhs_batch_dims={0}, rhs_batch_dims={0},\n lhs_contracting_dims={2}, rhs_contracting_dims={1}\n %p0_copy_1 = f32[8,256,128] copy(%param.0)\n %p1_copy_1 = f32[8,128,512] copy(%param.1)\n %dot_back_prop_rhs = f32[8,256,512] dot(%p0_copy_1, %p1_copy_1),\n lhs_batch_dims={0}, rhs_batch_dims={0},\n lhs_contracting_dims={2}, rhs_contracting_dims={1}\n %copy_back_prop_rhs = f32[8,256,512] 
    copy(%dot_back_prop_rhs),
    sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="c"}}
  ROOT %tuple = (f32[8,512,256], f32[8,256,512], f32[8,256], f32[8,256,512])
    tuple(%dot_prop_lhs, %dot_prop_rhs, %dot_mat_vec, %copy_back_prop_rhs)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* dot_prop_rhs = FindInstruction(module.get(), "dot_prop_rhs");
  ASSERT_NE(dot_prop_rhs, nullptr);
  EXPECT_THAT(dot_prop_rhs, op::Sharding("{devices=[1,1,4]0,1,2,3}"));
  auto* dot_prop_lhs = FindInstruction(module.get(), "dot_prop_lhs");
  ASSERT_NE(dot_prop_lhs, nullptr);
  EXPECT_THAT(dot_prop_lhs, op::Sharding("{devices=[1,4,1]0,1,2,3}"));
  auto* dot_mat_vec = FindInstruction(module.get(), "dot_mat_vec");
  ASSERT_NE(dot_mat_vec, nullptr);
  EXPECT_THAT(dot_mat_vec, op::Sharding("{devices=[1,4]0,1,2,3}"));
  auto* p0_copy_1 = FindInstruction(module.get(), "p0_copy_1");
  ASSERT_NE(p0_copy_1, nullptr);
  EXPECT_THAT(
      p0_copy_1,
      op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}"));
  auto* p1_copy_1 = FindInstruction(module.get(), "p1_copy_1");
  ASSERT_NE(p1_copy_1, nullptr);
  EXPECT_THAT(
      p1_copy_1,
      op::Sharding("{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"));
  auto* dot_back_prop_rhs = FindInstruction(module.get(), "dot_back_prop_rhs");
  ASSERT_NE(dot_back_prop_rhs, nullptr);
  EXPECT_THAT(dot_back_prop_rhs, op::Sharding("{devices=[1,2,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(dot_prop_rhs->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
    EXPECT_THAT(dot_prop_lhs->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
    EXPECT_THAT(dot_mat_vec->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(p0_copy_1->sharding(), ShardingMetadata({CreateMetadata("c")}));
    EXPECT_THAT(p1_copy_1->sharding(), ShardingMetadata({CreateMetadata("c")}));
    EXPECT_THAT(dot_back_prop_rhs->sharding(),
                ShardingMetadata({CreateMetadata("c")}));
  } else {
    for (HloInstruction* instruction :
         {dot_prop_rhs, dot_prop_lhs, dot_mat_vec, p0_copy_1, p1_copy_1,
          dot_back_prop_rhs}) {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, DotTiledBatchDim) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[8,256,512] parameter(0)
  %p1 = f32[8,512,128] parameter(1)
  %add = f32[8,256,512] add(%p0, %p0)
  %dot = f32[8,256,128] dot(%add, %p1),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={1}
  %res = f32[8,32768] reshape(%dot),
    sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}}
  ROOT %tuple = (f32[8,32768]) tuple(%res)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "add");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,1]0,1,2,3}"));
  if
      (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, DotMergeOperands) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[8,256,512] parameter(0),
    sharding={devices=[2,2,1,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}}
  %p1 = f32[8,128,512] parameter(1),
    sharding={devices=[2,2,1,2]0,2,1,3,4,6,5,7 last_tile_dim_replicate metadata={op_name="b"}}
  %dot = f32[8,256,128] dot(%p0, %p1),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={2}
  ROOT %copy = f32[8,256,128] copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "dot");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,2]0,1,2,3,4,5,6,7}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, DotMergeOperands2) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[8,256,512] parameter(0),
    sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
  %p1 = f32[8,128,512] parameter(1),
    sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="b"}}
  %dot = f32[8,256,128] dot(%p0, %p1),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={2}
  ROOT %copy = f32[8,256,128] copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "dot");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding(
          "{devices=[2,2,1,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, DotMergeOperands3) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[256,512] parameter(0),
    sharding={devices=[2,4]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
  %p1 = f32[128,512] parameter(1),
    sharding={devices=[4,2]0,4,2,6,3,7,1,5 metadata={op_name="b"}}
  %dot = f32[256,128] dot(%p0, %p1),
    lhs_contracting_dims={1}, rhs_contracting_dims={1}
  ROOT %copy = f32[256,128] copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "dot");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,4]0,2,3,1,4,6,7,5}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ForwardDotWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[8,256,512] parameter(0),
    sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
  %p1 = f32[8,128,512] parameter(1)
  %shard-barrier-from = f32[8,256,512] custom-call(%p0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %dot = f32[8,256,128] dot(%shard-barrier-from, %p1),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={2}
  ROOT %copy = f32[8,256,128] copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "dot");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}

TEST_P(ParameterizedMetadataTest, BackwardDotWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[8,256,512] parameter(0),
    sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
  %p1 = f32[8,128,512] parameter(1)
  %copy1 = f32[8,128,512] copy(%p1)
  %shard-barrier-to = f32[8,128,512] custom-call(%copy1), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
  %dot = f32[8,256,128] dot(%p0, %shard-barrier-to),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={2},
    sharding={devices=[2,1,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="b"}}
  ROOT %copy = f32[8,256,128] copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "copy1");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{replicated}"));
}

TEST_P(ParameterizedMetadataTest, BackwardDotFromContracting) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[8,256,512] parameter(0),
    sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
  %p1 = f32[8,128,512] parameter(1)
  %copy1 = f32[8,128,512] copy(%p1)
  %dot = f32[8,256,128] dot(%p0, %copy1),
    lhs_batch_dims={0}, rhs_batch_dims={0},
    lhs_contracting_dims={2}, rhs_contracting_dims={2},
    sharding={devices=[2,1,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="b"}}
  ROOT %copy = f32[8,256,128] copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if
      (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "copy1");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,2]0,1,2,3,4,5,6,7}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a"), CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, BackwardDotFromContractingWithManual) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %dot {
  %p0 = f32[8,512] parameter(0),
    sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
  %p1 = f32[512,128] parameter(1)
  %copy1 = f32[512,128] copy(%p1)
  %dot = f32[8,128] dot(%p0, %copy1),
    lhs_batch_dims={}, rhs_batch_dims={},
    lhs_contracting_dims={1}, rhs_contracting_dims={0},
    sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dims={replicated, manual} metadata={op_name="b"}}
  ROOT %copy = f32[8,128] copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "copy1");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dims={manual}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ConvAsDotOnTrivialDimsForward) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %lhs = f32[128,1,1,1001] parameter(0),
    sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}}
  %rhs = f32[1,1,1024,1001] parameter(1),
    sharding={devices=[1,2,1,1]0,1 metadata={op_name="b"}}
  %convolution = f32[128,1,1,1024] convolution(%lhs, %rhs),
    window={size=1x1 rhs_reversal=1x1}, dim_labels=b01f_01oi->b01f
  ROOT %copy = f32[128,1,1,1024] copy(%convolution)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "convolution");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ConvAsDotForwardWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %lhs = f32[128,1,1,1001] parameter(0),
TEST_P(ParameterizedMetadataTest, ConvAsDotForwardWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %lhs = f32[128,1,1,1001] parameter(0),
    sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}}
  %rhs = f32[1,1,1024,1001] parameter(1),
    sharding={devices=[1,2,1,1]0,1 metadata={op_name="b"}}
  %shard-barrier-from = f32[1,1,1024,1001] custom-call(%rhs), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %convolution = f32[128,1,1,1024] convolution(%lhs, %shard-barrier-from),
    window={size=1x1 rhs_reversal=1x1}, dim_labels=b01f_01oi->b01f
  ROOT %copy = f32[128,1,1,1024] copy(%convolution)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "convolution");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{replicated}"));
}

TEST_P(ParameterizedMetadataTest, ConvAsDotOnTrivialDimsBackward) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[128,5,5,128] parameter(0)
  %lhs = f32[128,5,5,128] copy(%p0)
  %p1 = f32[5,5,128,768] parameter(1)
  %rhs = f32[5,5,128,768] copy(%p1)
  %convolution = f32[128,1,1,768] convolution(%lhs, %rhs), window={size=5x5},
    dim_labels=b01f_01io->b01f,
    sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}}
  ROOT %copy = f32[128,1,1,768] copy(%convolution)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* lhs = FindInstruction(module.get(), "lhs");
  ASSERT_NE(lhs, nullptr);
  auto* rhs = FindInstruction(module.get(), "rhs");
  ASSERT_NE(rhs, nullptr);
  for (HloInstruction* instruction : {lhs, rhs}) {
    EXPECT_THAT(instruction, op::Sharding("{replicated}"));
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, ConvAsDotBackwardWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[128,5,5,128] parameter(0)
  %lhs = f32[128,5,5,128] copy(%p0)
  %p1 = f32[5,5,128,768] parameter(1)
  %rhs = f32[5,5,128,768] copy(%p1)
  %shard-barrier-from = f32[128,5,5,128] custom-call(%lhs), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %convolution = f32[128,1,1,768] convolution(%shard-barrier-from, %rhs), window={size=5x5},
    dim_labels=b01f_01io->b01f,
    sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}}
  ROOT %copy = f32[128,1,1,768] copy(%convolution)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* lhs = FindInstruction(module.get(), "lhs");
  ASSERT_NE(lhs, nullptr);
  EXPECT_THAT(lhs, op::Sharding("{replicated}"));
}

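// Convolutions with partially replicated operands: the propagated output
// sharding comes from the filter operand in the first test (metadata "b") and
// from the batch-sharded input in the grouped, data-parallel second test.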
TEST_P(ParameterizedMetadataTest,
       ConvolutionFilterIFOFPartitionedInputPartialReplicate) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[128,112,112,12] parameter(0)
  %lhs.copy = f32[128,112,112,12] copy(f32[128,112,112,12] %lhs),
    sharding={devices=[1,1,1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
  %rhs = f32[7,7,12,64] parameter(1)
  %rhs.copy = f32[7,7,12,64] copy(f32[7,7,12,64] %rhs),
    sharding={devices=[1,1,2,2]0,1,2,3 metadata={op_name="b"}}
  %conv = f32[128,56,56,64] convolution(
    f32[128,112,112,12] %lhs.copy,
    f32[7,7,12,64] %rhs.copy),
    window={size=7x7 stride=2x2 pad=3_3x3_3},
    dim_labels=b01f_01io->b01f
  ROOT %copy = f32[128,56,56,64] copy(conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "conv");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding("{devices=[1,1,1,2,2]0,2,1,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ConvolutionDataParallelism) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
  p0 = f32[256,512,16,32] parameter(0), sharding={devices=[2,2,2,2]<=[16] metadata={op_name="lhs_sharding"}}
  p1 = f32[512,1,12,28] parameter(1), sharding={replicated metadata={op_name="rhs_sharding"}}
  conv = f32[256,512,5,5] convolution(p0, p1), window={size=12x28}, dim_labels=bf01_oi01->bf01, feature_group_count=512
  ROOT copy = f32[256,512,5,5] copy(conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "conv");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding("{devices=[2,1,1,1,8]<=[16] last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("lhs_sharding")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

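// Concatenate propagates its user-annotated sharding back to the operands.
// When the concatenated dimension itself is tiled, each operand receives the
// slice of the device assignment covering its rows; operands that only span
// part of a tile may get no sharding at all.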
TEST_P(ParameterizedMetadataTest, ConcatFromUserUnshardedDim) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[8,128] parameter(0)
  %p1 = f32[8,128] parameter(1)
  %c0 = f32[8,128] copy(%p0)
  %c1 = f32[8,128] copy(%p1)
  %concat = f32[16,128] concatenate(%c0, %c1),
    dimensions={0},
    sharding={devices=[1,2]0,1 metadata={op_name="a"}}
  ROOT %tuple = (f32[16,128]) tuple(%concat)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* c0 = FindInstruction(module.get(), "c0");
  ASSERT_NE(c0, nullptr);
  auto* c1 = FindInstruction(module.get(), "c1");
  ASSERT_NE(c1, nullptr);
  for (HloInstruction* instruction : {c0, c1}) {
    EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, ConcatFromUserShardedDim) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[8,128] parameter(0)
  %p1 = f32[8,128] parameter(1)
  %c0 = f32[8,128] copy(%p0)
  %c1 = f32[8,128] copy(%p1)
  %concat = f32[16,128] concatenate(%c0, %c1),
    dimensions={0},
    sharding={devices=[3,1]0,1,2 metadata={op_name="a"}}
  ROOT %tuple = (f32[16,128]) tuple(%concat)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  // Assert non-null before passing the instruction to a matcher.
  auto* c0 = FindInstruction(module.get(), "c0");
  ASSERT_NE(c0, nullptr);
  EXPECT_THAT(c0, op::Sharding("{devices=[2,1]0,1}"));
  auto* c1 = FindInstruction(module.get(), "c1");
  ASSERT_NE(c1, nullptr);
  EXPECT_THAT(c1, op::Sharding("{devices=[2,1]1,2}"));
  for (HloInstruction* instruction : {c0, c1}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, ConcatFromUserShardedDimMaximalOperand) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[8,128] parameter(0)
  %p1 = f32[24,128] parameter(1)
  %c0 = f32[8,128] copy(%p0)
  %c1 = f32[24,128] copy(%p1)
  %concat = f32[32,128] concatenate(%c0, %c1),
    dimensions={0},
    sharding={devices=[4,1]0,1,2,3 metadata={op_name="a"}}
  ROOT %tuple = (f32[32,128]) tuple(%concat)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* c0 = FindInstruction(module.get(), "c0");
  ASSERT_NE(c0, nullptr);
  EXPECT_THAT(c0, op::NoSharding());
  auto* c1 = FindInstruction(module.get(), "c1");
  ASSERT_NE(c1, nullptr);
  EXPECT_THAT(c1, op::Sharding("{devices=[3,1]1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(c1->sharding(), ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(c1->sharding(), ShardingMetadata({}));
  }
}

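// Side-effecting instructions such as rng must never be assigned a sharding,
// even when all of their operands are replicated.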
TEST_P(ParameterizedMetadataTest, ReplicatedToSideEffecting) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry_computation {
  %const.0 = s32[] constant(0),
    sharding={replicated metadata={op_name="a"}}
  %const.1 = s32[] constant(2147483647),
    sharding={replicated metadata={op_name="b"}}
  %rng = s32[4]{0} rng(%const.0, %const.1),
    distribution=rng_uniform
  ROOT %root = (s32[4]{0}) tuple(%rng)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  EXPECT_EQ(changed,
            !GetParam().propagate_metadata && !GetParam().clear_metadata);
  auto* instruction = FindInstruction(module.get(), "rng");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::NoSharding());
}

TEST_P(ParameterizedMetadataTest, PartReplicatedTupleUser) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry_computation {
  %param.0 = f32[5] parameter(0)
  %param.1 = f32[7] parameter(1)
  %param.2 = f32[9] parameter(2)
  %tuple.0 = (f32[5], f32[7]) tuple(%param.0, %param.1)
  ROOT %tuple.1 = ((f32[5], f32[7]), f32[9]) tuple(%tuple.0, %param.2),
    sharding={{maximal device=0 metadata={op_name="a"}},
              {replicated metadata={op_name="b"}},
              {maximal device=1 metadata={op_name="c"}}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "tuple.0");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{{maximal device=0}, {replicated}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding().tuple_elements()[0],
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(instruction->sharding().tuple_elements()[1],
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    for (const HloSharding& sub_sharding :
         instruction->sharding().tuple_elements()) {
      EXPECT_THAT(sub_sharding, ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, Conditional) {
  const char* const hlo_string = R"(
HloModule module
%add-call {
  %x = f32[4,4] parameter(0)
  ROOT %add = f32[4,4] add(%x, %x)
}
%true_comp {
  %tp = (f32[3,5], f32[4,4]) parameter(0)
  %tgte.0 = f32[3,5] get-tuple-element(%tp), index=0
  %ttr = f32[5,3] transpose(%tgte.0), dimensions={1,0}
  %tgte.1 = f32[4,4] get-tuple-element(%tp), index=1
  %tadd = f32[4,4] call(%tgte.1), to_apply=%add-call
  ROOT %tr = (f32[5,3], f32[4,4]) tuple(%ttr, %tadd)
}
%mul-call {
  %y = f32[4,4] parameter(0)
  ROOT %mul = f32[4,4] multiply(%y, %y)
}
%false_comp {
  %fp = (f32[5,3], f32[4,4]) parameter(0)
  %fgte.0 = f32[5,3] get-tuple-element(%fp), index=0
  %fgte.1 = f32[4,4] get-tuple-element(%fp), index=1
  %fmul = f32[4,4] call(%fgte.1), to_apply=%mul-call
  ROOT %fr = (f32[5,3], f32[4,4]) tuple(%fgte.0, %fmul)
}
ENTRY entry {
  %cond = pred[] parameter(0)
  %tp.0 = f32[3,5] parameter(1), sharding={devices=[1,2]0,1 metadata={op_name="a"}}
  %fp.0 = f32[5,3] parameter(2), sharding={devices=[1,3]0,1,2 metadata={op_name="b"}}
  %constant = f32[4] constant({1,2,3,4}), sharding={devices=[4]0,1,2,3 metadata={op_name="c"}}
  %broadcast = f32[4,4] broadcast(%constant), dimensions={1}
  %add = f32[4,4] add(%broadcast, %broadcast)
  %true_param = (f32[3,5], f32[4,4]) tuple(%tp.0, %add)
  %false_param = (f32[5,3], f32[4,4]) tuple(%fp.0, %add)
  %conditional = (f32[5,3], f32[4,4]) conditional(
    %cond, %true_param, %false_param),
    true_computation=%true_comp,
    false_computation=%false_comp
  ROOT %root = f32[5,3] get-tuple-element(%conditional), index=0
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* tp = FindInstruction(module.get(), "tp");
  auto* tgte_0 = FindInstruction(module.get(), "tgte.0");
  auto* ttr = FindInstruction(module.get(), "ttr");
  auto* tgte_1 = FindInstruction(module.get(), "tgte.1");
  auto* tadd = FindInstruction(module.get(), "tadd");
  auto* tr = FindInstruction(module.get(), "tr");
  auto* fp = FindInstruction(module.get(), "fp");
  auto* fgte_0 = FindInstruction(module.get(), "fgte.0");
  auto* fgte_1 = FindInstruction(module.get(), "fgte.1");
  auto* fmul = FindInstruction(module.get(), "fmul");
  auto* fr = FindInstruction(module.get(), "fr");
  auto* x = FindInstruction(module.get(), "x");
  auto* add = FindInstruction(module.get(), "add");
  auto* y = FindInstruction(module.get(), "y");
  auto* mul = FindInstruction(module.get(), "mul");
  auto* conditional = FindInstruction(module.get(), "conditional");
  const std::vector<HloInstruction*> instructions(
      {tp, tgte_0, ttr, tgte_1, tadd, tr, fp, fgte_0, fgte_1, fmul, fr, x, add,
       y, mul, conditional});
  for (HloInstruction* instruction : instructions) {
    EXPECT_NE(instruction, nullptr);
    EXPECT_TRUE(instruction->has_sharding());
  }
  for (HloInstruction* instruction :
       {tgte_1, tadd, fgte_1, fmul, x, add, y, mul}) {
    EXPECT_THAT(instruction, op::Sharding("{devices=[1,4]0,1,2,3}"));
  }
  for (HloInstruction* instruction : {tr, fr, conditional, fp}) {
    EXPECT_THAT(instruction,
                op::Sharding("{{devices=[1,3]0,1,2}, {devices=[1,4]0,1,2,3}}"));
  }
  EXPECT_THAT(tp, op::Sharding("{{devices=[1,2]0,1}, {devices=[1,4]0,1,2,3}}"));
  EXPECT_THAT(tgte_0, op::Sharding("{devices=[1,2]0,1}"));
  EXPECT_THAT(ttr, op::Sharding("{devices=[2,1]0,1}"));
  EXPECT_THAT(fgte_0, op::Sharding("{devices=[1,3]0,1,2}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    for (HloInstruction* instruction :
         {tgte_1, tadd, fgte_1, fmul, x, add, y, mul}) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("c")}));
    }
    for (HloInstruction* instruction : {tr, fr, conditional, fp}) {
      const std::vector<HloSharding>& shardings =
          instruction->sharding().tuple_elements();
      EXPECT_THAT(shardings[0], ShardingMetadata({CreateMetadata("b")}));
      EXPECT_THAT(shardings[1], ShardingMetadata({CreateMetadata("c")}));
    }
    for (HloInstruction* instruction : {tgte_0, ttr}) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    }
    EXPECT_THAT(fgte_0->sharding(), ShardingMetadata({CreateMetadata("b")}));
  } else {
    for (HloInstruction* instruction : instructions) {
      if (instruction->sharding().IsTuple()) {
        for (const HloSharding& tuple_element :
             instruction->sharding().tuple_elements()) {
          EXPECT_THAT(tuple_element, ShardingMetadata({}));
        }
      } else {
        EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
      }
    }
  }
}

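// Tuple shardings propagate element-wise from a user back to the tuples that
// feed it, including through nested tuples and get-tuple-element chains.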
TEST_P(ParameterizedMetadataTest, TupleFromUser) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[13] parameter(0)
  %p1 = f32[15] parameter(1)
  %p2 = f32[17] parameter(2)
  %t0 = (f32[13], f32[15]) tuple(%p0, %p1)
  %t1 = ((f32[13], f32[15]), f32[17]) tuple(%t0, %p2)
  %gte.0 = (f32[13], f32[15]) get-tuple-element(%t1), index=0
  %gte.1 = f32[13] get-tuple-element(%gte.0), index=0
  %gte.2 = f32[15] get-tuple-element(%gte.0), index=1
  %gte.3 = f32[17] get-tuple-element(%t1), index=1
  ROOT %t2 = (f32[13], f32[15], f32[17]) tuple(%gte.1, %gte.2, %gte.3),
    sharding={{replicated metadata={op_name="a"}},
              {devices=[2]0,1 metadata={op_name="b"}},
              {devices=[3]1,2,3 metadata={op_name="c"}}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* t0 = FindInstruction(module.get(), "t0");
  ASSERT_NE(t0, nullptr);
  EXPECT_THAT(t0, op::Sharding("{{replicated}, {devices=[2]0,1}}"));
  auto* t1 = FindInstruction(module.get(), "t1");
  ASSERT_NE(t1, nullptr);
  EXPECT_THAT(
      t1, op::Sharding("{{replicated}, {devices=[2]0,1}, {devices=[3]1,2,3}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(t0->sharding().tuple_elements()[0],
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(t0->sharding().tuple_elements()[1],
                ShardingMetadata({CreateMetadata("b")}));
    EXPECT_THAT(t1->sharding().tuple_elements()[0],
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(t1->sharding().tuple_elements()[1],
                ShardingMetadata({CreateMetadata("b")}));
    EXPECT_THAT(t1->sharding().tuple_elements()[2],
                ShardingMetadata({CreateMetadata("c")}));
  } else {
    for (HloInstruction* instruction : {t0, t1}) {
      for (const HloSharding& sub_sharding :
           instruction->sharding().tuple_elements()) {
        EXPECT_THAT(sub_sharding, ShardingMetadata({}));
      }
    }
  }
}

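// Dynamic-slice propagation: the sliced dimension becomes replicated in the
// propagated sharding, and ShardBarrierFrom/ShardBarrierTo custom calls block
// the forward and backward directions respectively.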
TEST_P(ParameterizedMetadataTest, DynamicSliceForwardPassWithBarrier) {
  const char* hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[11,13,15] parameter(0)
  %c0 = f32[11,13,15] copy(%p0),
    sharding={devices=[1,1,2]0,1 metadata={op_name="a"}}
  %p1 = s32[] parameter(1)
  %i0 = s32[] constant(0)
  %shard-barrier-from = f32[11,13,15] custom-call(%c0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %ds = f32[11,1,15] dynamic-slice(%shard-barrier-from, %i0, %p1, %i0),
    dynamic_slice_sizes={11,1,15}
  ROOT %root = (f32[11,1,15]) tuple(%ds)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "ds");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}

TEST_P(ParameterizedMetadataTest, DynamicSliceForwardPass) {
  const char* hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[11,13,15] parameter(0)
  %c0 = f32[11,13,15] copy(%p0),
    sharding={devices=[2,2,2]<=[8] metadata={op_name="a"}}
  %p1 = s32[] parameter(1)
  %i0 = s32[] constant(0)
  %ds = f32[11,1,15] dynamic-slice(%c0, %i0, %p1, %i0),
    dynamic_slice_sizes={11,1,15}
  ROOT %root = (f32[11,1,15]) tuple(%ds)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "ds");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding(
          "{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, DynamicSliceBackwardPass) {
  const char* hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[11,13,15] parameter(0)
  %c0 = f32[11,13,15] copy(%p0)
  %p1 = s32[] parameter(1)
  %i0 = s32[] constant(0)
  %ds = f32[11,1,15] dynamic-slice(%c0, %i0, %p1, %i0),
    dynamic_slice_sizes={11,1,15},
    sharding={devices=[2,2,2]<=[8] metadata={op_name="a"}}
  ROOT %root = (f32[11,1,15]) tuple(%ds)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "c0");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding(
          "{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, DynamicSliceBackwardPassWithBarrier) {
  const char* hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[11,13,15] parameter(0)
  %c0 = f32[11,13,15] copy(%p0)
  %p1 = s32[] parameter(1)
  %i0 = s32[] constant(0)
  %shard-barrier-to = f32[11,13,15] custom-call(%c0), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
  %ds = f32[11,1,15] dynamic-slice(%shard-barrier-to, %i0, %p1, %i0),
    dynamic_slice_sizes={11,1,15},
    sharding={devices=[1,1,2]0,1 metadata={op_name="a"}}
  ROOT %root = (f32[11,1,15]) tuple(%ds)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "c0");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}

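// Dynamic-update-slice behaves like dynamic-slice: shardings flow between the
// base, the update, and the result, with the updated dimension replicated on
// the update operand.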
TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceForwardPassBase) {
  const char* hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[11,13,15] parameter(0)
  %c0 = f32[11,13,15] copy(%p0),
    sharding={devices=[2,2,2]<=[8] metadata={op_name="a"}}
  %p1 = f32[11,1,15] parameter(1)
  %c1 = f32[11,1,15] copy(%p1)
  %p2 = s32[] parameter(2)
  %i0 = s32[] constant(0)
  %dus = f32[11,13,15] dynamic-update-slice(%c0, %c1, %i0, %p2, %i0)
  ROOT %root = (f32[11,13,15]) tuple(%dus)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* dus = FindInstruction(module.get(), "dus");
  ASSERT_NE(dus, nullptr);
  EXPECT_THAT(dus, op::Sharding("{devices=[2,2,2]<=[8]}"));
  auto* c1 = FindInstruction(module.get(), "c1");
  ASSERT_NE(c1, nullptr);
  EXPECT_THAT(
      c1, op::Sharding(
              "{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
  for (HloInstruction* instruction : {dus, c1}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceForwardPassWithBarrier) {
  const char* hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[11,13,15] parameter(0)
  %c0 = f32[11,13,15] copy(%p0),
    sharding={devices=[1,1,2]0,1 metadata={op_name="a"}}
  %p1 = f32[11,1,15] parameter(1)
  %c1 = f32[11,1,15] copy(%p1)
  %p2 = s32[] parameter(2)
  %i0 = s32[] constant(0)
  %shard-barrier-from = f32[11,13,15] custom-call(%c0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %dus = f32[11,13,15] dynamic-update-slice(%shard-barrier-from, %c1, %i0, %p2, %i0)
  ROOT %root = (f32[11,13,15]) tuple(%dus)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* dus = FindInstruction(module.get(), "dus");
  ASSERT_NE(dus, nullptr);
  EXPECT_FALSE(dus->has_sharding());
}

TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceForwardPassUpdate) {
  const char* hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[11,13,15] parameter(0)
  %c0 = f32[11,13,15] copy(%p0)
  %p1 = f32[11,1,15] parameter(1)
  %c1 = f32[11,1,15] copy(%p1),
    sharding={devices=[2,2,2]<=[8] metadata={op_name="a"}}
  %p2 = s32[] parameter(2)
  %i0 = s32[] constant(0)
  %dus = f32[11,13,15] dynamic-update-slice(%c0, %c1, %i0, %p2, %i0)
  ROOT %root = (f32[11,13,15]) tuple(%dus)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* dus = FindInstruction(module.get(), "dus");
  ASSERT_NE(dus, nullptr);
  EXPECT_THAT(
      dus, op::Sharding(
               "{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
  auto* c0 = FindInstruction(module.get(), "c0");
  ASSERT_NE(c0, nullptr);
  EXPECT_THAT(
      c0, op::Sharding(
              "{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
  for (HloInstruction* instruction : {dus, c0}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceBackwardPass) {
  const char* hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[11,13,15] parameter(0)
  %c0 = f32[11,13,15] copy(%p0)
  %p1 = f32[11,1,15] parameter(1)
  %c1 = f32[11,1,15] copy(%p1)
  %p2 = s32[] parameter(2)
  %i0 = s32[] constant(0)
  %dus = f32[11,13,15] dynamic-update-slice(%c0, %c1, %i0, %p2, %i0),
    sharding={devices=[2,2,2]<=[8] metadata={op_name="a"}}
  ROOT %root = (f32[11,13,15]) tuple(%dus)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* c0 = FindInstruction(module.get(), "c0");
  ASSERT_NE(c0, nullptr);
  EXPECT_THAT(c0, op::Sharding("{devices=[2,2,2]<=[8]}"));
  auto* c1 = FindInstruction(module.get(), "c1");
  ASSERT_NE(c1, nullptr);
  EXPECT_THAT(
      c1, op::Sharding(
              "{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
  for (HloInstruction* instruction : {c0, c1}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceBackwardPassWithBarrier) {
  const char* hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[11,13,15] parameter(0)
  %c0 = f32[11,13,15] copy(%p0)
  %p1 = f32[11,1,15] parameter(1)
  %c1 = f32[11,1,15] copy(%p1)
  %p2 = s32[] parameter(2)
  %i0 = s32[] constant(0)
  %shard-barrier-to = f32[11,13,15] custom-call(%c0), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
  %dus = f32[11,13,15] dynamic-update-slice(%shard-barrier-to, %c1, %i0, %p2, %i0),
    sharding={devices=[1,1,2]0,1 metadata={op_name="a"}}
  ROOT %root = (f32[11,13,15]) tuple(%dus)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* c0 = FindInstruction(module.get(), "c0");
  ASSERT_NE(c0, nullptr);
  EXPECT_FALSE(c0->has_sharding());
}

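// Einsum-style convolutions (expressed via dim_labels and windows): batch and
// non-contracting dimension shardings propagate between the operands and the
// output, in both directions.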
TEST_P(ParameterizedMetadataTestWithOutput, EinsumLHSBatchPartitioned) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64] parameter(0)
  %lhs.copy = f32[32,24,64] copy(%lhs),
    sharding={devices=[2,1,1]0,1 metadata={op_name="a"}}
  %rhs = f32[32,39296,64] parameter(1)
  %rhs.copy = f32[32,39296,64] copy(%rhs)
  %conv = f32[32,24,39296] convolution(%lhs.copy, %rhs.copy),
    dim_labels=0bf_0oi->0bf, window={size=32 stride=31 lhs_dilate=32}
  ROOT %copy = f32[32,24,39296] copy(%conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata,
                          {GetParam().allow_root_sharding_propagation})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* rhs_copy = FindInstruction(module.get(), "rhs.copy");
  ASSERT_NE(rhs_copy, nullptr);
  EXPECT_THAT(rhs_copy, op::Sharding("{devices=[2,1,1]0,1}"));
  auto* conv = FindInstruction(module.get(), "conv");
  ASSERT_NE(conv, nullptr);
  EXPECT_THAT(conv, op::Sharding("{devices=[2,1,1]0,1}"));
  for (HloInstruction* instruction : {rhs_copy, conv}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
  if (GetParam().allow_root_sharding_propagation) {
    EXPECT_THAT(module->entry_computation()->root_instruction(),
                op::Sharding("{devices=[2,1,1]0,1}"));
  }
}

TEST_P(ParameterizedMetadataTest, EinsumOutputBatchPartitioned) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64] parameter(0)
  %lhs.copy = f32[32,24,64] copy(%lhs)
  %rhs = f32[32,39296,64] parameter(1)
  %rhs.copy = f32[32,39296,64] copy(%rhs)
  %conv = f32[32,24,39296] convolution(%lhs.copy, %rhs.copy),
    dim_labels=0bf_0oi->0bf, window={size=32 stride=31 lhs_dilate=32},
    sharding={devices=[2,1,1]0,1 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* lhs_copy = FindInstruction(module.get(), "lhs.copy");
  ASSERT_NE(lhs_copy, nullptr);
  EXPECT_THAT(lhs_copy, op::Sharding("{devices=[2,1,1]0,1}"));
  auto* rhs_copy = FindInstruction(module.get(), "rhs.copy");
  ASSERT_NE(rhs_copy, nullptr);
  EXPECT_THAT(rhs_copy, op::Sharding("{devices=[2,1,1]0,1}"));
  for (HloInstruction* instruction : {lhs_copy, rhs_copy}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, EinsumLHSNonContractingPartitioned) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64,128] parameter(0)
  %lhs.copy = f32[32,24,64,128] copy(%lhs),
    sharding={devices=[1,2,1,2]0,1,2,3 metadata={op_name="a"}}
  %rhs = f32[32,39296,64,1] parameter(1)
  %rhs.copy = f32[32,39296,64,1] copy(%rhs)
  %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy),
    dim_labels=0bf1_0oi1->0bf1, window={size=32x1 stride=31x1 lhs_dilate=32x1}
  ROOT %copy = f32[32,24,39296,128] copy(%conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "conv");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, EinsumOutputLHSNonContractingPartitioned) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64,128] parameter(0)
  %lhs.copy = f32[32,24,64,128] copy(%lhs)
  %rhs = f32[32,39296,64,1] parameter(1)
  %rhs.copy = f32[32,39296,64,1] copy(%rhs)
  ROOT %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy),
    dim_labels=0bf1_0oi1->0bf1, window={size=32x1 stride=31x1 lhs_dilate=32x1},
    sharding={devices=[1,2,1,2]0,1,2,3 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "lhs.copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, EinsumRHSNonContractingPartitioned) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64,1] parameter(0)
  %lhs.copy = f32[32,24,64,1] copy(%lhs)
  %rhs = f32[32,39296,64,128] parameter(1)
  %rhs.copy = f32[32,39296,64,128] copy(%rhs),
    sharding={devices=[1,2,1,2]0,1,2,3 metadata={op_name="a"}}
  %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy),
    dim_labels=0bf1_0oi1->0bf1,
    window={size=32x128 stride=31x1 pad=0_0x127_127 lhs_dilate=32x1 rhs_reversal=0x1}
  ROOT %copy = f32[32,24,39296,128] copy(%conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "conv");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, EinsumOutputRHSNonContractingPartitioned) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64,1] parameter(0)
  %lhs.copy = f32[32,24,64,1] copy(%lhs)
  %rhs = f32[32,39296,64,128] parameter(1)
  %rhs.copy = f32[32,39296,64,128] copy(%rhs)
  ROOT %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy),
    dim_labels=0bf1_0oi1->0bf1,
    window={size=32x128 stride=31x1 pad=0_0x127_127 lhs_dilate=32x1 rhs_reversal=0x1},
    sharding={devices=[1,1,2,2]0,1,2,3 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "rhs.copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, EinsumChooseLargerOperand) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64,1] parameter(0)
  %lhs.copy = f32[32,24,64,1] copy(%lhs),
    sharding={devices=[1,4,1,1]0,1,2,3 metadata={op_name="a"}}
  %rhs = f32[32,39296,64,128] parameter(1)
  %rhs.copy = f32[32,39296,64,128] copy(%rhs),
    sharding={devices=[1,2,1,2]0,1,2,3 metadata={op_name="b"}}
  %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy),
    dim_labels=0bf1_0oi1->0bf1,
    window={size=32x128 stride=31x1 pad=0_0x127_127 lhs_dilate=32x1 rhs_reversal=0x1}
  ROOT %copy = f32[32,24,39296,128] copy(%conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "conv");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, EinsumChooseBatchFirst) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64,1] parameter(0)
  %lhs.copy = f32[32,24,64,1] copy(%lhs),
    sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}}
  %rhs = f32[32,39296,64,128] parameter(1)
  %rhs.copy = f32[32,39296,64,128] copy(%rhs),
    sharding={devices=[2,1,1,1]0,1 metadata={op_name="b"}}
  %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy),
    dim_labels=0bf1_0oi1->0bf1,
    window={size=32x128 stride=31x1 pad=0_0x127_127 lhs_dilate=32x1 rhs_reversal=0x1}
  ROOT %copy = f32[32,24,39296,128] copy(%conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "conv");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,1,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

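// Gather: index shardings map onto the output's batch dimensions and data
// operand shardings onto the offset dimensions, in both the forward and the
// backward direction, including partially replicated tilings.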
TEST_P(ParameterizedMetadataTest, GatherFromIndex) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[2,2,9] parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %indices = s32[2,3,4] parameter(1),
    sharding={devices=[1,2,1]0,1 metadata={op_name="b"}}
  %gather = f32[3,4,9] gather(%input, %indices), offset_dims={2},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,9}
  ROOT %copy = f32[3,4,9] copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "gather");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, GatherFromIndex_PartialReplicate) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[2,9] parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %indices = s32[3] parameter(1),
    sharding={devices=[2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
  %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
    collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
    slice_sizes={1,9}
  ROOT %copy = f32[3,9] copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "gather");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, GatherFromDataOperand) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[2,9] parameter(0),
    sharding={devices=[1,2]0,1 metadata={op_name="a"}}
  %indices = s32[3] parameter(1),
    sharding={replicated metadata={op_name="b"}}
  %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
    collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
    slice_sizes={1,9}
  ROOT %copy = f32[3,9] copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "gather");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, GatherFromDataOperand_PartialReplicate) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[2,9] parameter(0),
    sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
  %indices = s32[3] parameter(1),
    sharding={replicated metadata={op_name="b"}}
  %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
    collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
    slice_sizes={1,9}
  ROOT %copy = f32[3,9] copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "gather");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, GatherToIndex) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[2,9] parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %p1 = s32[3] parameter(1)
  %indices = s32[3] copy(%p1)
  ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
    collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
    slice_sizes={1,9},
    sharding={devices=[2,1]0,1 metadata={op_name="b"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "indices");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, GatherToIndex_PartialReplicate) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[2,9] parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %p1 = s32[3] parameter(1)
  %indices = s32[3] copy(%p1)
  ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
    collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
    slice_sizes={1,9},
    sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "indices");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[2,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, GatherToIndex2) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %input = bf16[2,4819,4] parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %p1 = s32[2,1000,2] parameter(1)
  %indices = s32[2,1000,2] copy(%p1)
  ROOT %gather = bf16[2,1000,4]
    gather(bf16[2,4819,4] %input, s32[2,1000,2] %indices),
    offset_dims={2}, collapsed_slice_dims={0,1},
    start_index_map={0,1}, index_vector_dim=2, slice_sizes={1,1,4},
    sharding={devices=[1,2,1]0,1 metadata={op_name="b"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "indices");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, GatherToIndex2_PartialReplicate) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %input = bf16[2,4819,4] parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %p1 = s32[2,1000,2] parameter(1)
  %indices = s32[2,1000,2] copy(%p1)
  ROOT %gather = bf16[2,1000,4]
    gather(bf16[2,4819,4] %input, s32[2,1000,2] %indices),
    offset_dims={2}, collapsed_slice_dims={0,1},
    start_index_map={0,1}, index_vector_dim=2, slice_sizes={1,1,4},
    sharding={devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "indices");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, GatherToIndex3) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %input = bf16[2,4819,4] parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %p1 = s32[2,2,1000] parameter(1)
  %indices = s32[2,2,1000] copy(%p1)
  ROOT %gather = bf16[2,1000,4]
    gather(bf16[2,4819,4] %input, s32[2,2,1000] %indices),
    offset_dims={2}, collapsed_slice_dims={0,1},
    start_index_map={0,1}, index_vector_dim=1, slice_sizes={1,1,4},
    sharding={devices=[1,2,1]0,1 metadata={op_name="b"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "indices");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, GatherToDataOperand) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %p0 = f32[2,9] parameter(0)
  %input = f32[2,9] copy(%p0)
  %indices = s32[3] parameter(1),
    sharding={replicated metadata={op_name="a"}}
  ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
    collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
    slice_sizes={1,9},
    sharding={devices=[1,2]0,1 metadata={op_name="b"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "input");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, GatherToDataOperand_PartialReplicate) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %p0 = f32[2,9] parameter(0)
  %input = f32[2,9] copy(%p0)
  %indices = s32[3] parameter(1),
    sharding={replicated metadata={op_name="a"}}
  ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
    collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
    slice_sizes={1,9},
    sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "input");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

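// Scatter: shardings on the data and update operands propagate to the scatter
// result (tuple-shaped for variadic scatter), and the result sharding
// propagates back to the data operand.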
TEST_P(ParameterizedMetadataTest, DataOperandToScatter) {
  const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
  %input = f32[2,9] parameter(0),
    sharding={devices=[1,2]0,1 metadata={op_name="a"}}
  %indices = s32[3] parameter(1),
    sharding={replicated metadata={op_name="b"}}
  %updates = f32[3,9] parameter(2),
    sharding={replicated metadata={op_name="c"}}
  %scatter = f32[2,9] scatter(%input, %indices, %updates),
    to_apply=add,
    update_window_dims={1},
    inserted_window_dims={0},
    scatter_dims_to_operand_dims={0},
    index_vector_dim=1
  ROOT %copy = f32[2,9] copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "scatter");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, DataOperandToScatter_PartialReplicate) {
  const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
  %input = f32[2,9] parameter(0),
    sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
  %indices = s32[3] parameter(1),
    sharding={replicated metadata={op_name="b"}}
  %updates = f32[3,9] parameter(2),
    sharding={replicated metadata={op_name="c"}}
  %scatter = f32[2,9] scatter(%input, %indices, %updates),
    to_apply=add,
    update_window_dims={1},
    inserted_window_dims={0},
    scatter_dims_to_operand_dims={0},
    index_vector_dim=1
  ROOT %copy = f32[2,9] copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "scatter");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

%updates.1),\n to_apply=add,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1\n ROOT %copy = (f32[2,9],f32[2,9]) copy(%scatter)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"scatter\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction,\n op::Sharding(\"{{devices=[1,4]0,1,2,3}, {devices=[1,2,2]0,1,2,3 \"\n \"last_tile_dim_replicate}}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding().tuple_elements()[0],\n ShardingMetadata({CreateMetadata(\"a\")}));\n EXPECT_THAT(instruction->sharding().tuple_elements()[1],\n ShardingMetadata({CreateMetadata(\"b\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, UpdateOperandToScatter) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: f32[], rhs: f32[]) -> f32[] {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT sum = f32[] add(lhs, rhs)\n}\nENTRY entry {\n %input = f32[2,9] parameter(0),\n sharding={replicated metadata={op_name=\"a\"}}\n %indices = s32[3] parameter(1),\n sharding={replicated metadata={op_name=\"b\"}}\n %updates = f32[3,9] parameter(2),\n sharding={devices=[1,2]0,1 metadata={op_name=\"c\"}}\n %scatter = f32[2,9] scatter(%input, %indices, %updates),\n to_apply=add,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1\n ROOT %copy = f32[2,9] copy(%scatter)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"scatter\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[1,2]0,1}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"c\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, UpdateOperandToScatter_PartialReplicate) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: f32[], rhs: f32[]) -> f32[] {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT sum = f32[] add(lhs, rhs)\n}\nENTRY entry {\n %input = f32[2,9] parameter(0),\n sharding={replicated metadata={op_name=\"a\"}}\n %indices = s32[3] parameter(1),\n sharding={replicated metadata={op_name=\"b\"}}\n %updates = f32[3,9] parameter(2),\n sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name=\"c\"}}\n %scatter = f32[2,9] scatter(%input, %indices, %updates),\n to_apply=add,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1\n ROOT %copy = f32[2,9] copy(%scatter)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n 
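// When the test parameter asks for it, strip all op_name metadata up front so\n   // propagation is exercised without provenance information.\n   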
ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"scatter\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction,\n op::Sharding(\"{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"c\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, UpdateOperandToScatter_Variadic) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {\n lhs.0 = f32[] parameter(0)\n lhs.1 = f32[] parameter(1)\n rhs.0 = f32[] parameter(2)\n rhs.1 = f32[] parameter(3)\n sum.0 = f32[] add(lhs.0, rhs.0)\n sum.1 = f32[] add(lhs.1, rhs.1)\n ROOT tuple = tuple(sum.0, sum.1)\n}\nENTRY entry {\n %input.0 = f32[2,9] parameter(0),\n sharding={replicated metadata={op_name=\"a\"}}\n %input.1 = f32[2,9] parameter(1),\n sharding={replicated metadata={op_name=\"b\"}}\n %indices = s32[3] parameter(2),\n sharding={replicated metadata={op_name=\"c\"}}\n %updates.0 = f32[3,9] parameter(3),\n sharding={devices=[1,4]0,1,2,3 metadata={op_name=\"d\"}}\n %updates.1 = f32[3,9] parameter(4),\n sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name=\"e\"}}\n %scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices, %updates.0, %updates.1),\n to_apply=add,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1\n ROOT %copy = (f32[2,9],f32[2,9]) copy(%scatter)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"scatter\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction,\n op::Sharding(\"{{devices=[1,4]0,1,2,3}, {devices=[1,2,2]0,1,2,3 \"\n \"last_tile_dim_replicate}}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding().tuple_elements()[0],\n ShardingMetadata({CreateMetadata(\"d\")}));\n EXPECT_THAT(instruction->sharding().tuple_elements()[1],\n ShardingMetadata({CreateMetadata(\"e\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ScatterToDataOperand_PartialReplicate) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: f32[], rhs: f32[]) -> f32[] {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT sum = f32[] add(lhs, rhs)\n}\nENTRY entry {\n %p0 = f32[2,9] parameter(0)\n %input = f32[2,9] copy(%p0)\n %indices = s32[3] parameter(1),\n sharding={replicated metadata={op_name=\"a\"}}\n %updates = f32[3,9] parameter(2),\n sharding={replicated metadata={op_name=\"b\"}}\n ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),\n to_apply=add,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1,\n sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate
metadata={op_name=\"c\"}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"input\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction,\n op::Sharding(\"{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"c\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ScatterToDataOperand) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: f32[], rhs: f32[]) -> f32[] {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT sum = f32[] add(lhs, rhs)\n}\nENTRY entry {\n %p0 = f32[2,9] parameter(0)\n %input = f32[2,9] copy(%p0)\n %indices = s32[3] parameter(1),\n sharding={replicated metadata={op_name=\"a\"}}\n %updates = f32[3,9] parameter(2),\n sharding={replicated metadata={op_name=\"b\"}}\n ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),\n to_apply=add,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1,\n sharding={devices=[1,2]0,1 metadata={op_name=\"c\"}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"input\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[1,2]0,1}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"c\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ScatterToDataOperand_Variadic) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {\n lhs.0 = f32[] parameter(0)\n lhs.1 = f32[] parameter(1)\n rhs.0 = f32[] parameter(2)\n rhs.1 = f32[] parameter(3)\n sum.0 = f32[] add(lhs.0, rhs.0)\n sum.1 = f32[] add(lhs.1, rhs.1)\n ROOT tuple = tuple(sum.0, sum.1)\n}\nENTRY entry {\n %p0 = f32[2,9] parameter(0)\n %input.0 = f32[2,9] copy(%p0)\n %p1 = f32[2,9] parameter(1)\n %input.1 = f32[2,9] copy(%p1)\n %indices = s32[3] parameter(2),\n sharding={replicated metadata={op_name=\"a\"}}\n %updates.0 = f32[3,9] parameter(3),\n sharding={replicated metadata={op_name=\"b\"}}\n %updates.1 = f32[3,9] parameter(4),\n sharding={replicated metadata={op_name=\"c\"}}\n ROOT %scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices, %updates.0, %updates.1),\n to_apply=add,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1,\n sharding={{devices=[1,4]0,1,2,3 metadata={op_name=\"d\"}}, {devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name=\"e\"}}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n 
ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"input.0\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[1,4]0,1,2,3}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"d\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n instruction = FindInstruction(module.get(), \"input.1\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction,\n op::Sharding(\"{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"e\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ScatterToUpdateOperand_PartialReplicate) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: f32[], rhs: f32[]) -> f32[] {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT sum = f32[] add(lhs, rhs)\n}\nENTRY entry {\n %input = f32[2,9] parameter(0)\n %indices = s32[3] parameter(1),\n sharding={replicated metadata={op_name=\"a\"}}\n %p2 = f32[3,9] parameter(2)\n %updates = f32[3,9] copy(%p2)\n ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),\n to_apply=add,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1,\n sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name=\"b\"}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"updates\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction,\n op::Sharding(\"{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"b\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ScatterToUpdateOperand) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: f32[], rhs: f32[]) -> f32[] {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT sum = f32[] add(lhs, rhs)\n}\nENTRY entry {\n %input = f32[2,9] parameter(0)\n %indices = s32[3] parameter(1),\n sharding={replicated metadata={op_name=\"a\"}}\n %p2 = f32[3,9] parameter(2)\n %updates = f32[3,9] copy(%p2)\n ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),\n to_apply=add,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1,\n sharding={devices=[1,2]0,1 metadata={op_name=\"b\"}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n 
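// First constructor argument is is_spmd; true selects the SPMD flavor of the\n   // pass, which also handles partially replicated (last_tile_dim_replicate) shardings.\n   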
ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"updates\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[1,2]0,1}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"b\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ScatterToUpdateOperand_Variadic) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {\n lhs.0 = f32[] parameter(0)\n lhs.1 = f32[] parameter(1)\n rhs.0 = f32[] parameter(2)\n rhs.1 = f32[] parameter(3)\n sum.0 = f32[] add(lhs.0, rhs.0)\n sum.1 = f32[] add(lhs.1, rhs.1)\n ROOT tuple = tuple(sum.0, sum.1)\n}\nENTRY entry {\n %input.0 = f32[2,9] parameter(0)\n %input.1 = f32[2,9] parameter(1)\n %indices = s32[3] parameter(2),\n sharding={replicated metadata={op_name=\"a\"}}\n %p3 = f32[3,9] parameter(3)\n %updates.0 = f32[3,9] copy(%p3)\n %p4 = f32[3,9] parameter(4)\n %updates.1 = f32[3,9] copy(%p4)\n ROOT %scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices, %updates.0, %updates.1),\n to_apply=add,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1,\n sharding={{devices=[1,4]0,1,2,3 metadata={op_name=\"b\"}}, {devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name=\"c\"}}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"updates.0\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[1,4]0,1,2,3}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"b\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n instruction = FindInstruction(module.get(), \"updates.1\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction,\n op::Sharding(\"{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"c\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: f32[], rhs: f32[]) -> f32[] {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT sum = f32[] add(lhs, rhs)\n}\nENTRY entry {\n %input = f32[2,9] parameter(0),\n sharding={replicated metadata={op_name=\"a\"}}\n %p1 = s32[3] parameter(1),\n sharding={replicated metadata={op_name=\"b\"}}\n %indices = s32[3] copy(%p1)\n %updates = f32[3,9] parameter(2),\n sharding={devices=[2,1]0,1 metadata={op_name=\"c\"}}\n ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),\n to_apply=add,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n 
index_vector_dim=1,\n sharding={replicated metadata={op_name=\"d\"}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"indices\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[2]0,1}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"c\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex2) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: f32[], rhs: f32[]) -> f32[] {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT sum = f32[] add(lhs, rhs)\n}\nENTRY entry {\n %input = f32[2,9] parameter(0),\n sharding={replicated metadata={op_name=\"a\"}}\n %p1 = s32[1,3] parameter(1),\n sharding={replicated metadata={op_name=\"b\"}}\n %indices = s32[1,3] copy(%p1)\n %updates = f32[3,9] parameter(2),\n sharding={devices=[2,1]0,1 metadata={op_name=\"c\"}}\n ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),\n to_apply=add,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=0,\n sharding={replicated metadata={op_name=\"d\"}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"indices\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[1,2]0,1}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"c\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex_PartialReplicate) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: f32[], rhs: f32[]) -> f32[] {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT sum = f32[] add(lhs, rhs)\n}\nENTRY entry {\n %input = f32[2,9] parameter(0),\n sharding={replicated metadata={op_name=\"a\"}}\n %p1 = s32[3] parameter(1),\n sharding={replicated metadata={op_name=\"b\"}}\n %indices = s32[3] copy(%p1)\n %updates = f32[3,9] parameter(2),\n sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name=\"c\"}}\n ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),\n to_apply=add,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1,\n sharding={replicated metadata={op_name=\"d\"}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* 
instruction = FindInstruction(module.get(), \"indices\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction,\n op::Sharding(\"{devices=[2,2]0,1,2,3 last_tile_dim_replicate}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"c\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex_RankMismatch) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: f32[], rhs: f32[]) -> f32[] {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT sum = f32[] add(lhs, rhs)\n}\nENTRY entry {\n %input = f32[1,24,24,24,3,3] parameter(0),\n sharding={replicated metadata={op_name=\"a\"}}\n %p1 = s32[1,24,24,24,5] parameter(1),\n sharding={replicated metadata={op_name=\"b\"}}\n %indices = s32[1,24,24,24,5] copy(%p1)\n %updates = f32[1,24,24,24,3] parameter(2),\n sharding={devices=[1,2,2,2,1]0,1,2,3,4,5,6,7 metadata={op_name=\"c\"}}\n %scatter = f32[1,24,24,24,3,3] scatter(%input, %indices, %updates),\n to_apply=add,\n update_window_dims={4},\n inserted_window_dims={0,1,2,3,4},\n scatter_dims_to_operand_dims={0,1,2,3,4},\n index_vector_dim=4,\n sharding={replicated metadata={op_name=\"d\"}}\n ROOT %copy = f32[1,24,24,24,3,3] copy(%scatter)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"indices\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction,\n op::Sharding(\"{devices=[1,2,2,2,1]0,1,2,3,4,5,6,7}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"c\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex_Variadic) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {\n lhs.0 = f32[] parameter(0)\n lhs.1 = f32[] parameter(1)\n rhs.0 = f32[] parameter(2)\n rhs.1 = f32[] parameter(3)\n sum.0 = f32[] add(lhs.0, rhs.0)\n sum.1 = f32[] add(lhs.1, rhs.1)\n ROOT tuple = tuple(sum.0, sum.1)\n}\nENTRY entry {\n %input.0 = f32[2,9] parameter(0),\n sharding={replicated metadata={op_name=\"a\"}}\n %input.1 = f32[2,9] parameter(1),\n sharding={replicated metadata={op_name=\"b\"}}\n %p2 = s32[3,3] parameter(2),\n sharding={replicated metadata={op_name=\"c\"}}\n %indices = s32[3,3] copy(%p2)\n %updates.0 = f32[3,3,9] parameter(3),\n sharding={devices=[2,1,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name=\"d\"}}\n %updates.1 = f32[3,3,9] parameter(4),\n sharding={devices=[1,2,1,2]0,2,1,3 last_tile_dim_replicate metadata={op_name=\"e\"}}\n ROOT %scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices, %updates.0, %updates.1),\n to_apply=add,\n update_window_dims={2},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=2,\n sharding={{replicated metadata={op_name=\"d\"}}, {replicated metadata={op_name=\"e\"}}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n 
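// The metadata-cleared variant verifies that the propagated shardings themselves\n   // do not depend on the op_name annotations.\n   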
ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"indices\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[2,2]0,1,2,3}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"d\"), CreateMetadata(\"e\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ScatterIndexToUpdate) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: f32[], rhs: f32[]) -> f32[] {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT sum = f32[] add(lhs, rhs)\n}\nENTRY entry {\n %input = f32[2,9] parameter(0),\n sharding={replicated metadata={op_name=\"a\"}}\n %indices = s32[3] parameter(1),\n sharding={devices=[2]0,1 metadata={op_name=\"b\"}}\n %p2 = f32[3,9] parameter(2),\n sharding={replicated metadata={op_name=\"c\"}}\n %updates = f32[3,9] copy(%p2)\n ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),\n to_apply=add,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1,\n sharding={replicated metadata={op_name=\"d\"}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"updates\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[2,1]0,1}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"b\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ScatterIndexToUpdate_PartialReplicate) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: f32[], rhs: f32[]) -> f32[] {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT sum = f32[] add(lhs, rhs)\n}\nENTRY entry {\n %input = f32[2,9] parameter(0),\n sharding={replicated metadata={op_name=\"a\"}}\n %indices = s32[3] parameter(1),\n sharding={devices=[2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name=\"b\"}}\n %p2 = f32[3,9] parameter(2),\n sharding={replicated metadata={op_name=\"c\"}}\n %updates = f32[3,9] copy(%p2)\n ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),\n to_apply=add,\n update_window_dims={1},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=1,\n sharding={replicated metadata={op_name=\"d\"}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"updates\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction,\n op::Sharding(\"{devices=[2,1,2]0,1,2,3 
last_tile_dim_replicate}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"b\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ScatterIndexToUpdate2_PartialReplicate) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: f32[], rhs: f32[]) -> f32[] {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT sum = f32[] add(lhs, rhs)\n}\nENTRY entry {\n %input = bf16[15,8] parameter(0),\n sharding={replicated metadata={op_name=\"a\"}}\n %indices = s32[8,1,1] parameter(1),\n sharding={devices=[2,1,1,4]0,1,2,3,4,5,6,7\n last_tile_dim_replicate metadata={op_name=\"b\"}}\n %p2 = bf16[8,1,8] parameter(2),\n sharding={replicated metadata={op_name=\"c\"}}\n %updates = bf16[8,1,8] copy(%p2)\n ROOT %scatter = bf16[15,8]{1,0} scatter(bf16[15,8] %input,\n s32[8,1,1] %indices, bf16[8,1,8] %updates),\n update_window_dims={2},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0}, index_vector_dim=2, to_apply=%add,\n sharding={replicated metadata={op_name=\"d\"}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"updates\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(\n instruction,\n op::Sharding(\n \"{devices=[2,1,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"b\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ScatterIndexToUpdate_Variadic) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {\n lhs.0 = f32[] parameter(0)\n lhs.1 = f32[] parameter(1)\n rhs.0 = f32[] parameter(2)\n rhs.1 = f32[] parameter(3)\n sum.0 = f32[] add(lhs.0, rhs.0)\n sum.1 = f32[] add(lhs.1, rhs.1)\n ROOT tuple = tuple(sum.0, sum.1)\n}\nENTRY entry {\n %input.0 = f32[2,9] parameter(0),\n sharding={replicated metadata={op_name=\"a\"}}\n %input.1 = f32[2,9] parameter(1),\n sharding={replicated metadata={op_name=\"b\"}}\n %indices = s32[3,3] parameter(2),\n sharding={devices=[2,2]0,1,2,3 metadata={op_name=\"c\"}}\n %p3 = f32[3,3,9] parameter(3),\n sharding={replicated metadata={op_name=\"d\"}}\n %updates.0 = f32[3,3,9] copy(%p3)\n %p4 = f32[3,3,9] parameter(4),\n sharding={replicated metadata={op_name=\"e\"}}\n %updates.1 = f32[3,3,9] copy(%p4)\n ROOT %scatter = (f32[2,9],f32[2,9])scatter(%input.0, %input.1, %indices, %updates.0, %updates.1),\n to_apply=add,\n update_window_dims={2},\n inserted_window_dims={0},\n scatter_dims_to_operand_dims={0},\n index_vector_dim=2,\n sharding={replicated metadata={op_name=\"d\"}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(false, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction 
= FindInstruction(module.get(), \"updates.0\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[2,2,1]0,1,2,3}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"c\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n instruction = FindInstruction(module.get(), \"updates.1\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[2,2,1]0,1,2,3}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"c\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, PartialShardingOnElementwise) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY entry {\n %p0 = f32[2,9] parameter(0),\n sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name=\"a\"}}\n %p1 = f32[2,9] parameter(1),\n sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate metadata={op_name=\"b\"}}\n %lhs = f32[2,9] copy(%p0)\n %rhs = f32[2,9] copy(%p1)\n %add = f32[2,9] add(%lhs, %rhs)\n ROOT %copy = f32[2,9] copy(%add)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* lhs = FindInstruction(module.get(), \"lhs\");\n ASSERT_NE(lhs, nullptr);\n EXPECT_THAT(lhs, op::Sharding(\"{devices=[2,2]0,2,1,3}\"));\n auto* rhs = FindInstruction(module.get(), \"rhs\");\n ASSERT_NE(rhs, nullptr);\n EXPECT_THAT(rhs, op::Sharding(\"{devices=[2,2]0,2,1,3}\"));\n auto* add = FindInstruction(module.get(), \"add\");\n ASSERT_NE(add, nullptr);\n EXPECT_THAT(add, op::Sharding(\"{devices=[2,2]0,2,1,3}\"));\n for (HloInstruction* instruction : {lhs, rhs, add}) {\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"b\"), CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n }\n}\nTEST_P(ParameterizedMetadataTest, PartialShardingOnElementwise2) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY entry {\n %p0 = f32[2,9] parameter(0),\n sharding={devices=[1,2,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name=\"a\"}}\n %p1 = f32[2,9] parameter(1),\n sharding={devices=[2,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name=\"b\"}}\n %lhs = f32[2,9] copy(%p0)\n %rhs = f32[2,9] copy(%p1)\n %add = f32[2,9] add(%lhs, %rhs)\n ROOT %copy = f32[2,9] copy(%add)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* lhs = FindInstruction(module.get(), \"lhs\");\n ASSERT_NE(lhs, nullptr);\n EXPECT_THAT(\n lhs,\n op::Sharding(\"{devices=[2,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}\"));\n auto* rhs = FindInstruction(module.get(), \"rhs\");\n ASSERT_NE(rhs, nullptr);\n EXPECT_THAT(\n rhs,\n 
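// rhs should converge to the same merged partial sharding as lhs.\n      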
op::Sharding(\"{devices=[2,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}\"));\n auto* add = FindInstruction(module.get(), \"add\");\n ASSERT_NE(add, nullptr);\n EXPECT_THAT(\n add,\n op::Sharding(\"{devices=[2,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(lhs->sharding(),\n ShardingMetadata({CreateMetadata(\"b\"), CreateMetadata(\"a\")}));\n EXPECT_THAT(rhs->sharding(),\n ShardingMetadata({CreateMetadata(\"b\"), CreateMetadata(\"a\")}));\n EXPECT_THAT(add->sharding(),\n ShardingMetadata({CreateMetadata(\"b\"), CreateMetadata(\"a\")}));\n } else {\n for (HloInstruction* instruction : {lhs, rhs}) {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n }\n}\nTEST_P(ParameterizedMetadataTest, PartialShardingTransposeForwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %transpose {\n %param = f32[7,11,13]{2,1,0} parameter(0),\n sharding={devices=[2,1,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name=\"a\"}}\n %transpose = f32[11,13,7]{2,1,0} transpose(%param), dimensions={1,2,0}\n ROOT %copy = f32[11,13,7]{2,1,0} copy(%transpose)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"transpose\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(\n instruction,\n op::Sharding(\n \"{devices=[1,2,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, PartialShardingTransposeBackwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %transpose {\n %param = f32[7,11,13]{2,1,0} parameter(0)\n %copy = f32[7,11,13]{2,1,0} copy(%param)\n ROOT %transpose = f32[11,13,7]{2,1,0} transpose(%copy), dimensions={1,2,0},\n sharding={devices=[1,2,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name=\"a\"}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"copy\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(\n instruction,\n op::Sharding(\n \"{devices=[2,1,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, GatherForwardPassWithBarrier) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %module {\n %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),\n sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name=\"a\"}}\n %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1\n %iota2 = s32[1,8,4]{2,1,0} iota(), 
iota_dimension=2\n %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,\n s32[1,8,4]{2,1,0} %iota2), dimensions={0}\n %shard-barrier-from.0 = s32[8,4,2,2]{3,2,1,0} custom-call(%parameter.0), custom_call_target=\"ShardBarrierFrom\", custom_call_has_side_effect=true\n %shard-barrier-from.1 = s32[2,8,4]{2,1,0} custom-call(%concatenate.19), custom_call_target=\"ShardBarrierFrom\", custom_call_has_side_effect=true\n %gather = s32[8,4,2,2]{3,2,1,0} gather(\n s32[8,4,2,2]{3,2,1,0} %shard-barrier-from.0,\n s32[2,8,4]{2,1,0} %shard-barrier-from.1), offset_dims={2,3},\n collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,\n slice_sizes={1,1,2,2}\n ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n std::ignore,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n auto* instruction = FindInstruction(module.get(), \"gather\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_FALSE(instruction->has_sharding());\n}\nTEST_P(ParameterizedMetadataTest, GatherBackwardPassWithBarrier) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %module {\n %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)\n %copy.p = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)\n %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1\n %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2\n %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,\n s32[1,8,4]{2,1,0} %iota2), dimensions={0}\n %shard-barrier-to = s32[8,4,2,2]{3,2,1,0} custom-call(%copy.p), custom_call_target=\"ShardBarrierTo\", custom_call_has_side_effect=true\n %gather = s32[8,4,2,2]{3,2,1,0} gather(\n s32[8,4,2,2]{3,2,1,0} %shard-barrier-to,\n s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},\n collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,\n slice_sizes={1,1,2,2},\n sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name=\"a\"}}\n ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* concatenate = FindInstruction(module.get(), \"concatenate\");\n ASSERT_NE(concatenate, nullptr);\n EXPECT_THAT(concatenate, op::Sharding(\"{devices=[1,8,1]0,1,4,5,2,3,6,7}\"));\n auto* copy_p = FindInstruction(module.get(), \"copy.p\");\n ASSERT_NE(copy_p, nullptr);\n EXPECT_THAT(copy_p, op::Sharding(\"{replicated}\"));\n}\nTEST_F(ShardingPropagationTest, GatherExplicitBatchDimsFromOperandToResult) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY entry {\n %input = f32[10,3,14,4] parameter(0), sharding={devices=[2,2,2,2]<=[16]}\n %indices = s32[14,10,6,2] parameter(1)\n ROOT %gather = f32[14,10,6,4] gather(%input, %indices), offset_dims={3},\n collapsed_slice_dims={1}, operand_batching_dims={0,2},\n start_indices_batching_dims={1,0}, start_index_map={1,3},\n index_vector_dim=3, slice_sizes={1,1,1,4}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, true,\n {true})\n 
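// The trailing {true} is the per-output allow_spmd_sharding_propagation_to_output\n          // span, letting the propagated sharding reach the entry computation's output.\n          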
.Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Sharding(\"{devices=[2,2,1,2,2]<=[2,2,2,2]T(2,0,\"\n \"3,1) last_tile_dim_replicate}\"));\n}\nTEST_F(ShardingPropagationTest, GatherExplicitBatchDimsFromIndicesToResult) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY entry {\n %input = f32[10,3,14,4] parameter(0)\n %indices = s32[14,10,6,2] parameter(1), sharding={devices=[2,2,2,2]<=[16]}\n ROOT %gather = f32[14,10,6,4] gather(%input, %indices), offset_dims={3},\n collapsed_slice_dims={1}, operand_batching_dims={0,2},\n start_indices_batching_dims={1,0}, start_index_map={1,3},\n index_vector_dim=3, slice_sizes={1,1,1,4}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, true,\n {true})\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Sharding(\"{devices=[2,2,2,1,2]<=[16] last_tile_dim_replicate}\"));\n}\nTEST_F(ShardingPropagationTest, GatherBackwardWithExplicitBatchDims) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY entry {\n %input = f32[10,3,14,4] parameter(0)\n %indices = s32[14,10,6,2] parameter(1)\n ROOT %gather = f32[14,10,6,4] gather(%input, %indices), offset_dims={3},\n collapsed_slice_dims={1}, operand_batching_dims={0,2},\n start_indices_batching_dims={1,0}, start_index_map={1,3},\n index_vector_dim=3, slice_sizes={1,1,1,4},\n sharding={devices=[2,2,2,2]<=[16]}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(\n true, true,\n {true},\n {true, true})\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->parameter_instruction(0),\n op::Sharding(\"{devices=[2,1,2,2,2]<=[2,2,2,2]T(1,0,3,2) \"\n \"last_tile_dim_replicate}\"));\n EXPECT_THAT(\n module->entry_computation()->parameter_instruction(1),\n op::Sharding(\"{devices=[2,2,2,1,2]<=[16] last_tile_dim_replicate}\"));\n}\nTEST_F(ShardingPropagationTest, ScatterExplicitBatchDimsFromOperandToResult) {\n const char* const hlo_string = R\"(\nHloModule module\nmin (lhs: f32[], rhs: f32[]) -> f32[] {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT min = f32[] minimum(lhs, rhs)\n}\nENTRY entry {\n %input = f32[10,6,14,4] parameter(0), sharding={devices=[2,2,2,2]<=[16]}\n %indices = s32[14,10,6,2] parameter(1)\n %updates = f32[14,10,6,2] parameter(2)\n ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),\n to_apply=min, update_window_dims={3}, inserted_window_dims={1},\n scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},\n scatter_indices_batching_dims={1,0}, index_vector_dim=3\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, true,\n {true})\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Sharding(\"{devices=[2,2,2,2]<=[16]}\"));\n}\nTEST_F(ShardingPropagationTest, ScatterExplicitBatchDimsFromIndicesToResult) {\n const char* const hlo_string = R\"(\nHloModule module\nmin (lhs: f32[], rhs: f32[]) -> f32[] {\n lhs = f32[] parameter(0)\n rhs = f32[] 
parameter(1)\n ROOT min = f32[] minimum(lhs, rhs)\n}\nENTRY entry {\n %input = f32[10,6,14,4] parameter(0)\n %indices = s32[14,10,6,2] parameter(1), sharding={devices=[2,2,2,2]<=[16]}\n %updates = f32[14,10,6,2] parameter(2)\n ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),\n to_apply=min, update_window_dims={3}, inserted_window_dims={1},\n scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},\n scatter_indices_batching_dims={1,0}, index_vector_dim=3\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, true,\n {true})\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Sharding(\n \"{devices=[2,1,2,1,4]<=[2,2,4]T(1,0,2) last_tile_dim_replicate}\"));\n}\nTEST_F(ShardingPropagationTest, ScatterExplicitBatchDimsFromUpdatesToResult) {\n const char* const hlo_string = R\"(\nHloModule module\nmin (lhs: f32[], rhs: f32[]) -> f32[] {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT min = f32[] minimum(lhs, rhs)\n}\nENTRY entry {\n %input = f32[10,6,14,4] parameter(0)\n %indices = s32[14,10,6,2] parameter(1)\n %updates = f32[14,10,6,4] parameter(2), sharding={devices=[2,2,2,2]<=[16]}\n ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),\n to_apply=min, update_window_dims={3}, inserted_window_dims={1},\n scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},\n scatter_indices_batching_dims={1,0}, index_vector_dim=3\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, true,\n {true})\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Sharding(\"{devices=[2,1,2,2,2]<=[2,2,2,2]T(1,0,3,2) \"\n \"last_tile_dim_replicate}\"));\n}\nTEST_F(ShardingPropagationTest, ScatterBackwardWithExplicitBatchDims) {\n const char* const hlo_string = R\"(\nHloModule module\nmin (lhs: f32[], rhs: f32[]) -> f32[] {\n lhs = f32[] parameter(0)\n rhs = f32[] parameter(1)\n ROOT min = f32[] minimum(lhs, rhs)\n}\nENTRY entry {\n %input = f32[10,6,14,4] parameter(0)\n %indices = s32[14,10,6,2] parameter(1)\n %updates = f32[14,10,6,4] parameter(2)\n ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),\n to_apply=min, update_window_dims={3}, inserted_window_dims={1},\n scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},\n scatter_indices_batching_dims={1,0}, index_vector_dim=3, sharding={devices=[2,2,2,2]<=[16]}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(\n true, true,\n {true},\n {true, true, true})\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n EXPECT_THAT(module->entry_computation()->parameter_instruction(0),\n op::Sharding(\"{devices=[2,2,2,2]<=[16]}\"));\n EXPECT_THAT(module->entry_computation()->parameter_instruction(1),\n op::Sharding(\"{devices=[2,2,1,1,4]<=[2,2,2,2]T(2,0,1,3) \"\n \"last_tile_dim_replicate}\"));\n EXPECT_THAT(module->entry_computation()->parameter_instruction(2),\n op::Sharding(\"{devices=[2,2,1,2,2]<=[2,2,2,2]T(2,0,3,1) \"\n \"last_tile_dim_replicate}\"));\n}\nTEST_P(ParameterizedMetadataTest, ParallelGatherFromOperandForwardPass) {\n 
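// The two concatenated iotas below enumerate operand dims 0 and 1 in parallel,\n  // so the operand's dim-0 sharding can be forwarded directly to the gather output.\n  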
const char* const hlo_string = R\"(\nHloModule module\nENTRY %module {\n %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),\n sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name=\"a\"}}\n %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1\n %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2\n %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,\n s32[1,8,4]{2,1,0} %iota2), dimensions={0}\n %gather = s32[8,4,2,2]{3,2,1,0} gather(\n s32[8,4,2,2]{3,2,1,0} %parameter.0,\n s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},\n collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,\n slice_sizes={1,1,2,2}\n ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"gather\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[8,1,1,1]0,1,4,5,2,3,6,7}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ParallelGatherFromIndexForwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %module {\n %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)\n %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,\n sharding={devices=[1,8,1]0,1,4,5,2,3,6,7 metadata={op_name=\"a\"}}\n %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2\n %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,\n s32[1,8,4]{2,1,0} %iota2), dimensions={0}\n %gather = s32[8,4,2,2]{3,2,1,0} gather(\n s32[8,4,2,2]{3,2,1,0} %parameter.0,\n s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},\n collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,\n slice_sizes={1,1,2,2}\n ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"gather\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[8,1,1,1]0,1,4,5,2,3,6,7}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ParallelGatherBackwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %module {\n %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)\n %copy.p = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)\n %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1\n %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2\n %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,\n s32[1,8,4]{2,1,0} %iota2), dimensions={0}\n %gather = s32[8,4,2,2]{3,2,1,0} gather(\n s32[8,4,2,2]{3,2,1,0} 
%copy.p,\n s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},\n collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,\n slice_sizes={1,1,2,2},\n sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name=\"a\"}}\n ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* concatenate = FindInstruction(module.get(), \"concatenate\");\n ASSERT_NE(concatenate, nullptr);\n EXPECT_THAT(concatenate, op::Sharding(\"{devices=[1,8,1]0,1,4,5,2,3,6,7}\"));\n auto* copy_p = FindInstruction(module.get(), \"copy.p\");\n ASSERT_NE(copy_p, nullptr);\n EXPECT_THAT(copy_p, op::Sharding(\"{devices=[8,1,1,1]0,1,4,5,2,3,6,7}\"));\n for (HloInstruction* instruction : {concatenate, copy_p}) {\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n }\n}\nTEST_P(ParameterizedMetadataTest, ParallelGatherBackwardPass2) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %module {\n %parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0)\n %copy.p = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0)\n %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1\n %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2\n %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,\n s32[1,8,4]{2,1,0} %iota2), dimensions={0}\n %gather = s32[8,4,2,2]{3,2,1,0} gather(\n s32[4,8,2,2]{3,2,1,0} %copy.p,\n s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},\n collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,\n slice_sizes={1,1,2,2},\n sharding={devices=[1,4,1,1]0,1,4,5 metadata={op_name=\"a\"}}\n ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* concatenate = FindInstruction(module.get(), \"concatenate\");\n ASSERT_NE(concatenate, nullptr);\n EXPECT_THAT(concatenate, op::Sharding(\"{devices=[1,1,4]0,1,4,5}\"));\n auto* copy_p = FindInstruction(module.get(), \"copy.p\");\n ASSERT_NE(copy_p, nullptr);\n EXPECT_THAT(copy_p, op::Sharding(\"{devices=[4,1,1,1]0,1,4,5}\"));\n for (HloInstruction* instruction : {concatenate, copy_p}) {\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n }\n}\nTEST_P(ParameterizedMetadataTest,\n PartialShardingParallelGatherFromOperandForwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %module {\n %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),\n sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name=\"a\"}}\n %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1\n %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2\n %concatenate.19 = s32[2,8,4]{2,1,0} 
TEST_P(ParameterizedMetadataTest,
       PartialShardingParallelGatherFromOperandForwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0}
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "gather");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding(
          "{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
TEST_P(ParameterizedMetadataTest,
       PartialShardingParallelGatherFromIndexForwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0}
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "gather");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding(
          "{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
TEST_P(ParameterizedMetadataTest, PartialShardingParallelGatherBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %copy.p = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
  %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0}
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %copy.p,
    s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2},
    sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* concatenate = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(concatenate, nullptr);
  EXPECT_THAT(
      concatenate,
      op::Sharding(
          "{devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
  auto* copy_p = FindInstruction(module.get(), "copy.p");
  ASSERT_NE(copy_p, nullptr);
  EXPECT_THAT(
      copy_p,
      op::Sharding(
          "{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
  for (HloInstruction* instruction : {concatenate, copy_p}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
TEST_P(ParameterizedMetadataTest, PartialShardingParallelGatherBackwardPass2) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0)
  %copy.p = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
  %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0}
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[4,8,2,2]{3,2,1,0} %copy.p,
    s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,
    slice_sizes={1,1,2,2},
    sharding={devices=[1,2,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* concatenate = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(concatenate, nullptr);
  EXPECT_THAT(
      concatenate,
      op::Sharding("{devices=[1,1,2,2]0,1,4,5 last_tile_dim_replicate}"));
  auto* copy_p = FindInstruction(module.get(), "copy.p");
  ASSERT_NE(copy_p, nullptr);
  EXPECT_THAT(
      copy_p,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  for (HloInstruction* instruction : {concatenate, copy_p}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
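// "ShardBarrierFrom"/"ShardBarrierTo" custom-calls act as propagation
// barriers. The two tests below check that a scatter picks up no sharding
// when every operand is behind a forward barrier, and that a backward pass
// stops at a barrier (the barriered operand stays replicated) while the
// non-barriered operands still receive shardings.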
TEST_P(ParameterizedMetadataTest, ScatterForwardPassWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
  %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0}
  %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
  %shard-barrier-from.0 = s32[8,4,2,2]{3,2,1,0} custom-call(%parameter.0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %shard-barrier-from.1 = s32[2,8,4]{2,1,0} custom-call(%concatenate), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %shard-barrier-from.2 = s32[8,4,2,2]{3,2,1,0} custom-call(%parameter.1), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %shard-barrier-from.0,
    s32[2,8,4]{2,1,0} %shard-barrier-from.1,
    s32[8,4,2,2]{3,2,1,0} %shard-barrier-from.2),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "scatter");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}
TEST_P(ParameterizedMetadataTest, ScatterBackwardPassWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %copy.p0 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
  %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0}
  %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
  %copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1)
  %shard-barrier-to.0 = s32[8,4,2,2]{3,2,1,0} custom-call(%copy.p0), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %shard-barrier-to.0,
    s32[2,8,4]{2,1,0} %concatenate,
    s32[8,4,2,2]{3,2,1,0} %copy.p1),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0,
    sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* concatenate = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(concatenate, nullptr);
  EXPECT_THAT(concatenate, op::Sharding("{devices=[1,8,1]0,1,4,5,2,3,6,7}"));
  auto* copy_p0 = FindInstruction(module.get(), "copy.p0");
  ASSERT_NE(copy_p0, nullptr);
  EXPECT_THAT(copy_p0, op::Sharding("{replicated}"));
  auto* copy_p1 = FindInstruction(module.get(), "copy.p1");
  ASSERT_NE(copy_p1, nullptr);
  EXPECT_THAT(copy_p1, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
}
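// Parallel scatter: the iota-based indices make dimension 0 of the operand
// an index-parallel dimension, so a sharding on the operand, the indices,
// or the updates determines the scatter's sharding in the forward pass, and
// a sharding on the scatter propagates back to those inputs.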
op::Sharding(\"{devices=[8,1,1,1]0,1,4,5,2,3,6,7}\"));\n}\nTEST_P(ParameterizedMetadataTest, ParallelScatterFromOperandForwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT sum = s32[] add(lhs, rhs)\n}\nENTRY %module {\n %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),\n sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name=\"a\"}}\n %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1\n %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2\n %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,\n s32[1,8,4]{2,1,0} %iota2), dimensions={0}\n %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)\n %scatter = s32[8,4,2,2]{3,2,1,0} scatter(\n s32[8,4,2,2]{3,2,1,0} %parameter.0,\n s32[2,8,4]{2,1,0} %concatenate,\n s32[8,4,2,2]{3,2,1,0} %parameter.1),\n to_apply=add,\n update_window_dims={2,3},\n inserted_window_dims={0,1},\n scatter_dims_to_operand_dims={0,1},\n index_vector_dim=0\n ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"scatter\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[8,1,1,1]0,1,4,5,2,3,6,7}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ParallelScatterFromIndexForwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT sum = s32[] add(lhs, rhs)\n}\nENTRY %module {\n %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)\n %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,\n sharding={devices=[1,8,1]0,1,4,5,2,3,6,7 metadata={op_name=\"a\"}}\n %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2\n %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,\n s32[1,8,4]{2,1,0} %iota2), dimensions={0}\n %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)\n %scatter = s32[8,4,2,2]{3,2,1,0} scatter(\n s32[8,4,2,2]{3,2,1,0} %parameter.0,\n s32[2,8,4]{2,1,0} %concatenate,\n s32[8,4,2,2]{3,2,1,0} %parameter.1),\n to_apply=add,\n update_window_dims={2,3},\n inserted_window_dims={0,1},\n scatter_dims_to_operand_dims={0,1},\n index_vector_dim=0\n ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"scatter\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[8,1,1,1]0,1,4,5,2,3,6,7}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } 
else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ParallelScatterFromUpdateForwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT sum = s32[] add(lhs, rhs)\n}\nENTRY %module {\n %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)\n %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1\n %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2\n %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,\n s32[1,8,4]{2,1,0} %iota2), dimensions={0}\n %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),\n sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name=\"a\"}}\n %scatter = s32[8,4,2,2]{3,2,1,0} scatter(\n s32[8,4,2,2]{3,2,1,0} %parameter.0,\n s32[2,8,4]{2,1,0} %concatenate,\n s32[8,4,2,2]{3,2,1,0} %parameter.1),\n to_apply=add,\n update_window_dims={2,3},\n inserted_window_dims={0,1},\n scatter_dims_to_operand_dims={0,1},\n index_vector_dim=0\n ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"scatter\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction, op::Sharding(\"{devices=[8,1,1,1]0,1,4,5,2,3,6,7}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, ParallelScatterBackwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT sum = s32[] add(lhs, rhs)\n}\nENTRY %module {\n %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)\n %copy.p0 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)\n %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1\n %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2\n %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,\n s32[1,8,4]{2,1,0} %iota2), dimensions={0}\n %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)\n %copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1)\n %scatter = s32[8,4,2,2]{3,2,1,0} scatter(\n s32[8,4,2,2]{3,2,1,0} %copy.p0,\n s32[2,8,4]{2,1,0} %concatenate,\n s32[8,4,2,2]{3,2,1,0} %copy.p1),\n to_apply=add,\n update_window_dims={2,3},\n inserted_window_dims={0,1},\n scatter_dims_to_operand_dims={0,1},\n index_vector_dim=0,\n sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name=\"a\"}}\n ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* concatenate = FindInstruction(module.get(), \"concatenate\");\n ASSERT_NE(concatenate, nullptr);\n EXPECT_THAT(concatenate, op::Sharding(\"{devices=[1,8,1]0,1,4,5,2,3,6,7}\"));\n auto* copy_p0 = FindInstruction(module.get(), 
\"copy.p0\");\n ASSERT_NE(copy_p0, nullptr);\n EXPECT_THAT(copy_p0, op::Sharding(\"{devices=[8,1,1,1]0,1,4,5,2,3,6,7}\"));\n auto* copy_p1 = FindInstruction(module.get(), \"copy.p1\");\n ASSERT_NE(copy_p1, nullptr);\n EXPECT_THAT(copy_p1, op::Sharding(\"{devices=[8,1,1,1]0,1,4,5,2,3,6,7}\"));\n for (HloInstruction* instruction : {concatenate, copy_p0, copy_p1}) {\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n }\n}\nTEST_P(ParameterizedMetadataTest, ParallelScatterBackwardPass2) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT sum = s32[] add(lhs, rhs)\n}\nENTRY %module {\n %parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0)\n %copy.p0 = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0)\n %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1\n %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2\n %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,\n s32[1,8,4]{2,1,0} %iota2), dimensions={0}\n %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)\n %copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1)\n %scatter = s32[4,8,2,2]{3,2,1,0} scatter(\n s32[4,8,2,2]{3,2,1,0} %copy.p0,\n s32[2,8,4]{2,1,0} %concatenate,\n s32[8,4,2,2]{3,2,1,0} %copy.p1),\n to_apply=add,\n update_window_dims={2,3},\n inserted_window_dims={0,1},\n scatter_dims_to_operand_dims={1,0},\n index_vector_dim=0,\n sharding={devices=[4,1,1,1]0,1,4,5 metadata={op_name=\"a\"}}\n ROOT %copy = s32[4,8,2,2]{3,2,1,0} copy(%scatter)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* concatenate = FindInstruction(module.get(), \"concatenate\");\n ASSERT_NE(concatenate, nullptr);\n EXPECT_THAT(concatenate, op::Sharding(\"{devices=[1,1,4]0,1,4,5}\"));\n auto* copy_p0 = FindInstruction(module.get(), \"copy.p0\");\n ASSERT_NE(copy_p0, nullptr);\n EXPECT_THAT(copy_p0, op::Sharding(\"{devices=[4,1,1,1]0,1,4,5}\"));\n auto* copy_p1 = FindInstruction(module.get(), \"copy.p1\");\n ASSERT_NE(copy_p1, nullptr);\n EXPECT_THAT(copy_p1, op::Sharding(\"{devices=[1,4,1,1]0,1,4,5}\"));\n for (HloInstruction* instruction : {concatenate, copy_p0, copy_p1}) {\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n }\n}\nTEST_P(ParameterizedMetadataTest,\n PartialShardingParallelScatterFromOperandForwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT sum = s32[] add(lhs, rhs)\n}\nENTRY %module {\n %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),\n sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name=\"a\"}}\n %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1\n %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2\n %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,\n s32[1,8,4]{2,1,0} %iota2), dimensions={0}\n %parameter.1 
= s32[8,4,2,2]{3,2,1,0} parameter(1)\n %scatter = s32[8,4,2,2]{3,2,1,0} scatter(\n s32[8,4,2,2]{3,2,1,0} %parameter.0,\n s32[2,8,4]{2,1,0} %concatenate,\n s32[8,4,2,2]{3,2,1,0} %parameter.1),\n to_apply=add,\n update_window_dims={2,3},\n inserted_window_dims={0,1},\n scatter_dims_to_operand_dims={0,1},\n index_vector_dim=0\n ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"scatter\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(\n instruction,\n op::Sharding(\n \"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest,\n PartialShardingParallelScatterFromIndexForwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT sum = s32[] add(lhs, rhs)\n}\nENTRY %module {\n %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)\n %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,\n sharding={devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name=\"a\"}}\n %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2\n %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,\n s32[1,8,4]{2,1,0} %iota2), dimensions={0}\n %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)\n %scatter = s32[8,4,2,2]{3,2,1,0} scatter(\n s32[8,4,2,2]{3,2,1,0} %parameter.0,\n s32[2,8,4]{2,1,0} %concatenate,\n s32[8,4,2,2]{3,2,1,0} %parameter.1),\n to_apply=add,\n update_window_dims={2,3},\n inserted_window_dims={0,1},\n scatter_dims_to_operand_dims={0,1},\n index_vector_dim=0\n ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"scatter\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(\n instruction,\n op::Sharding(\n \"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest,\n PartialShardingParallelScatterFromUpdateForwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT sum = s32[] add(lhs, rhs)\n}\nENTRY %module {\n %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)\n %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1\n %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2\n %concatenate = s32[2,8,4]{2,1,0} 
concatenate(s32[1,8,4]{2,1,0} %iota,\n s32[1,8,4]{2,1,0} %iota2), dimensions={0}\n %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),\n sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name=\"a\"}}\n %scatter = s32[8,4,2,2]{3,2,1,0} scatter(\n s32[8,4,2,2]{3,2,1,0} %parameter.0,\n s32[2,8,4]{2,1,0} %concatenate,\n s32[8,4,2,2]{3,2,1,0} %parameter.1),\n to_apply=add,\n update_window_dims={2,3},\n inserted_window_dims={0,1},\n scatter_dims_to_operand_dims={0,1},\n index_vector_dim=0\n ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* instruction = FindInstruction(module.get(), \"scatter\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(\n instruction,\n op::Sharding(\n \"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}\"));\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n}\nTEST_P(ParameterizedMetadataTest, PartialShardingParallelScatterBackwardPass) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT sum = s32[] add(lhs, rhs)\n}\nENTRY %module {\n %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)\n %copy.p0 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)\n %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1\n %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2\n %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,\n s32[1,8,4]{2,1,0} %iota2), dimensions={0}\n %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)\n %copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1)\n %scatter = s32[8,4,2,2]{3,2,1,0} scatter(\n s32[8,4,2,2]{3,2,1,0} %copy.p0,\n s32[2,8,4]{2,1,0} %concatenate,\n s32[8,4,2,2]{3,2,1,0} %copy.p1),\n to_apply=add,\n update_window_dims={2,3},\n inserted_window_dims={0,1},\n scatter_dims_to_operand_dims={0,1},\n index_vector_dim=0,\n sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name=\"a\"}}\n ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* concatenate = FindInstruction(module.get(), \"concatenate\");\n ASSERT_NE(concatenate, nullptr);\n EXPECT_THAT(\n concatenate,\n op::Sharding(\n \"{devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}\"));\n auto* copy_p0 = FindInstruction(module.get(), \"copy.p0\");\n ASSERT_NE(copy_p0, nullptr);\n EXPECT_THAT(\n copy_p0,\n op::Sharding(\n \"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}\"));\n auto* copy_p1 = FindInstruction(module.get(), \"copy.p1\");\n ASSERT_NE(copy_p1, nullptr);\n EXPECT_THAT(\n copy_p1,\n op::Sharding(\n \"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}\"));\n for (HloInstruction* instruction : {concatenate, 
copy_p0, copy_p1}) {\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n }\n}\nTEST_P(ParameterizedMetadataTest, PartialShardingParallelScatterBackwardPass2) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT sum = s32[] add(lhs, rhs)\n}\nENTRY %module {\n %parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0)\n %copy.p0 = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0)\n %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1\n %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2\n %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,\n s32[1,8,4]{2,1,0} %iota2), dimensions={0}\n %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)\n %copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1)\n %scatter = s32[4,8,2,2]{3,2,1,0} scatter(\n s32[4,8,2,2]{3,2,1,0} %copy.p0,\n s32[2,8,4]{2,1,0} %concatenate,\n s32[8,4,2,2]{3,2,1,0} %copy.p1),\n to_apply=add,\n update_window_dims={2,3},\n inserted_window_dims={0,1},\n scatter_dims_to_operand_dims={1,0},\n index_vector_dim=0,\n sharding={devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name=\"a\"}}\n ROOT %copy = s32[4,8,2,2]{3,2,1,0} copy(%scatter)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* concatenate = FindInstruction(module.get(), \"concatenate\");\n ASSERT_NE(concatenate, nullptr);\n EXPECT_THAT(\n concatenate,\n op::Sharding(\"{devices=[1,1,2,2]0,1,4,5 last_tile_dim_replicate}\"));\n auto* copy_p0 = FindInstruction(module.get(), \"copy.p0\");\n ASSERT_NE(copy_p0, nullptr);\n EXPECT_THAT(\n copy_p0,\n op::Sharding(\"{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}\"));\n auto* copy_p1 = FindInstruction(module.get(), \"copy.p1\");\n ASSERT_NE(copy_p1, nullptr);\n EXPECT_THAT(\n copy_p1,\n op::Sharding(\"{devices=[1,2,1,1,2]0,1,4,5 last_tile_dim_replicate}\"));\n for (HloInstruction* instruction : {concatenate, copy_p0, copy_p1}) {\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n }\n}\nTEST_P(ParameterizedMetadataTest,\n ParallelScatterFromOperandForwardPass_Variadic) {\n const char* const hlo_string = R\"(\nHloModule module\nadd (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) {\n lhs.0 = s32[] parameter(0)\n lhs.1 = s32[] parameter(1)\n rhs.0 = s32[] parameter(2)\n rhs.1 = s32[] parameter(3)\n sum.0 = s32[] add(lhs.0, rhs.0)\n sum.1 = s32[] add(lhs.1, rhs.1)\n ROOT tuple = tuple(sum.0, sum.1)\n}\nENTRY %module {\n %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),\n sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name=\"a\"}}\n %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),\n sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name=\"b\"}}\n %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1\n %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2\n %concatenate = s32[2,8,4]{2,1,0} 
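// Variadic scatter: two operands and two updates produce a tuple-shaped
// result, so the scatter carries one sharding per tuple element and each
// element's sharding (and its metadata, "a" vs. "b") propagates
// independently.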
TEST_P(ParameterizedMetadataTest,
       ParallelScatterFromOperandForwardPass_Variadic) {
  const char* const hlo_string = R"(
HloModule module
add (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) {
  lhs.0 = s32[] parameter(0)
  lhs.1 = s32[] parameter(1)
  rhs.0 = s32[] parameter(2)
  rhs.1 = s32[] parameter(3)
  sum.0 = s32[] add(lhs.0, rhs.0)
  sum.1 = s32[] add(lhs.1, rhs.1)
  ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
  %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),
    sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="b"}}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
  %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0}
  %parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %parameter.3 = s32[8,4,2,2]{3,2,1,0} parameter(3)
  %scatter = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) scatter(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[8,4,2,2]{3,2,1,0} %parameter.1,
    s32[2,8,4]{2,1,0} %concatenate,
    s32[8,4,2,2]{3,2,1,0} %parameter.2,
    s32[8,4,2,2]{3,2,1,0} %parameter.3),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0
  ROOT %copy = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "scatter");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{{devices=[8,1,1,1]0,1,4,5,2,3,6,7},{devices=[4,1,"
                           "1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding().tuple_elements()[0],
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(instruction->sharding().tuple_elements()[1],
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
TEST_P(ParameterizedMetadataTest,
       ParallelScatterFromIndexForwardPass_Variadic) {
  const char* const hlo_string = R"(
HloModule module
add (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) {
  lhs.0 = s32[] parameter(0)
  lhs.1 = s32[] parameter(1)
  rhs.0 = s32[] parameter(2)
  rhs.1 = s32[] parameter(3)
  sum.0 = s32[] add(lhs.0, rhs.0)
  sum.1 = s32[] add(lhs.1, rhs.1)
  ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
  %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0}
  %parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %parameter.3 = s32[8,4,2,2]{3,2,1,0} parameter(3)
  %scatter = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) scatter(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[8,4,2,2]{3,2,1,0} %parameter.1,
    s32[2,8,4]{2,1,0} %concatenate,
    s32[8,4,2,2]{3,2,1,0} %parameter.2,
    s32[8,4,2,2]{3,2,1,0} %parameter.3),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0
  ROOT %copy = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "scatter");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 "
                           "last_tile_dim_replicate},{devices=[4,1,1,1,2]0,1,4,"
                           "5,2,3,6,7 last_tile_dim_replicate}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding().tuple_elements()[0],
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(instruction->sharding().tuple_elements()[1],
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
TEST_P(ParameterizedMetadataTest,
       ParallelScatterFromUpdateForwardPass_Variadic) {
  const char* const hlo_string = R"(
HloModule module
add (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) {
  lhs.0 = s32[] parameter(0)
  lhs.1 = s32[] parameter(1)
  rhs.0 = s32[] parameter(2)
  rhs.1 = s32[] parameter(3)
  sum.0 = s32[] add(lhs.0, rhs.0)
  sum.1 = s32[] add(lhs.1, rhs.1)
  ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
  %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0}
  %parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2),
    sharding={devices=[1,8,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
  %parameter.3 = s32[8,4,2,2]{3,2,1,0} parameter(3),
    sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="b"}}
  %scatter = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) scatter(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[8,4,2,2]{3,2,1,0} %parameter.1,
    s32[2,8,4]{2,1,0} %concatenate,
    s32[8,4,2,2]{3,2,1,0} %parameter.2,
    s32[8,4,2,2]{3,2,1,0} %parameter.3),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0
  ROOT %copy = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "scatter");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{{devices=[1,8,1,1]0,1,4,5,2,3,6,7},{devices=[4,1,"
                           "1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding().tuple_elements()[0],
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(instruction->sharding().tuple_elements()[1],
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
TEST_P(ParameterizedMetadataTest, ParallelScatterBackwardPass_Variadic) {
  const char* const hlo_string = R"(
HloModule module
add (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) {
  lhs.0 = s32[] parameter(0)
  lhs.1 = s32[] parameter(1)
  rhs.0 = s32[] parameter(2)
  rhs.1 = s32[] parameter(3)
  sum.0 = s32[] add(lhs.0, rhs.0)
  sum.1 = s32[] add(lhs.1, rhs.1)
  ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %copy.p0 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)
  %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
  %copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
  %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0}
  %parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %copy.p2 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.2)
  %parameter.3 = s32[8,4,2,2]{3,2,1,0} parameter(3)
  %copy.p3 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.3)
  %scatter = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) scatter(
    s32[8,4,2,2]{3,2,1,0} %copy.p0,
    s32[8,4,2,2]{3,2,1,0} %copy.p1,
    s32[2,8,4]{2,1,0} %concatenate,
    s32[8,4,2,2]{3,2,1,0} %copy.p2,
    s32[8,4,2,2]{3,2,1,0} %copy.p3),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0,
    sharding={{devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}},
    {devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="b"}}}
  ROOT %copy = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* concatenate = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(concatenate, nullptr);
  EXPECT_THAT(concatenate, op::Sharding("{devices=[1,8,1]0,1,4,5,2,3,6,7}"));
  auto* copy_p0 = FindInstruction(module.get(), "copy.p0");
  ASSERT_NE(copy_p0, nullptr);
  EXPECT_THAT(copy_p0, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
  auto* copy_p1 = FindInstruction(module.get(), "copy.p1");
  ASSERT_NE(copy_p1, nullptr);
  EXPECT_THAT(
      copy_p1,
      op::Sharding(
          "{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
  auto* copy_p2 = FindInstruction(module.get(), "copy.p2");
  ASSERT_NE(copy_p2, nullptr);
  EXPECT_THAT(copy_p2, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
  auto* copy_p3 = FindInstruction(module.get(), "copy.p3");
  ASSERT_NE(copy_p3, nullptr);
  EXPECT_THAT(
      copy_p3,
      op::Sharding(
          "{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
  for (HloInstruction* instruction : {concatenate, copy_p0, copy_p2}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
  for (HloInstruction* instruction : {copy_p1, copy_p3}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("b")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
TEST_P(ParameterizedMetadataTest, ParallelScatterBackwardPass2_Variadic) {
  const char* const hlo_string = R"(
HloModule module
add (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) {
  lhs.0 = s32[] parameter(0)
  lhs.1 = s32[] parameter(1)
  rhs.0 = s32[] parameter(2)
  rhs.1 = s32[] parameter(3)
  sum.0 = s32[] add(lhs.0, rhs.0)
  sum.1 = s32[] add(lhs.1, rhs.1)
  ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY %module {
  %parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0)
  %copy.p0 = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0)
  %parameter.1 = s32[4,8,2,2]{3,2,1,0} parameter(1)
  %copy.p1 = s32[4,8,2,2]{3,2,1,0} copy(%parameter.1)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
  %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0}
  %parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %copy.p2 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.2)
  %parameter.3 = s32[8,4,2,2]{3,2,1,0} parameter(3)
  %copy.p3 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.3)
  %scatter = (s32[4,8,2,2]{3,2,1,0},s32[4,8,2,2]{3,2,1,0}) scatter(
    s32[4,8,2,2]{3,2,1,0} %copy.p0,
    s32[4,8,2,2]{3,2,1,0} %copy.p1,
    s32[2,8,4]{2,1,0} %concatenate,
    s32[8,4,2,2]{3,2,1,0} %copy.p2,
    s32[8,4,2,2]{3,2,1,0} %copy.p3),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={1,0},
    index_vector_dim=0,
    sharding={{devices=[4,1,1,1]0,1,4,5 metadata={op_name="a"}},
    {devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="b"}}}
  ROOT %copy = (s32[4,8,2,2]{3,2,1,0},s32[4,8,2,2]{3,2,1,0}) copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* concatenate = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(concatenate, nullptr);
  EXPECT_THAT(concatenate, op::Sharding("{devices=[1,1,4]0,1,4,5}"));
  auto* copy_p0 = FindInstruction(module.get(), "copy.p0");
  ASSERT_NE(copy_p0, nullptr);
  EXPECT_THAT(copy_p0, op::Sharding("{devices=[4,1,1,1]0,1,4,5}"));
  auto* copy_p1 = FindInstruction(module.get(), "copy.p1");
  ASSERT_NE(copy_p1, nullptr);
  EXPECT_THAT(
      copy_p1,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  auto* copy_p2 = FindInstruction(module.get(), "copy.p2");
  ASSERT_NE(copy_p2, nullptr);
  EXPECT_THAT(copy_p2, op::Sharding("{devices=[1,4,1,1]0,1,4,5}"));
  auto* copy_p3 = FindInstruction(module.get(), "copy.p3");
  ASSERT_NE(copy_p3, nullptr);
  EXPECT_THAT(
      copy_p3,
      op::Sharding("{devices=[1,2,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  for (HloInstruction* instruction : {concatenate, copy_p0, copy_p2}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
  for (HloInstruction* instruction : {copy_p1, copy_p3}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("b")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
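// "Merged" gather cases: the sharding mixes an index-parallel dimension with
// another compatible sharding type (operand passthrough, index passthrough,
// or a trivially sliced operand dimension), and propagation is expected to
// preserve both components in either direction.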
TEST_P(ParameterizedMetadataTest,
       GatherMergedIndexParallelAndOperandPassthroughFromOperandForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
    sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %concatenate = s32[2,8,4]{2,1,0} concatenate(
    s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[2,1,2,1]0,1,4,5 }"));
  const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* gather = FindInstruction(module.get(), "gather");
  ASSERT_NE(gather, nullptr);
  EXPECT_THAT(gather, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  for (const HloInstruction* instruction : {operand, indices, gather}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
TEST_P(ParameterizedMetadataTest,
       GatherMergedIndexParallelAndOperandPassthroughBackwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %concatenate = s32[2,8,4]{2,1,0} concatenate(
    s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2},
    sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[2,1,2,1]0,1,4,5 }"));
  const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* gather = FindInstruction(module.get(), "gather");
  ASSERT_NE(gather, nullptr);
  EXPECT_THAT(gather, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  for (const HloInstruction* instruction : {operand, indices, gather}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
TEST_P(ParameterizedMetadataTest,
       GatherMergedIndexParallelAndIndexPassthroughFromIndicesForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1),
    sharding={devices=[1,2,2]0,1,4,5 metadata={op_name="a"}}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %concatenate = s32[2,8,4]{2,1,0} concatenate(
    s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(indices, op::Sharding("{devices=[1,2,2]0,1,4,5}"));
  const HloInstruction* gather = FindInstruction(module.get(), "gather");
  ASSERT_NE(gather, nullptr);
  EXPECT_THAT(gather, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
  for (const HloInstruction* instruction : {operand, indices, gather}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
TEST_P(ParameterizedMetadataTest,
       GatherMergedIndexParallelAndIndexPassthroughBackwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %concatenate = s32[2,8,4]{2,1,0} concatenate(
    s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2},
    sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(indices, op::Sharding("{devices=[1,2,2]0,1,4,5}"));
  const HloInstruction* gather = FindInstruction(module.get(), "gather");
  ASSERT_NE(gather, nullptr);
  EXPECT_THAT(gather, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
  for (const HloInstruction* instruction : {operand, indices, gather}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
TEST_P(ParameterizedMetadataTest,
       GatherMergedIndexParallelAndTrivialSlicedOperandFromOperandForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
    sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}}
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %concatenate = s32[2,8,4]{2,1,0} concatenate(
    s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[2,2,1,1]0,1,4,5 }"));
  const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* gather = FindInstruction(module.get(), "gather");
  ASSERT_NE(gather, nullptr);
  EXPECT_THAT(
      gather,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction : {operand, indices, gather}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
TEST_P(ParameterizedMetadataTest,
       GatherMergedIndexParallelAndTrivialSlicedOperandBackwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %concatenate = s32[2,8,4]{2,1,0} concatenate(
    s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2},
    sharding={devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
  const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* gather = FindInstruction(module.get(), "gather");
  ASSERT_NE(gather, nullptr);
  EXPECT_THAT(
      gather,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction : {operand, indices, gather}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
TEST_P(
    ParameterizedMetadataTest,
    GatherMergedOperandPassthroughAndTrivialSlicedOperandFromOperandForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
    sharding={devices=[1,2,2,1]0,4,1,5 metadata={op_name="a"}}
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %indices), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2,1]0,4,1,5}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(indices, op::Sharding("{replicated}"));
  const HloInstruction* gather = FindInstruction(module.get(), "gather");
  ASSERT_NE(gather, nullptr);
  EXPECT_THAT(
      gather,
      op::Sharding("{devices=[1,1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction : {operand, indices, gather}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
TEST_P(ParameterizedMetadataTest,
       GatherMergedOperandPassthroughAndTrivialSlicedOperandBackwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %indices), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2},
    sharding={devices=[1,1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2,1]0,4,1,5}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(indices, op::Sharding("{replicated}"));
  const HloInstruction* gather = FindInstruction(module.get(), "gather");
  ASSERT_NE(gather, nullptr);
  EXPECT_THAT(
      gather,
      op::Sharding("{devices=[1,1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction : {operand, indices, gather}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
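// The remaining cases merge operand-passthrough with index-passthrough
// shardings, and then trivially sliced operand dimensions with
// index-passthrough, again checking that both components survive
// propagation in each direction.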
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* gather = FindInstruction(module.get(), "gather");
  ASSERT_NE(gather, nullptr);
  EXPECT_THAT(gather, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  for (const HloInstruction* instruction : {operand, indices, gather}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest,
       GatherMergedOperandPassthroughAndIndexPassthroughBackwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %indices), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2},
    sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* gather = FindInstruction(module.get(), "gather");
  ASSERT_NE(gather, nullptr);
  EXPECT_THAT(gather, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  for (const HloInstruction* instruction : {operand, indices, gather}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
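// Forward pass: a trivially sliced operand sharding and an index-passthrough
// sharding are merged onto the gather from its operand and indices.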
TEST_P(
    ParameterizedMetadataTest,
    GatherMergedTrivialSlicedOperandAndIndexPassthroughFromOperandAndIndexForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
    sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1),
    sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %indices), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* gather = FindInstruction(module.get(), "gather");
  ASSERT_NE(gather, nullptr);
  EXPECT_THAT(
      gather,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction : {operand, indices, gather}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
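// Backward-pass counterpart: the gather's merged sharding flows back to the
// operand, and the indices end up replicated.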
TEST_P(ParameterizedMetadataTest,
       GatherMergedTrivialSlicedOperandAndIndexPassthroughBackwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %indices), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2},
    sharding={devices=[1,1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2,1]0,4,1,5}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(indices, op::Sharding("{replicated}"));
  const HloInstruction* gather = FindInstruction(module.get(), "gather");
  ASSERT_NE(gather, nullptr);
  EXPECT_THAT(
      gather,
      op::Sharding("{devices=[1,1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction : {operand, indices, gather}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
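// Scatter variants: an index-parallel dimension (exposed by the iota half of
// the concatenated indices) merges with an operand-passthrough sharding,
// seeded here on the operand.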
TEST_P(ParameterizedMetadataTest,
       ScatterMergedIndexParallelAndOperandPassthroughFromOperandForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
    sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %concatenate = s32[2,8,4]{2,1,0} concatenate(
    s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %concatenate,
    s32[8,4,2,2]{3,2,1,0} %update),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(scatter, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
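// Same merge as above, but the sharding is seeded on the update instead.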
\"concatenate\");\n ASSERT_NE(indices, nullptr);\n EXPECT_THAT(\n indices,\n op::Sharding(\"{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}\"));\n const HloInstruction* update = FindInstruction(module.get(), \"update\");\n ASSERT_NE(update, nullptr);\n EXPECT_THAT(update, op::Sharding(\"{devices=[2,1,2,1]0,1,4,5}\"));\n const HloInstruction* scatter = FindInstruction(module.get(), \"scatter\");\n ASSERT_NE(scatter, nullptr);\n EXPECT_THAT(scatter, op::Sharding(\"{devices=[2,1,2,1]0,1,4,5}\"));\n for (const HloInstruction* instruction :\n {operand, indices, update, scatter}) {\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n }\n}\nTEST_P(\n ParameterizedMetadataTest,\n ScatterMergedIndexParallelAndTrivialSlicedOperandFromOperandForwardPass) {\n absl::string_view hlo_string = R\"(\nHloModule module\nadd (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT sum = s32[] add(lhs, rhs)\n}\nENTRY %module {\n %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)\n %arg.1 = s32[1,8,4]{2,1,0} parameter(1)\n %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)\n %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),\n sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name=\"a\"}}\n %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)\n %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1\n %concatenate = s32[2,8,4]{2,1,0} concatenate(\n s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}\n %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)\n %scatter = s32[8,4,2,2]{3,2,1,0} scatter(\n s32[8,4,2,2]{3,2,1,0} %operand,\n s32[2,8,4]{2,1,0} %concatenate,\n s32[8,4,2,2]{3,2,1,0} %update),\n to_apply=add,\n update_window_dims={2,3},\n inserted_window_dims={0,1},\n scatter_dims_to_operand_dims={0,1},\n index_vector_dim=0\n ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n const HloInstruction* operand = FindInstruction(module.get(), \"operand\");\n ASSERT_NE(operand, nullptr);\n EXPECT_THAT(operand, op::Sharding(\"{devices=[2,2,1,1]0,1,4,5 }\"));\n const HloInstruction* indices = FindInstruction(module.get(), \"concatenate\");\n ASSERT_NE(indices, nullptr);\n EXPECT_THAT(\n indices,\n op::Sharding(\"{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}\"));\n const HloInstruction* update = FindInstruction(module.get(), \"update\");\n ASSERT_NE(update, nullptr);\n EXPECT_THAT(\n update,\n op::Sharding(\"{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}\"));\n const HloInstruction* scatter = FindInstruction(module.get(), \"scatter\");\n ASSERT_NE(scatter, nullptr);\n EXPECT_THAT(scatter, op::Sharding(\"{devices=[2,2,1,1]0,1,4,5}\"));\n for (const HloInstruction* instruction :\n {operand, indices, update, scatter}) {\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n }\n}\nTEST_P(ParameterizedMetadataTest,\n 
TEST_P(
    ParameterizedMetadataTest,
    ScatterMergedIndexParallelAndTrivialSlicedOperandFromOperandForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
    sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}}
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %concatenate = s32[2,8,4]{2,1,0} concatenate(
    s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %concatenate,
    s32[8,4,2,2]{3,2,1,0} %update),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
  const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(
      update,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(scatter, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest,
       ScatterMergedIndexParallelAndTrivialSlicedOperandBackwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %concatenate = s32[2,8,4]{2,1,0} concatenate(
    s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %concatenate,
    s32[8,4,2,2]{3,2,1,0} %update),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0,
    sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
  const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(
      update,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(scatter, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
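// The index-parallel dimension merged with an index-passthrough sharding,
// seeded from the indices.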
TEST_P(ParameterizedMetadataTest,
       ScatterMergedIndexParallelAndIndexPassthroughFromIndexForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1),
    sharding={devices=[1,2,2]0,1,4,5 metadata={op_name="a"}}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %concatenate = s32[2,8,4]{2,1,0} concatenate(
    s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %concatenate,
    s32[8,4,2,2]{3,2,1,0} %update),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(indices, op::Sharding("{devices=[1,2,2]0,1,4,5}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(update, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(
      scatter,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
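// Same merge, seeded from the update.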
TEST_P(ParameterizedMetadataTest,
       ScatterMergedIndexParallelAndIndexPassthroughFromUpdateForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %concatenate = s32[2,8,4]{2,1,0} concatenate(
    s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2),
    sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}}
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %concatenate,
    s32[8,4,2,2]{3,2,1,0} %update),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(indices, op::Sharding("{devices=[1,2,2]0,1,4,5}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(update, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(
      scatter,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
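// Backward-pass counterpart of the two tests above.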
TEST_P(ParameterizedMetadataTest,
       ScatterMergedIndexParallelAndIndexPassthroughBackwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1),
    sharding={devices=[1,1,2,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %concatenate = s32[2,8,4]{2,1,0} concatenate(
    s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %concatenate,
    s32[8,4,2,2]{3,2,1,0} %update),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0,
    sharding={devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(indices, op::Sharding("{devices=[1,2,2]0,1,4,5}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(update, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(
      scatter,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(
    ParameterizedMetadataTest,
    ScatterMergedOperandPassthroughAndTrivialSlicedOperandFromOperandForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
    sharding={devices=[1,2,2,1]0,1,4,5 metadata={op_name="a"}}
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %indices,
    s32[8,4,2,2]{3,2,1,0} %update),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2,1]0,1,4,5}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(indices, op::Sharding("{replicated}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(
      update,
      op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(scatter, op::Sharding("{devices=[1,2,2,1]0,1,4,5}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
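// Backward-pass counterpart of the previous test, seeded on the scatter.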
copy(%scatter)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n const HloInstruction* operand = FindInstruction(module.get(), \"operand\");\n ASSERT_NE(operand, nullptr);\n EXPECT_THAT(operand, op::Sharding(\"{devices=[1,2,2,1]0,1,4,5}\"));\n const HloInstruction* indices = FindInstruction(module.get(), \"indices\");\n ASSERT_NE(indices, nullptr);\n EXPECT_THAT(indices, op::Sharding(\"{replicated}\"));\n const HloInstruction* update = FindInstruction(module.get(), \"update\");\n ASSERT_NE(update, nullptr);\n EXPECT_THAT(\n update,\n op::Sharding(\"{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}\"));\n const HloInstruction* scatter = FindInstruction(module.get(), \"scatter\");\n ASSERT_NE(scatter, nullptr);\n EXPECT_THAT(scatter, op::Sharding(\"{devices=[1,2,2,1]0,1,4,5}\"));\n for (const HloInstruction* instruction :\n {operand, indices, update, scatter}) {\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n }\n}\nTEST_P(ParameterizedMetadataTest,\n ScatterMergedOperandAndIndexPassthroughFromOperandAndIndexForwardPass) {\n absl::string_view hlo_string = R\"(\nHloModule module\nadd (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT sum = s32[] add(lhs, rhs)\n}\nENTRY %module {\n %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)\n %arg.1 = s32[2,8,4]{2,1,0} parameter(1)\n %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)\n %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),\n sharding={devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name=\"a\"}}\n %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1),\n sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name=\"a\"}}\n %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)\n %scatter = s32[8,4,2,2]{3,2,1,0} scatter(\n s32[8,4,2,2]{3,2,1,0} %operand,\n s32[2,8,4]{2,1,0} %indices,\n s32[8,4,2,2]{3,2,1,0} %update),\n to_apply=add,\n update_window_dims={2,3},\n inserted_window_dims={0,1},\n scatter_dims_to_operand_dims={0,1},\n index_vector_dim=0\n ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n const HloInstruction* operand = FindInstruction(module.get(), \"operand\");\n ASSERT_NE(operand, nullptr);\n EXPECT_THAT(\n operand,\n op::Sharding(\"{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}\"));\n const HloInstruction* indices = FindInstruction(module.get(), \"indices\");\n ASSERT_NE(indices, nullptr);\n EXPECT_THAT(\n indices,\n op::Sharding(\"{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}\"));\n const HloInstruction* update = FindInstruction(module.get(), \"update\");\n ASSERT_NE(update, nullptr);\n EXPECT_THAT(update, op::Sharding(\"{devices=[2,1,2,1]0,1,4,5}\"));\n const HloInstruction* scatter = 
TEST_P(ParameterizedMetadataTest,
       ScatterMergedOperandAndIndexPassthroughFromOperandAndIndexForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
    sharding={devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1),
    sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %indices,
    s32[8,4,2,2]{3,2,1,0} %update),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(
      scatter,
      op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(
    ParameterizedMetadataTest,
    ScatterMergedOperandPassthroughAndIndexPassthroughFromUpdateForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2),
    sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %indices,
    s32[8,4,2,2]{3,2,1,0} %update),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(
      scatter,
      op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
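// Backward-pass counterpart of the two tests above.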
TEST_P(ParameterizedMetadataTest,
       ScatterMergedOperandPassthroughAndIndexPassthroughBackwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1),
    sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %indices,
    s32[8,4,2,2]{3,2,1,0} %update),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0,
    sharding={devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(
      scatter,
      op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
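// Trivially sliced operand merged with index-passthrough, seeded from the
// operand and the indices.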
TEST_P(
    ParameterizedMetadataTest,
    ScatterMergedTrivialSlicedOperandAndIndexPassthroughFromOperandAndIndexForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
    sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1),
    sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(
    s32[8,4,2,2]{3,2,1,0} %operand,
    s32[2,8,4]{2,1,0} %indices,
    s32[8,4,2,2]{3,2,1,0} %update),
    to_apply=add,
    update_window_dims={2,3},
    inserted_window_dims={0,1},
    scatter_dims_to_operand_dims={0,1},
    index_vector_dim=0
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(
      update,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(
      scatter,
      op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
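// Same merge, seeded from the operand and the update.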
op::Sharding(\"{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}\"));\n for (const HloInstruction* instruction :\n {operand, indices, update, scatter}) {\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n }\n}\nTEST_P(ParameterizedMetadataTest,\n ScatterMergedTrivialSlicedOperandAndIndexPassthroughBackwardPass) {\n absl::string_view hlo_string = R\"(\nHloModule module\nadd (lhs: s32[], rhs: s32[]) -> s32[] {\n lhs = s32[] parameter(0)\n rhs = s32[] parameter(1)\n ROOT sum = s32[] add(lhs, rhs)\n}\nENTRY %module {\n %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)\n %arg.1 = s32[2,8,4]{2,1,0} parameter(1)\n %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)\n %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)\n %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1),\n sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name=\"a\"}}\n %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)\n %scatter = s32[8,4,2,2]{3,2,1,0} scatter(\n s32[8,4,2,2]{3,2,1,0} %operand,\n s32[2,8,4]{2,1,0} %indices,\n s32[8,4,2,2]{3,2,1,0} %update),\n to_apply=add,\n update_window_dims={2,3},\n inserted_window_dims={0,1},\n scatter_dims_to_operand_dims={0,1},\n index_vector_dim=0,\n sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name=\"a\"}}\n ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n if (GetParam().clear_metadata) {\n ClearMetadata(module.get());\n }\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(true, GetParam().propagate_metadata)\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n const HloInstruction* operand = FindInstruction(module.get(), \"operand\");\n ASSERT_NE(operand, nullptr);\n EXPECT_THAT(\n operand,\n op::Sharding(\"{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}\"));\n const HloInstruction* indices = FindInstruction(module.get(), \"indices\");\n ASSERT_NE(indices, nullptr);\n EXPECT_THAT(\n indices,\n op::Sharding(\"{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}\"));\n const HloInstruction* update = FindInstruction(module.get(), \"update\");\n ASSERT_NE(update, nullptr);\n EXPECT_THAT(\n update,\n op::Sharding(\"{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}\"));\n const HloInstruction* scatter = FindInstruction(module.get(), \"scatter\");\n ASSERT_NE(scatter, nullptr);\n EXPECT_THAT(\n scatter,\n op::Sharding(\"{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}\"));\n for (const HloInstruction* instruction :\n {operand, indices, update, scatter}) {\n if (GetParam().propagate_metadata && !GetParam().clear_metadata) {\n EXPECT_THAT(instruction->sharding(),\n ShardingMetadata({CreateMetadata(\"a\")}));\n } else {\n EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));\n }\n }\n}\nTEST_P(ParameterizedMetadataTest, CorrectlyReplicateGatherIndex) {\n absl::string_view hlo_string = R\"(\nHloModule module\nENTRY %module {\n %parameter.0 = bf16[1,2,2,2,8]{4,3,2,1,0} parameter(0)\n %parameter.1 = s32[1,2,2]{2,1,0} parameter(1)\n %index = s32[1,2,2]{2,1,0} copy(%parameter.1)\n %gather = bf16[1,2,2,2,8]{4,3,2,1,0} gather(\n bf16[1,2,2,2,8]{4,3,2,1,0} %parameter.0, s32[1,2,2]{2,1,0} %index),\n offset_dims={2,3,4}, collapsed_slice_dims={0,1}, start_index_map={0,1},\n 
TEST_P(ParameterizedMetadataTest, CorrectlyReplicateGatherIndex) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = bf16[1,2,2,2,8]{4,3,2,1,0} parameter(0)
  %parameter.1 = s32[1,2,2]{2,1,0} parameter(1)
  %index = s32[1,2,2]{2,1,0} copy(%parameter.1)
  %gather = bf16[1,2,2,2,8]{4,3,2,1,0} gather(
    bf16[1,2,2,2,8]{4,3,2,1,0} %parameter.0, s32[1,2,2]{2,1,0} %index),
    offset_dims={2,3,4}, collapsed_slice_dims={0,1}, start_index_map={0,1},
    index_vector_dim=2, slice_sizes={1,1,2,2,8},
    sharding={devices=[1,1,2,1,1]0,1 metadata={op_name="a"}}
  ROOT %copy = bf16[1,2,2,2,8]{4,3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* index = FindInstruction(module.get(), "index");
  ASSERT_NE(index, nullptr);
  EXPECT_THAT(index, op::Sharding("{replicated}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(index->sharding(), ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(index->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, GatherToOperand_ParallelDimIsNotPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[2,1000,1]{2,1,0} parameter(0)
  %parameter.1 = bf16[2,4819,4]{2,1,0} parameter(1)
  %iota = s32[2,1000,1]{1,0,2} iota(), iota_dimension=0
  %operand = bf16[2,4819,4]{2,1,0} copy(%parameter.1)
  %index = s32[2,1000,2]{2,1,0} concatenate(s32[2,1000,1]{1,0,2} %iota,
    s32[2,1000,1]{2,1,0} %parameter.0), dimensions={2},
    sharding={devices=[1,4,1]0,1,2,3}
  ROOT %gather = bf16[2,1000,4]{2,1,0} gather(bf16[2,4819,4]{2,1,0} %operand,
    s32[2,1000,2]{2,1,0} %index), offset_dims={2},
    collapsed_slice_dims={0,1}, start_index_map={0,1},
    index_vector_dim=2, slice_sizes={1,1,4},
    sharding={devices=[1,4,1]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  EXPECT_THAT(operand, op::Sharding("{replicated}"));
}

TEST_P(ParameterizedMetadataTest, ManualSubgroupForward) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[6,3]{1,0} parameter(0),
    sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
  %copy = f32[6,3]{1,0} copy(%param0)
  %param1 = f32[6,3]{1,0} parameter(1),
    sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
  %copy.1 = f32[6,3]{1,0} copy(%param1)
  %add = f32[6,3]{1,0} add(%copy, %copy.1)
  ROOT %copy.2 = f32[6,3]{1,0} copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "add");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
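// Manual subgroup sharding still propagates when only one operand carries an
// annotation.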
TEST_P(ParameterizedMetadataTest, ManualSubgroup_SingleOperandHasSharding) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[6,3]{1,0} parameter(0),
    sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
  %copy = f32[6,3]{1,0} copy(%param0)
  %param1 = f32[6,3]{1,0} parameter(1)
  %copy.1 = f32[6,3]{1,0} copy(%param1)
  %add = f32[6,3]{1,0} add(%copy, %copy.1)
  ROOT %copy.2 = f32[6,3]{1,0} copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "add");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
  auto* operand = FindInstruction(module.get(), "copy");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand,
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(operand->sharding(), ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(operand->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ManualSubgroup_OneOperandReplicate) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[6,3]{1,0} parameter(0),
    sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
  %copy = f32[6,3]{1,0} copy(%param0)
  %param1 = f32[6,3]{1,0} parameter(1),
    sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dims={replicated, manual} metadata={op_name="a"}}
  %copy.1 = f32[6,3]{1,0} copy(%param1)
  %add = f32[6,3]{1,0} add(%copy, %copy.1)
  ROOT %copy.2 = f32[6,3]{1,0} copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "add");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
  auto* operand = FindInstruction(module.get(), "copy");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand,
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(operand->sharding(), ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(operand->sharding(), ShardingMetadata({}));
  }
}
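// Manual subgroup sharding also propagates backward, from a user to its
// operands.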
TEST_P(ParameterizedMetadataTest, ManualSubgroupBackward) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[6,3]{1,0} parameter(0)
  %copy = f32[6,3]{1,0} copy(%param0)
  %param1 = f32[6,3]{1,0} parameter(1)
  %copy.1 = f32[6,3]{1,0} copy(%param1)
  %add = f32[6,3]{1,0} add(%copy, %copy.1),
    sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
  ROOT %copy.2 = f32[6,3]{1,0} copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_F(ShardingPropagationTest, SimpleManual) {
  const char* const hlo_string = R"(
HloModule module
%add {
  %lhs = f32[] parameter(0)
  %rhs = f32[] parameter(1)
  ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %entry {
  %param0 = f32[6,3] parameter(0)
  %copy = f32[6,3] copy(%param0), sharding={devices=[2,1]0,1}
  %annotate = f32[6,3] custom-call(%copy), custom_call_target="Sharding",
    sharding={devices=[2,1]0,1}
  %to_manual = f32[3,3] custom-call(%annotate),
    custom_call_target="SPMDFullToShardShape", sharding={manual}
  %zero = f32[] constant(0)
  %reduce = f32[3] reduce(%to_manual, %zero), dimensions={1}, to_apply=%add
  %annotate2 = f32[3] custom-call(%reduce), custom_call_target="Sharding",
    sharding={manual}
  %to_auto = f32[6] custom-call(%annotate2),
    custom_call_target="SPMDShardToFullShape", sharding={devices=[2]0,1}
  ROOT %copy.2 = f32[6] copy(%to_auto)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, true).Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "reduce");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{manual}"));
}
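// Manual sharding flows through tuple and get-tuple-element.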
  EXPECT_THAT(instruction, op::Sharding("{{manual}}"));
  instruction = FindInstruction(module.get(), "gte");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{manual}"));
}
TEST_F(ShardingPropagationTest, DefaultManualCustomCallForward) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[6,3]{1,0} parameter(0),
    sharding={manual metadata={op_name="a"}}
  %copy = f32[6,3]{1,0} copy(%param0)
  %param1 = f32[6,3]{1,0} parameter(1)
  %copy.1 = f32[6,3]{1,0} copy(%param1)
  %param2 = f32[6,3]{1,0} parameter(2)
  %copy.2 = f32[6,3]{1,0} copy(%param2)
  %custom-call = (f32[], f32[6,3]{1,0}) custom-call(%copy, %copy.1, %copy.2), custom_call_target="some_custom_call"
  ROOT %copy.3 = (f32[], f32[6,3]{1,0}) copy(%custom-call)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "custom-call");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{{manual},{manual}}"));
}
TEST_F(ShardingPropagationTest, RefineUnspecifiedDims) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[6,3] parameter(0)
  %copy = f32[6,3] copy(%param0),
    sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}
  %annotate = f32[6,3] custom-call(%copy), custom_call_target="Sharding",
    backend_config="unspecified_dims=[1]",
    sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}
  %copy.2 = f32[6,3] copy(%annotate)
  ROOT %copy.3 = f32[6,3] copy(%copy.2)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "copy.2");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,2,1,3}"));
}
TEST_F(ShardingPropagationTest, RefineUnspecifiedDimsWithManualConversion) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[6,3,8] parameter(0)
  %copy = f32[6,3,8] copy(%param0),
    sharding={devices=[1,2,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate}
  %annotate = f32[6,3,8] custom-call(%copy), custom_call_target="Sharding",
    backend_config="unspecified_dims=[1,2]",
    sharding={devices=[2,1,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate}
  %to_manual = f32[3,3,8] custom-call(%annotate),
    custom_call_target="SPMDFullToShardShape",
    backend_config="unspecified_dims=[1,2]",
    sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}}
  %annotate2 = f32[3,3,8] custom-call(%to_manual), custom_call_target="Sharding",
    backend_config="unspecified_dims=[1,2]",
    sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}}
  %to_auto = f32[6,3,8] custom-call(%annotate2),
    custom_call_target="SPMDShardToFullShape",
    backend_config="unspecified_dims=[1,2]",
    sharding={devices=[2,1,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate}
  %copy.2 = f32[6,3,8] copy(%to_auto)
  ROOT %copy.3 = f32[6,3,8] copy(%copy.2),
    sharding={devices=[1,1,2,4]0,2,4,6,1,3,5,7 last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
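  // Propagation should refine the unspecified dims [1,2] on both sides of the
  // SPMDFullToShardShape / SPMDShardToFullShape conversion pair, as checked on
  // copy.2, to_manual, and to_auto below.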
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* copy2 = FindInstruction(module.get(), "copy.2");
  ASSERT_NE(copy2, nullptr);
  EXPECT_THAT(copy2, op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7}"));
  auto* to_manual = FindInstruction(module.get(), "to_manual");
  ASSERT_NE(to_manual, nullptr);
  EXPECT_THAT(
      to_manual,
      op::Sharding(
          "{devices=[1,2,2,2]0,2,1,3,4,6,5,7 last_tile_dims={manual}}"));
  auto* to_auto = FindInstruction(module.get(), "to_auto");
  ASSERT_NE(to_auto, nullptr);
  EXPECT_THAT(to_auto, op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7}"));
}
TEST_F(ShardingPropagationTest, RefineUnspecifiedDimsWithManualConversion2) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[6,3,8] parameter(0)
  %copy = f32[6,3,8] copy(%param0)
  %annotate1 = f32[6,3,8] custom-call(%copy), custom_call_target="Sharding",
    backend_config="unspecified_dims=[1,2]",
    sharding={devices=[2,1,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate}
  %to_manual = f32[3,3,8] custom-call(%annotate1),
    custom_call_target="SPMDFullToShardShape",
    backend_config="unspecified_dims=[1,2]",
    sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}}
  %annotate2 = f32[3,3,8] custom-call(%to_manual), custom_call_target="Sharding",
    backend_config="unspecified_dims=[1,2]",
    sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}}
  %annotate3 = f32[3,3,8] custom-call(%annotate2), custom_call_target="Sharding",
    backend_config="unspecified_dims=[1,2]",
    sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}}
  %to_auto = f32[6,3,8] custom-call(%annotate3),
    custom_call_target="SPMDShardToFullShape",
    backend_config="unspecified_dims=[1,2]",
    sharding={devices=[2,1,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate}
  %copy.2 = f32[6,3,8] copy(%to_auto),
    sharding={devices=[1,2,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate}
  ROOT %copy.3 = f32[6,3,8] copy(%copy.2)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* copy = FindInstruction(module.get(), "copy");
  ASSERT_NE(copy, nullptr);
  EXPECT_THAT(
      copy, op::Sharding(
                "{devices=[2,2,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
}
TEST_F(ShardingPropagationTest, DoNotRefineUnspecifiedDimsOnManual) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[6,3] parameter(0), sharding={manual}
  %annotate = f32[6,3] custom-call(%param0), custom_call_target="Sharding",
    backend_config="unspecified_dims=[1]", sharding={manual}
  ROOT %copy.2 = f32[6,3] copy(%annotate), sharding={manual}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  EXPECT_TRUE(changed);
  for (auto* hlo : module->entry_computation()->instructions()) {
    EXPECT_TRUE(hlo->sharding().IsManual());
  }
}
TEST_F(ShardingPropagationTest, DoNotPassManualShardingToSPMDShardToFullShape) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  p.0 = f32[2,3]{1,0} parameter(0), sharding={replicated}
  custom-call.2 = f32[2,3]{1,0} custom-call(p.0), custom_call_target="Sharding", sharding={replicated}
  custom-call.3 = f32[2,3]{1,0} custom-call(custom-call.2), custom_call_target="SPMDFullToShardShape", sharding={manual}
  custom-call.4 = f32[2,3]{1,0} custom-call(custom-call.3), custom_call_target="Sharding", sharding={manual}
  ROOT custom-call.5 = f32[16,3]{1,0} custom-call(custom-call.4), custom_call_target="SPMDShardToFullShape", sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true,
                          /*allow_spmd_sharding_propagation_to_output=*/{true})
          .Run(module.get()));
  EXPECT_TRUE(changed);
  auto spmd_shard_to_full = module->entry_computation()->root_instruction();
  CHECK(spmd_shard_to_full->IsCustomCall("SPMDShardToFullShape"));
  EXPECT_FALSE(spmd_shard_to_full->sharding().IsManual());
}
TEST_F(ShardingPropagationTest, ManualShardingPassThroughSplitConstant) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  p.0 = f32[2,3]{1,0} parameter(0), sharding={replicated}
  p.1 = f32[2,3]{1,0} parameter(1), sharding={replicated}
  constant = f32[2,3]{1,0} constant({{0,1,2},{3,4,5}})
  custom-call.0 = f32[2,3]{1,0} custom-call(p.0), custom_call_target="Sharding", sharding={replicated}
  custom-call.1 = f32[2,3]{1,0} custom-call(custom-call.0), custom_call_target="SPMDFullToShardShape", sharding={manual}
  add.0 = f32[2,3]{1,0} add(constant, custom-call.1)
  custom-call.2 = f32[2,3]{1,0} custom-call(add.0), custom_call_target="SPMDShardToFullShape", sharding={replicated}
  add.1 = f32[2,3]{1,0} add(constant, p.1)
  ROOT tuple = (f32[2,3]{1,0}, f32[2,3]{1,0}) tuple(custom-call.2, add.1)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool is_split,
      HloConstantSplitter(/*split_expressions=*/true).Run(module.get()));
  EXPECT_TRUE(is_split);
  TF_ASSERT_OK_AND_ASSIGN(auto _, HloDCE().Run(module.get()));
  (void)_;
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* add0 = FindInstruction(module.get(), "add.0");
  const HloInstruction* manual_constant = add0->operand(0);
  EXPECT_TRUE(manual_constant->IsConstant() &&
              manual_constant->sharding().IsManual());
  const HloInstruction* add1 = FindInstruction(module.get(), "add.1");
  const HloInstruction* replicate_constant = add1->operand(0);
  EXPECT_TRUE(replicate_constant->IsConstant() &&
              replicate_constant->sharding().IsReplicated());
}
TEST_F(ShardingPropagationTest, ReshapeNoMatchSubgroupManual) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[1,3,3] parameter(0),
    sharding={devices=[2,1,1,2]0,1,2,3 last_tile_dims={manual}}
  %reshape = f32[3,1,3,1] reshape(%param0)
  ROOT %copy = f32[3,1,3,1] copy(%reshape)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "reshape");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding(
          "{devices=[1,1,1,1,2,2]0,2,1,3 last_tile_dims={manual,replicated}}"));
}
TEST_F(ShardingPropagationTest, X64Combine) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[102,192,192] parameter(0),
    sharding={devices=[1,2,2]0,1,2,3}
  %param1 = f32[102,192,192] parameter(1),
    sharding={devices=[1,2,2]0,1,2,3}
  %custom-call = f64[102,192,192] custom-call(f32[102,192,192] %param0, f32[102,192,192] %param1), custom_call_target="X64Combine"
  ROOT %copy = f64[102,192,192] copy(%custom-call),
    sharding={devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "custom-call");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, LayoutConstraint) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[102,192,192] parameter(0),
    sharding={devices=[1,2,2]0,1,2,3}
  %custom-call = f32[102,192,192]{0,1,2} custom-call(f32[102,192,192] %param0), custom_call_target="LayoutConstraint"
  ROOT %copy = f32[102,192,192] copy(%custom-call),
    sharding={devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "custom-call");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction->shape(), ShapeUtil::MakeShapeWithDenseLayout(
                                        F32, {102, 192, 192}, {0, 1, 2}));
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, OffloadingPropagation) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %offloading {
  %param0 = f32[1,256,128] parameter(0), sharding={devices=[1,1,4]0,1,2,3}
  %zero = f32[] constant(0.0)
  %broadcast = f32[256,256,128] broadcast(%zero), dimensions={}
  %izero = s32[] constant(0)
  %custom-call.0 = f32[1,256,128] custom-call(f32[1,256,128] %param0), custom_call_target="MoveToHost"
  %dynamic-update-slice = f32[256,256,128] dynamic-update-slice(%broadcast, %custom-call.0, %izero, %izero, %izero)
  %dynamic-slice = f32[1,256,128] dynamic-slice(%dynamic-update-slice, %izero, %izero, %izero), dynamic_slice_sizes={1,256,128}
  %custom-call.1 = f32[1,256,128] custom-call(f32[1,256,128] %dynamic-slice), custom_call_target="MoveToDevice"
  ROOT %copy = f32[1,256,128] copy(%custom-call.1), sharding={devices=[1,4,1]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* to_host = FindInstruction(module.get(), "custom-call.0");
  EXPECT_THAT(to_host, op::Sharding("{devices=[1,1,4]0,1,2,3}"));
  auto* from_host_input =
      FindInstruction(module.get(), "custom-call.1")->operand(0);
  EXPECT_THAT(from_host_input, op::Sharding("{devices=[1,1,4]0,1,2,3}"));
}
TEST_P(ParameterizedMetadataTest, PropagateThroughSingleUsers) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], f32[10,10], f32[10,10]) parameter(0)
  %count.cond = u32[] get-tuple-element((u32[], f32[10,10], f32[10,10]) %vars.cond), index=0
  %limit = u32[] constant(10)
  ROOT %lt = pred[] compare(u32[] %count.cond, u32[] %limit), direction=LT
}
%body {
  %vars = (u32[], f32[10,10], f32[10,10]) parameter(0)
  %count = u32[] get-tuple-element(%vars), index=0
  %acc = f32[10,10] get-tuple-element((u32[], f32[10,10],f32[10,10]) %vars), index=1
  %cvt = s32[10,10] convert(acc)
  %one = u32[] constant(1)
  %count.1 = u32[] add(u32[] %count, u32[] %one)
  %acc.i = s32[10,10] add(s32[10,10] %cvt, s32[10,10] %cvt), sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}
  %acc.1 = f32[10,10] convert(acc.i)
  ROOT %tuple = (u32[], f32[10,10], f32[10,10]) tuple(u32[] %count.1, f32[10,10] %acc, f32[10,10] %acc.1)
}
ENTRY %entry {
  %p0 = f32[10,10] parameter(0)
  %p0.copy = f32[10,10] copy(f32[10,10] %p0), sharding={devices=[4,1]0,1,2,3}
  %p1 = f32[10,10] parameter(1)
  %p2 = f32[10,10] parameter(2)
  %p2.copy = f32[10,10] copy(f32[10,10] %p2)
  %zero = u32[] constant(0)
  %init = (u32[], f32[10,10], f32[10,10]) tuple(u32[] %zero, f32[10,10] %p0.copy, f32[10,10] %p2.copy)
  %while = (u32[], f32[10,10], f32[10,10]) while((u32[], f32[10,10], f32[10,10]) %init),
    body=%body, condition=%cond
  %g1 = u32[] get-tuple-element((u32[], f32[10,10], f32[10,10]) %while), index=0
  %g2 = f32[10,10] get-tuple-element((u32[], f32[10,10], f32[10,10]) %while), index=1
  %g3 = f32[10,10] get-tuple-element((u32[], f32[10,10], f32[10,10]) %while), index=2
  ROOT %t = (u32[], f32[10,10], f32[10,10]) tuple(%g1, %g2, %g3)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto body_root = FindInstruction(module.get(), "tuple");
  EXPECT_NE(nullptr, body_root);
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
          .Run(module.get()));
  VLOG(1) << "Mod:";
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* convert_instr = FindInstruction(module.get(), "cvt");
  EXPECT_THAT(convert_instr, op::Sharding("{devices=[4,1]0,1,2,3}"));
}
TEST_P(ParameterizedMetadataTest, NestedTupleFromUserSharding) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], ((f32[10,10], f32[10,10]), f32[]), f32[10,10]) parameter(0)
  %count.cond = u32[] get-tuple-element(%vars.cond), index=0
  %limit = u32[] constant(10)
  ROOT %lt = pred[] compare(u32[] %count.cond, u32[] %limit), direction=LT
}
%body {
  %vars = (u32[], ((f32[10,10], f32[10,10]), f32[]), f32[10,10]) parameter(0)
  %count = u32[] get-tuple-element(%vars), index=0
  %fwd = ((f32[10,10], f32[10,10]), f32[]) get-tuple-element(%vars), index=1
  %acc = f32[10,10] get-tuple-element(%vars), index=2
  %cvt = s32[10,10] convert(acc)
  %one = u32[] constant(1)
  %count.1 = u32[] add(u32[] %count, u32[] %one)
  %acc.i = s32[10,10] add(s32[10,10] %cvt, s32[10,10] %cvt)
  %acc.1 = f32[10,10] convert(acc.i)
  ROOT %tuple = (u32[], ((f32[10,10], f32[10,10]), f32[]), f32[10,10]) tuple(%count.1, %fwd, %acc.1)
}
ENTRY %entry {
  %p0 = f32[10,10] parameter(0)
  %p0.copy = f32[10,10] copy(f32[10,10] %p0)
  %p1 = f32[10,10] parameter(1)
  %p1.copy = f32[10,10] copy(f32[10,10] %p1)
  %p2 = f32[10,10] parameter(2)
  %p2.copy = f32[10,10] copy(f32[10,10] %p2)
  %zero = u32[] constant(0)
  %zerof = f32[] constant(0)
  %init0 = (f32[10,10], f32[10,10]) tuple(%p0.copy, %p1.copy)
  %init1 = ((f32[10,10], f32[10,10]), f32[]) tuple(%init0, %zerof)
  %init = (u32[], ((f32[10,10], f32[10,10]), f32[]), f32[10,10]) tuple(%zero, %init1, %p2.copy)
  %while = (u32[], ((f32[10,10], f32[10,10]), f32[]), f32[10,10]) while(%init),
    body=%body, condition=%cond
  %g1 = u32[] get-tuple-element(%while), index=0
  %g2 = ((f32[10,10], f32[10,10]), f32[]) get-tuple-element(%while), index=1
  %g2.0 = (f32[10,10], f32[10,10]) get-tuple-element(%g2), index=0
  %g2.0.0 = f32[10,10] get-tuple-element(%g2.0), index=0
  %g3 = f32[10,10] get-tuple-element(%while), index=2
  %copy.g3 = f32[10,10] copy(%g3), sharding={devices=[4,1]0,1,2,3}
  ROOT %t = (u32[], f32[10,10], f32[10,10]) tuple(%g1, %g2.0.0, %g3)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto body_root = FindInstruction(module.get(), "tuple");
  EXPECT_NE(nullptr, body_root);
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* convert_instr =
      FindInstruction(module.get(), "p2.copy");
  EXPECT_THAT(convert_instr, op::Sharding("{devices=[4,1]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, CSEPreventionOnly) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[] parameter(0), sharding={replicated}
  %br = f32[4] broadcast(%param0), dimensions={}
  %add = f32[4] add(%br, %br)
  %annotate = f32[4] custom-call(%add), custom_call_target="Sharding",
    backend_config="unspecified_dims=[0]", sharding={replicated}
  ROOT %copy = f32[4] copy(%annotate), sharding={devices=[4]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false},
          /*cse_prevention_only=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* br = FindInstruction(module.get(), "br");
  EXPECT_THAT(br, op::Sharding("{devices=[4]0,1,2,3}"));
  EXPECT_THAT(br->sharding(), ShardingMetadata({CreateMetadata(
                                  "_sharding_propagation_cse_prevention")}));
  EXPECT_THAT(FindInstruction(module.get(), "annotate"),
              AllOf(op::Sharding("{replicated}"), op::CustomCall()));
  EXPECT_FALSE(FindInstruction(module.get(), "add")->has_sharding());
}
TEST_F(ShardingPropagationTest, RemoveCSEPrevention) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[] parameter(0), sharding={replicated}
  %br = f32[4] broadcast(%param0), dimensions={},
    sharding={devices=[4]0,1,2,3 metadata={op_name="_sharding_propagation_cse_prevention"}}
  %add = f32[4] add(%br, %br)
  %annotate = f32[4] custom-call(%add), custom_call_target="Sharding",
    backend_config="unspecified_dims=[0]", sharding={replicated}
  ROOT %copy = f32[4] copy(%annotate), sharding={devices=[4]3,2,1,0}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(FindInstruction(module.get(), "br"),
              op::Sharding("{devices=[4]3,2,1,0}"));
  EXPECT_THAT(FindInstruction(module.get(), "add"),
              op::Sharding("{devices=[4]3,2,1,0}"));
}
TEST_F(ShardingPropagationTest, ReshapeTrivialDimPartialReplicate) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[8,128] parameter(0), sharding={replicated}
  %c = f32[8,128] copy(%param0)
  %rsp = f32[8,1,128] reshape(%c),
    sharding={devices=[1,2,4]0,1,2,3,4,5,6,7}
  ROOT %copy = f32[8,1,128] copy(%rsp),
    sharding={devices=[1,2,4]0,1,2,3,4,5,6,7}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(
      FindInstruction(module.get(), "c"),
      op::Sharding("{devices=[1,4,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate}"));
}
TEST_F(ShardingPropagationTest, EmptyTupleWithinTuple) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[2] parameter(0), sharding={replicated}
  %et = () tuple()
  %tuple = (f32[2], (), (), f32[2]) tuple(%param0, %et, %et, %param0)
  ROOT %copy = (f32[2], (), (), f32[2]) copy(%tuple)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
}
TEST_F(ShardingPropagationTest, ContractingAsNonContractingCrash) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[20,64,56,56]{3,2,1,0} parameter(0), sharding={replicated}
  %p1 = f32[1,1,256,64]{2,3,1,0} parameter(1), sharding={devices=[4,2,1,1]0,1,2,3,4,5,6,7}
  %convolution.4512 = f32[20,256,56,56]{3,2,1,0} convolution(%p0, %p1), window={size=1x1}, dim_labels=bf01_01oi->bf01
  ROOT %copy = f32[20,256,56,56]{3,2,1,0} copy(%convolution.4512)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
}
TEST_F(ShardingPropagationTest, PropagateReduceManualTuple) {
  const char* const hlo_string = R"(
HloModule pjit
orclone {
  lhs.1 = u32[] parameter(0)
  rhs.1 = u32[] parameter(2)
  or.2 = u32[] or(lhs.1, rhs.1)
  lhs.0 = u32[] parameter(1)
  rhs.0 = u32[] parameter(3)
  or.3 = u32[] or(lhs.0, rhs.0)
  ROOT tuple.4 = (u32[], u32[]) tuple(or.2, or.3)
}
ENTRY %main.21 {
  select.104 = u32[2,2]{1,0} parameter(0), sharding={manual}
  shift-left.5 = u32[2,2]{1,0} parameter(1), sharding={manual}
  constant.4183 = u32[] constant(0), sharding={manual}
  reduce.1 = (u32[2]{0}, u32[2]{0}) reduce(shift-left.5, select.104, constant.4183, constant.4183), dimensions={1}, to_apply=orclone
  ROOT get-tuple-element.13 = u32[2]{0} get-tuple-element(reduce.1), index=0
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  EXPECT_THAT(FindInstruction(module.get(), "reduce.1"),
              op::Sharding("{{manual}, {manual}}"));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
}
TEST_F(ShardingPropagationTest, MergeCompatibleTiles) {
  const char* const hlo_string = R"(
HloModule pjit
ENTRY %main.21 {
  p = bf16[8,4,256,1024,12288]{4,3,2,1,0} parameter(0), sharding={devices=[8,1,1,1,1]0,1,2,3,4,5,6,7}
  p2 = bf16[8,4,256,1024,12288]{4,3,2,1,0} parameter(1), sharding={devices=[4,1,1,1,1,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate}
  c0 = bf16[8,4,256,1024,12288]{4,3,2,1,0} copy(p)
  c1 = bf16[8,4,256,1024,12288]{4,3,2,1,0} copy(p2)
  a = bf16[8,4,256,1024,12288]{4,3,2,1,0} add(c0, c1)
  ROOT c2 = bf16[8,4,256,1024,12288]{4,3,2,1,0} copy(a), sharding={devices=[8,1,1,1,1]0,1,2,3,4,5,6,7}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(FindInstruction(module.get(), "c1"),
              op::Sharding("{devices=[8,1,1,1,1]0,1,2,3,4,5,6,7}"));
}
TEST_F(ShardingPropagationTest, OutfeedUser) {
  const char* const hlo_string = R"(
HloModule pjit
ENTRY %main.21 {
  p = f32[10,128]{1,0} parameter(0)
  c = f32[10,128]{1,0} copy(p)
  t = (f32[10,128]{1,0}) tuple(c)
  a = token[] after-all()
  ROOT of = token[] outfeed((f32[10,128]{1,0}) %t, token[] %a), outfeed_shape=(f32[10,128]{1,0}), sharding={{devices=[2,1]0,1}, {maximal device=0}}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(FindInstruction(module.get(), "c"),
              op::Sharding("{devices=[2,1]0,1}"));
}
TEST_F(ShardingPropagationTest, SortForwardWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
compare {
  p.0.lhs = f32[] parameter(0), sharding={replicated}
  p.0.rhs = f32[] parameter(1), sharding={replicated}
  ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
  param.0 = f32[1024,1024]{1,0} parameter(0)
  negate.0 = f32[1024,1024]{1,0} negate(param.0), sharding={devices=[1,8]0,1,2,3,4,5,6,7}
  %shard-barrier-from = f32[1024,1024]{1,0} custom-call(%negate.0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  sort.0 = f32[1024,1024]{1,0} sort(shard-barrier-from), dimensions={1}, is_stable=true, to_apply=compare
  ROOT copy.0 = f32[1024,1024]{1,0} copy(sort.0)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_FALSE(FindInstruction(module.get(), "sort.0")->has_sharding());
}
TEST_F(ShardingPropagationTest, SortBackwardWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
compare {
  p.0.lhs = f32[] parameter(0), sharding={replicated}
  p.0.rhs = f32[] parameter(1), sharding={replicated}
  ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
  param.0 = f32[1024,1024]{1,0} parameter(0)
  negate.0 = f32[1024,1024]{1,0} negate(param.0)
  %shard-barrier-to = f32[1024,1024]{1,0} custom-call(%negate.0), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
  sort.0 = f32[1024,1024]{1,0} sort(shard-barrier-to), dimensions={1}, is_stable=true, to_apply=compare,
    sharding={devices=[1,8]0,1,2,3,4,5,6,7}
  ROOT copy.0 = f32[1024,1024]{1,0} copy(sort.0)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_THAT(FindInstruction(module.get(), "negate.0"),
              op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest, SortOperandShardedOnSortDim_RankOne) {
  const char* const hlo_string = R"(
HloModule module, entry_computation_layout={(f32[1024]{0})->(f32[1024]{0}, s32[1024]{0})}
compare {
  p.0.lhs = f32[] parameter(0), sharding={replicated}
  p.0.rhs = f32[] parameter(1), sharding={replicated}
  p.1.lhs = s32[] parameter(2), sharding={replicated}
  p.1.rhs = s32[] parameter(3), sharding={replicated}
  ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
  param.0 = f32[1024]{0} parameter(0)
  negate.0 = f32[1024]{0} negate(param.0), sharding={devices=[8]0,1,2,3,4,5,6,7}
  iota.0 = s32[1024]{0} iota(), iota_dimension=0
  sort.0 = (f32[1024]{0}, s32[1024]{0}) sort(negate.0, iota.0), dimensions={0}, is_stable=true, to_apply=compare
  ROOT copy.0 = (f32[1024]{0}, s32[1024]{0}) copy(sort.0)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_FALSE(changed);
}
TEST_F(ShardingPropagationTest, SortOperandShardedOnSortDim_RankTwo) {
  const char* const hlo_string = R"(
HloModule module, entry_computation_layout={(f32[1024,1024]{1,0})->(f32[1024,1024]{1,0}, s32[1024,1024]{1,0})}
compare {
  p.0.lhs = f32[] parameter(0), sharding={replicated}
  p.0.rhs = f32[] parameter(1), sharding={replicated}
  p.1.lhs = s32[] parameter(2), sharding={replicated}
  p.1.rhs = s32[] parameter(3), sharding={replicated}
  ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
  param.0 = f32[1024,1024]{1,0} parameter(0)
  negate.0 = f32[1024,1024]{1,0} negate(param.0), sharding={devices=[1,8]0,1,2,3,4,5,6,7}
  iota.0 = s32[1024,1024]{1,0} iota(), iota_dimension=1
  sort.0 = (f32[1024,1024]{1,0}, s32[1024,1024]{1,0}) sort(negate.0, iota.0), dimensions={1}, is_stable=true, to_apply=compare
  ROOT copy.0 = (f32[1024,1024]{1,0}, s32[1024,1024]{1,0}) copy(sort.0)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(FindInstruction(module.get(), "iota.0"),
              op::Sharding("{devices=[1,8]0,1,2,3,4,5,6,7}"));
  EXPECT_THAT(
      FindInstruction(module.get(), "sort.0"),
      op::Sharding(
          "{{devices=[1,8]0,1,2,3,4,5,6,7}, {devices=[1,8]0,1,2,3,4,5,6,7}}"));
}
TEST_F(ShardingPropagationTest, ConditionalManual) {
  const char* const hlo_string = R"(
HloModule module
%true_comp {
  %tp = (f32[3,5], f32[]) parameter(0)
  %tgte.0 = f32[3,5] get-tuple-element(%tp), index=0
  %tgte.1 = f32[] get-tuple-element(%tp), index=1
  %ttr = f32[5,3] transpose(%tgte.0), dimensions={1,0}
  %broadcast.1 = f32[5,3] broadcast(%tgte.1), dimensions={}
  %add.1 = f32[5,3] add(%broadcast.1, %ttr)
  ROOT %tr = (f32[5,3], f32[]) tuple(%add.1, %tgte.1)
}
%false_comp {
  %fp = (f32[5,3], f32[5,3], f32[]) parameter(0)
  %fgte.0 = f32[5,3] get-tuple-element(%fp), index=0
  %fgte.1 = f32[] get-tuple-element(%fp), index=2
  ROOT %fr = (f32[5,3], f32[]) tuple(%fgte.0, %fgte.1)
}
ENTRY entry {
  %cond = pred[] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
  %tp.0 = f32[3,5] parameter(1), sharding={devices=[1,1,2,2]<=[4] last_tile_dims={manual, replicated}}
  %fp.0 = f32[5,3] parameter(2), sharding={devices=[1,1,2,2]<=[4] last_tile_dims={manual, replicated}}
  %const0 = f32[] constant(0)
  %const1 = f32[] constant(1)
  %true_param = (f32[3,5], f32[]) tuple(%tp.0, %const0)
  %false_param = (f32[5,3], f32[5,3], f32[]) tuple(%fp.0, fp.0, %const1)
  ROOT %conditional = (f32[5,3], f32[]) conditional(
    %cond, %true_param, %false_param),
    true_computation=%true_comp,
    false_computation=%false_comp
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* tp = FindInstruction(module.get(), "tp");
  auto* true_param = FindInstruction(module.get(), "true_param");
  EXPECT_EQ(tp->sharding(), true_param->sharding());
  auto* fp = FindInstruction(module.get(), "fp");
  auto* false_param = FindInstruction(module.get(), "false_param");
  EXPECT_EQ(fp->sharding(), false_param->sharding());
}
TEST_F(ShardingPropagationTest, WhileDSManual) {
  const char* const hlo_string = R"(
HloModule module
while.condition {
  arg_tuple = (s32[], pred[2,8,4]) parameter(0)
  tripcount = s32[] get-tuple-element(arg_tuple), index=0
  triplimit = s32[] constant(2)
  ROOT compare.0 = pred[] compare(tripcount, triplimit), direction=LT
}
while.body {
  arg_tuple = (s32[], pred[2,8,4]) parameter(0)
  tripcount = s32[] get-tuple-element(arg_tuple), index=0
  one = s32[] constant(0)
  tripcount_next = s32[] add(tripcount, one)
  preds.1 = pred[2,8,4] get-tuple-element(arg_tuple), index=1
  zero.1 = s32[] constant(0)
  dynamic-slice.1 = pred[1,8,4] dynamic-slice(preds.1, tripcount, zero.1, zero.1), dynamic_slice_sizes={1,8,4}, sharding={devices=[1,1,1,2,4]<=[8] last_tile_dims={manual, replicated}}
  ROOT result = (s32[], pred[2,8,4]) tuple(tripcount_next, preds.1)
}
ENTRY entry {
  preds = pred[2,8,4] parameter(0), sharding={devices=[1,1,1,2,4]<=[8] last_tile_dims={manual, replicated}}
  zero = s32[] constant(0)
  tuple.13 = (s32[], pred[2,8,4]) tuple(zero, preds)
  ROOT result = while(tuple.13), condition=while.condition, body=while.body
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* tuple = FindInstruction(module.get(), "tuple.13");
  EXPECT_THAT(tuple, op::Sharding("{{replicated}, {devices=[1,1,1,2,4]<=[8] "
                                  "last_tile_dims={manual, replicated}}}"));
}
TEST_F(ShardingPropagationTest, PropagateToOutput) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[] parameter(0), sharding={replicated}
  %br = f32[4] broadcast(%param0), dimensions={}
  %annotate = f32[4] custom-call(%br), custom_call_target="Sharding",
    backend_config="unspecified_dims=[0]", sharding={devices=[4]0,1,2,3}
  ROOT %add = f32[4] add(%annotate, %annotate), sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true,
                          /*allow_spmd_sharding_propagation_to_output=*/{true})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToOutputTuplePartial) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[] parameter(0), sharding={replicated}
  %br = f32[4] broadcast(%param0), dimensions={}
  %annotate = f32[4] custom-call(%br), custom_call_target="Sharding",
    backend_config="unspecified_dims=[0]", sharding={devices=[4]0,1,2,3}
  %add = f32[4] add(%annotate, %annotate)
  %param1 = f32[] parameter(1), sharding={replicated}
  %br1 = f32[4] broadcast(%param1), dimensions={}
  %annotate1 = f32[4] custom-call(%br1), custom_call_target="Sharding",
    backend_config="unspecified_dims=[0]", sharding={devices=[4]0,1,2,3}
  %add1 = f32[4] add(%annotate1, %annotate1)
  ROOT t = (f32[4], f32[4]) tuple(add, add1), sharding={{replicated},{replicated}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true, false})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Sharding("{{devices=[4]0,1,2,3},{replicated}}"));
}
TEST_F(ShardingPropagationTest, PropagateToOutputTupleFull) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[] parameter(0), sharding={replicated}
  %br = f32[4] broadcast(%param0), dimensions={}
  %annotate = f32[4] custom-call(%br), custom_call_target="Sharding",
    backend_config="unspecified_dims=[0]", sharding={devices=[4]0,1,2,3}
  %add = f32[4] add(%annotate, %annotate)
  %param1 = f32[] parameter(1), sharding={replicated}
  %br1 = f32[4] broadcast(%param1), dimensions={}
  %annotate1 = f32[4] custom-call(%br1), custom_call_target="Sharding",
    backend_config="unspecified_dims=[0]", sharding={devices=[4]0,1,2,3}
  %add1 = f32[4] add(%annotate1, %annotate1)
  ROOT t = (f32[4], f32[4]) tuple(add, add1), sharding={{replicated},{replicated}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true,
                          /*allow_spmd_sharding_propagation_to_output=*/{true})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Sharding("{{devices=[4]0,1,2,3},{devices=[4]0,1,2,3}}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersNotEnabled1) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[4] parameter(0)
  ROOT %add = f32[4] add(%param0, %param0), sharding={devices=[4]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
          .Run(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_FALSE(
      module->entry_computation()->parameter_instruction(0)->has_sharding());
}
TEST_F(ShardingPropagationTest, PropagateToParametersNotEnabled2) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[4] parameter(0), sharding={replicated}
  ROOT %add = f32[4] add(%param0, %param0), sharding={devices=[4]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(/*is_spmd=*/true).Run(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersNotEnabled3) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[4] parameter(0)
  %param1 = f32[4] parameter(1), sharding={replicated}
  ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false})
          .Run(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_FALSE(
      module->entry_computation()->parameter_instruction(0)->has_sharding());
  EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
              op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersNotEnabled4) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[4] parameter(0), sharding={replicated}
  %param1 = f32[4] parameter(1), sharding={replicated}
  ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
          .Run(module.get()));
  EXPECT_FALSE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{replicated}"));
  EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
              op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersPartial1) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[4] parameter(0), sharding={replicated}
  %param1 = f32[4] parameter(1), sharding={replicated}
  ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, true})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{replicated}"));
  EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
              op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersPartial2) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[4] parameter(0)
  %param1 = f32[4] parameter(1), sharding={replicated}
  ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, true})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_FALSE(
      module->entry_computation()->parameter_instruction(0)->has_sharding());
  EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
              op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersPartial3) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[4] parameter(0), sharding={replicated}
  %param1 = f32[4] parameter(1)
  ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, true})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
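  // Parameter 0 keeps its explicit {replicated} sharding; only parameter 1,
  // which opted in via allow_spmd_sharding_propagation_to_parameters, picks up
  // the tiled sharding from the root.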
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{replicated}"));
  EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
              op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersPartial4) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[4] parameter(0)
  %param1 = f32[4] parameter(1)
  ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, true})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_FALSE(
      module->entry_computation()->parameter_instruction(0)->has_sharding());
  EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
              op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersFull1) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[4] parameter(0)
  %param1 = f32[4] parameter(1)
  ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{devices=[4]0,1,2,3}"));
  EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
              op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersFull2) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[4] parameter(0), sharding={replicated}
  %param1 = f32[4] parameter(1)
  ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true, true})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{devices=[4]0,1,2,3}"));
  EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
              op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToTupleParameter_WithoutSharding) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param = (f32[4], f32[4]) parameter(0)
  %gte0 = f32[4] get-tuple-element(%param), index=0
  %gte1 = f32[4] get-tuple-element(%param), index=1
  ROOT %add = f32[4] add(%gte0, %gte1), sharding={devices=[4]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true, true})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{{devices=[4]0,1,2,3}, {devices=[4]0,1,2,3}}"));
}
TEST_F(ShardingPropagationTest, PropagateToTupleParameter_WithSharding1) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param = (f32[4], f32[4]) parameter(0), sharding={{replicated}, {replicated}}
  %gte0 = f32[4] get-tuple-element(%param), index=0
  %gte1 = f32[4] get-tuple-element(%param), index=1
  ROOT %add = f32[4] add(%gte0, %gte1), sharding={devices=[4]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, true})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{{replicated}, {devices=[4]0,1,2,3}}"));
}
TEST_F(ShardingPropagationTest, PropagateToTupleParameter_WithSharding2) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param = (f32[4], f32[4]) parameter(0), sharding={{replicated}, {replicated}}
  %gte0 = f32[4] get-tuple-element(%param), index=0
  %gte1 = f32[4] get-tuple-element(%param), index=1
  ROOT %add = f32[4] add(%gte0, %gte1), sharding={devices=[4]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true, false})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{{devices=[4]0,1,2,3}, {replicated}}"));
}
TEST_F(ShardingPropagationTest, PropagateManualOutfeed) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  p0 = f32[8]{0} parameter(0)
  p1 = f32[1]{0} parameter(1)
  tuple.1 = (f32[8]{0}) tuple(p0)
  constant.8 = u32[2]{0} constant({3, 12})
  tuple.10 = (u32[2]{0}) tuple(constant.8)
  aa.1 = token[] after-all()
  outfeed.1 = token[] outfeed(tuple.10, aa.1), outfeed_shape=(u32[2]{0}), sharding={{manual}, {manual}}
  outfeed.2 = token[] outfeed(tuple.1, outfeed.1), outfeed_shape=(f32[8]{0}), sharding={{manual}, {manual}}
  ROOT tuple.15 = (f32[1]{0}, token[]) tuple(p1, outfeed.2)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true, true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true, true})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Sharding("{{replicated}, {manual}}"));
}
TEST_F(ShardingPropagationTest, PropagateFromDanglingShardingCustomCall) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  p.0 = s32[40000]{0} parameter(0)
  add = s32[40000]{0} add(p.0, p.0)
  cc = s32[40000]{0} custom-call(add), custom_call_target="Sharding", sharding={devices=[4]0,1,2,3}
  ROOT mul = s32[40000]{0} multiply(add, add)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true})
          .Run(module.get()));
  EXPECT_TRUE(changed);
  HloDCE dce;
  TF_ASSERT_OK_AND_ASSIGN(bool dce_ed, RunHloPass(&dce, module.get()));
  EXPECT_TRUE(dce_ed);
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "param0");
  EXPECT_EQ(instruction, nullptr);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest,
       DoNotPropagateToParameterIfNotDivisible_WithSharding) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[4] parameter(0), sharding={replicated}
  %param1 = f32[3] parameter(1), sharding={replicated}
  %pad_value = f32[] constant(0)
  %pad = f32[4] pad(%param1, %pad_value), padding=0_1
  ROOT %add = f32[4] add(%param0, %pad), sharding={devices=[4]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, true})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{replicated}"));
  EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
              op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest,
       DoNotPropagateToParameterIfNotDivisible_WithoutSharding) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[4] parameter(0), sharding={replicated}
  %param1 = f32[3] parameter(1)
  %pad_value = f32[] constant(0)
  %pad = f32[4] pad(%param1, %pad_value), padding=0_1
  ROOT %add = f32[4] add(%param0, %pad), sharding={devices=[4]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, true})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{replicated}"));
  EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
              op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest, DoNotPropagateToTupleParameterIfNotDivisible) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = (f32[4], f32[3]) parameter(0), sharding={{replicated}, {replicated}}
  %gte0 = f32[4] get-tuple-element(%param0), index=0
  %gte1 = f32[3] get-tuple-element(%param0), index=1
  %pad_value = f32[] constant(0)
  %pad = f32[4] pad(%gte1, %pad_value), padding=0_1
  ROOT %add = f32[4] add(%gte0, %pad), sharding={devices=[4]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, true})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{{replicated}, {replicated}}"));
}
TEST_F(ShardingPropagationTest,
       DoNotPropagateToOutputIfNotDivisible_WithSharding) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[4] parameter(0), sharding={replicated}
  %param1 = f32[4] parameter(1), sharding={replicated}
  %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
  ROOT %slice = f32[3] slice(%add), slice={[0:3:1]}, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest,
       DoNotPropagateToOutputIfNotDivisible_WithoutSharding) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[4] parameter(0), sharding={replicated}
  %param1 = f32[4] parameter(1), sharding={replicated}
  %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
  ROOT %slice = f32[3] slice(%add), slice={[0:3:1]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest,
       DoNotPropagateToOutputTupleIfNotDivisible_WithSharding) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[4] parameter(0), sharding={replicated}
  %param1 = f32[4] parameter(1), sharding={replicated}
  %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
  %slice = f32[3] slice(%add), slice={[0:3:1]}
  ROOT %tuple = (f32[4], f32[3]) tuple(%add, %slice), sharding={{replicated}, {replicated}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false, true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Sharding("{{replicated}, {replicated}}"));
}
TEST_F(ShardingPropagationTest,
       DoNotPropagateToOutputTupleIfNotDivisible_WithoutSharding) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %param0 = f32[4] parameter(0), sharding={replicated}
  %param1 = f32[4] parameter(1), sharding={replicated}
  %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
  %slice = f32[3] slice(%add), slice={[0:3:1]}
  ROOT %tuple = (f32[4], f32[3]) tuple(%add, %slice)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true, true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Sharding("{{devices=[4]0,1,2,3}, {replicated}}"));
}
TEST_F(ShardingPropagationTest, PropagateShardLikeDifferentSharding) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  p.0 = s32[16,16] parameter(0), sharding={devices=[4,2]0,1,2,3,4,5,6,7}
  p.1 = s32[16,16] parameter(1), sharding={devices=[2,4]0,1,2,3,4,5,6,7}
  add.1 = s32[16,16] add(p.0, p.0)
  sharding.1 = s32[16,16] custom-call(add.1), custom_call_target="Sharding", sharding={unknown shard_like 0}
  add.2 = s32[16,16] add(p.1, p.1)
  sharding.2 = s32[16,16] custom-call(add.2), custom_call_target="Sharding", sharding={unknown shard_like 0}
  ROOT mul = s32[16,16] multiply(add.1, add.2)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
          .Run(module.get()));
  EXPECT_TRUE(changed);
  XLA_VLOG_LINES(1, module->ToString());
  auto* add_0 = FindInstruction(module.get(), "add.1");
  ASSERT_NE(add_0, nullptr);
  auto* add_1 = FindInstruction(module.get(), "add.2");
  ASSERT_NE(add_1, nullptr);
  EXPECT_NE(add_0->sharding(), add_1->sharding());
}
TEST_F(ShardingPropagationTest, PropagateShardLikeSameSharding) {
  const char* const hlo_string = R"(
HloModule module
%add {
  %lhs = s32[] parameter(0)
  %rhs = s32[] parameter(1)
  ROOT %add = s32[] add(%lhs, %rhs)
}
ENTRY %entry {
  p.0 = s32[16,16] parameter(0), sharding={devices=[4,2]0,1,2,3,4,5,6,7}
  p.1 = s32[16,16] parameter(1)
  add.1 = s32[16,16] add(p.0, p.0)
  sharding.1 = s32[16,16] custom-call(add.1), custom_call_target="Sharding", sharding={unknown shard_like 0}
  init = s32[] constant(0)
  reduce.1 = s32[] reduce(add.1, init), dimensions={0,1}, to_apply=%add
  add.2 = s32[16,16] add(p.1, p.1)
  sharding.2 = s32[16,16] custom-call(add.2), custom_call_target="Sharding", sharding={unknown shard_like 0}
  reduce.2 = s32[] reduce(add.2, init), dimensions={0,1}, to_apply=%add
  ROOT mul = s32[] multiply(reduce.1, reduce.2)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
          .Run(module.get()));
  EXPECT_TRUE(changed);
  XLA_VLOG_LINES(1, module->ToString());
  auto* add_1 = FindInstruction(module.get(), "add.1");
  ASSERT_NE(add_1, nullptr);
  auto* add_2 = FindInstruction(module.get(), "add.2");
  ASSERT_NE(add_2, nullptr);
  EXPECT_EQ(add_1->sharding(), add_2->sharding());
}
TEST_F(ShardingPropagationTest, PropagateShardAs) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  p.0 = s32[16,16] parameter(0), sharding={devices=[4,2]0,1,2,3,4,5,6,7}
  p.1 = s32[16,16] parameter(1), sharding={devices=[2,4]0,1,2,3,4,5,6,7}
  add.1 = s32[16,16] add(p.0, p.0)
  sharding.1 = s32[16,16] custom-call(add.1), custom_call_target="Sharding", sharding={unknown shard_as 0}
  add.2 = s32[16,16] add(p.1, p.1)
  sharding.2 = s32[16,16] custom-call(add.2), custom_call_target="Sharding", sharding={unknown shard_as 0}
  ROOT mul = s32[16,16] multiply(add.1, add.2)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
          .Run(module.get()));
  EXPECT_TRUE(changed);
  XLA_VLOG_LINES(1, module->ToString());
  auto* add_1 = FindInstruction(module.get(), "add.1");
  ASSERT_NE(add_1, nullptr);
  auto* add_2 = FindInstruction(module.get(), "add.2");
  ASSERT_NE(add_2, nullptr);
  EXPECT_EQ(add_1->sharding(), add_2->sharding());
}
TEST_F(ShardingPropagationTest, PropagateShardAsToParameters) {
  const char* const hlo_string = R"(
HloModule module
%add {
  %lhs = s32[] parameter(0)
  %rhs = s32[] parameter(1)
  ROOT %add = s32[] add(%lhs, %rhs)
}
ENTRY %entry {
  p.0 = s32[16,16] parameter(0), sharding={unknown shard_as 0}
  p.1 = s32[16,16] parameter(1), sharding={devices=[4,2]0,1,2,3,4,5,6,7}
  add.1 = s32[16,16] add(p.0, p.0)
  init = s32[] constant(0)
  reduce.1 = s32[] reduce(add.1, init), dimensions={0,1}, to_apply=%add
  add.2 = s32[16,16] add(p.1, p.1)
  sharding.2 = s32[16,16] custom-call(add.2), custom_call_target="Sharding", sharding={unknown shard_as 0}
  reduce.2 = s32[] reduce(add.2, init), dimensions={0,1}, to_apply=%add
  ROOT mul = s32[] multiply(reduce.1, reduce.2)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true, true})
          .Run(module.get()));
  EXPECT_TRUE(changed);
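  // shard_as group 0 ties p.0 to add.2, so p.0 should adopt the tiled sharding
  // that add.2 derives from p.1.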
XLA_VLOG_LINES(1, module->ToString());\n auto* p_0 = FindInstruction(module.get(), \"p.0\");\n ASSERT_NE(p_0, nullptr);\n auto* add_2 = FindInstruction(module.get(), \"add.2\");\n ASSERT_NE(add_2, nullptr);\n EXPECT_THAT(add_2, op::Sharding(\"{devices=[4,2]0,1,2,3,4,5,6,7}\"));\n EXPECT_EQ(p_0->sharding(), add_2->sharding());\n}\nTEST_F(ShardingPropagationTest, PropagateShardAsToOutputs) {\n const char* const hlo_string = R\"(\nHloModule module\n%add {\n %lhs = s32[] parameter(0)\n %rhs = s32[] parameter(1)\n ROOT %add = s32[] add(%lhs, %rhs)\n}\nENTRY %entry {\n p.0 = s32[16,16] parameter(0), sharding={devices=[4,2]0,1,2,3,4,5,6,7}\n add.1 = s32[16,16] add(p.0, p.0)\n sharding.1 = s32[16,16] custom-call(add.1), custom_call_target=\"Sharding\", sharding={unknown shard_as 0}\n init = s32[] constant(0)\n reduce.1 = s32[] reduce(add.1, init), dimensions={0,1}, to_apply=%add\n broadcast.1 = s32[16,16] broadcast(reduce.1), dimensions={}\n ROOT mul = s32[16,16] multiply(broadcast.1, broadcast.1), sharding={unknown shard_as 0}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(\n true, true,\n {true},\n {false})\n .Run(module.get()));\n EXPECT_TRUE(changed);\n XLA_VLOG_LINES(1, module->ToString());\n auto* add_1 = FindInstruction(module.get(), \"add.1\");\n ASSERT_NE(add_1, nullptr);\n auto* output = FindInstruction(module.get(), \"mul\");\n ASSERT_NE(output, nullptr);\n EXPECT_THAT(add_1, op::Sharding(\"{devices=[4,2]0,1,2,3,4,5,6,7}\"));\n EXPECT_EQ(add_1->sharding(), output->sharding());\n}\nTEST_F(ShardingPropagationTest, PropagateShardAsBetweenInputOutput) {\n const char* const hlo_string = R\"(\nHloModule jit_zeros_like\nENTRY main.6 {\n Arg_0.1 = s64[8,2]{1,0} parameter(0), sharding={devices=[4,2]<=[8]}\n custom-call.4 = s64[8,2]{1,0} custom-call(Arg_0.1), custom_call_target=\"Sharding\", sharding={unknown shard_as 0}\n constant.2 = s64[] constant(0)\n broadcast.3 = s64[8,2]{1,0} broadcast(constant.2), dimensions={}\n ROOT custom-call.5 = s64[8,2]{1,0} custom-call(broadcast.3), custom_call_target=\"Sharding\", sharding={unknown shard_as 0}\n}\n)\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(\n true, true,\n {true},\n {true})\n .Run(module.get()));\n EXPECT_TRUE(changed);\n VLOG(1) << module->ToString();\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Sharding(\"{devices=[4,2]0,1,2,3,4,5,6,7}\"));\n}\nTEST_F(ShardingPropagationTest, PropagateShardAsBetweenInputOutput2) {\n const char* const hlo_string = R\"(\nHloModule jit_f, entry_computation_layout={(f32[8]{0:T(256)})->(f32[8]{0:T(256)}, f32[8]{0:T(256)})}, allow_spmd_sharding_propagation_to_output={true,true}, num_partitions=4\nENTRY main.9 {\n Arg_0.1 = f32[8]{0} parameter(0)\n custom-call.6 = f32[8]{0} custom-call(Arg_0.1), custom_call_target=\"Sharding\", custom_call_has_side_effect=true, sharding={unknown shard_as 0}, metadata={op_name=\"jit(f)/jit(main)/shard_alike\" source_file=\"third_party/py/jax/tests/shard_alike_test.py\" source_line=206}\n custom-call.4 = f32[8]{0} custom-call(Arg_0.1), custom_call_target=\"Sharding\", sharding={devices=[4]<=[4]}, metadata={op_name=\"jit(f)/jit(main)/sharding_constraint[sharding=GSPMDSharding({devices=[4]<=[4]}) resource_env=ResourceEnv(mesh=Mesh(), ()) unconstrained_dims=set()]\" source_file=\"third_party/py/jax/tests/shard_alike_test.py\" source_line=204}\n 
constant.0 = f32[] constant(2)\n broadcast.0 = f32[8]{0} broadcast(constant.0), dimensions={}\n multiply.5 = f32[8]{0} multiply(custom-call.4, broadcast.0), metadata={op_name=\"jit(f)/jit(main)/mul\" source_file=\"third_party/py/jax/tests/shard_alike_test.py\" source_line=205}\n custom-call.7 = f32[8]{0} custom-call(multiply.5), custom_call_target=\"Sharding\", custom_call_has_side_effect=true, sharding={unknown shard_as 0}, metadata={op_name=\"jit(f)/jit(main)/shard_alike\" source_file=\"third_party/py/jax/tests/shard_alike_test.py\" source_line=206}\n ROOT tuple.8 = (f32[8]{0}, f32[8]{0}) tuple(custom-call.6, custom-call.7)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(\n true, true,\n {true, true},\n {true})\n .Run(module.get()));\n EXPECT_TRUE(changed);\n VLOG(1) << module->ToString();\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::Sharding(\"{{devices=[4]<=[4]}, {devices=[4]<=[4]}}\"));\n}\nTEST_F(ShardingPropagationTest, LookaheadUsersOfDot) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %entry {\n p0 = bf16[512,512,1024]{2,1,0} parameter(0), sharding={devices=[16,1,4]<=[64]}\n p1 = bf16[512,512,16,128]{3,2,1,0} parameter(1), sharding={devices=[16,1,4,1]<=[64]}\n p2 = bf16[16,1024,16,128]{3,2,1,0} parameter(2), sharding={devices=[1,4,4,1,4]<=[4,16]T(1,0) last_tile_dim_replicate}\n p3 = s32[] parameter(3)\n dot.1 = bf16[1024,16,128]{2,1,0} dot(p0, p1), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}\n reshape.1 = bf16[1,1024,16,128]{3,2,1,0} reshape(dot.1)\n constant.1 = s32[] constant(0)\n ROOT dynamic-update-slice.113 = bf16[16,1024,16,128]{3,2,1,0} dynamic-update-slice(p2, reshape.1, p3, constant.1, constant.1, constant.1), sharding={devices=[1,4,4,1,4]<=[4,16]T(1,0) last_tile_dim_replicate}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(\n true, true,\n {true},\n {true})\n .Run(module.get()));\n EXPECT_TRUE(changed);\n XLA_VLOG_LINES(1, module->ToString());\n auto* instruction = FindInstruction(module.get(), \"dot.1\");\n EXPECT_THAT(instruction,\n op::Sharding(\n \"{devices=[4,4,1,4]<=[4,16]T(1,0) last_tile_dim_replicate}\"));\n}\nTEST_F(ShardingPropagationTest, AsyncInstructionManualShardingArray) {\n const char* const hlo_string = R\"(\nHloModule module\ncalled_computation {\n p0 = s32[8] parameter(0)\n p1 = s32[8] parameter(1)\n ROOT add = s32[8] add(p0, p1)\n}, execution_thread=\"thread_1\" \nENTRY entry_computation {\n p0 = s32[8] parameter(0), sharding={manual}\n p1 = s32[8] parameter(1), sharding={manual}\n async-start = ((s32[8], s32[8]), s32[8], u32[]) call-start(p0, p1), async_execution_thread=\"thread_1\", to_apply=called_computation\n ROOT async-done = s32[8] call-done(async-start)\n}, execution_thread=\"thread_0\" \n)\";\n {\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(\n true, true,\n {true},\n {true})\n .Run(module.get(), {\"thread_0\"}));\n EXPECT_TRUE(changed);\n XLA_VLOG_LINES(1, module->ToString());\n auto* instruction = FindInstruction(module.get(), \"async-start\");\n ASSERT_NE(instruction, nullptr);\n EXPECT_THAT(instruction,\n op::Sharding(\"{{manual}, {manual}, {manual}, {manual}}\"));\n auto* async_done = FindInstruction(module.get(), \"async-done\");\n ASSERT_NE(async_done, nullptr);\n 
EXPECT_THAT(async_done, op::Sharding(\"{manual}\"));\n }\n {\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(\n true, true,\n {true},\n {true})\n .Run(module.get(), {\"thread_0\", \"thread_1\"}));\n EXPECT_FALSE(changed);\n }\n {\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(\n true, true,\n {true},\n {true})\n .Run(module.get()));\n EXPECT_FALSE(changed);\n }\n}\nTEST_F(ShardingPropagationTest, AsyncInstructionManualShardingTuple) {\n const char* const hlo_string = R\"(\nHloModule module\ncalled_computation {\n p0 = s32[8] parameter(0)\n p1 = s32[8] parameter(1)\n add = s32[8] add(p0, p1)\n mul = s32[8] multiply(p0, p1)\n ROOT result = (s32[8], s32[8]) tuple(add, mul)\n}, execution_thread=\"thread_1\" \nENTRY entry_computation {\n p0 = s32[8] parameter(0), sharding={manual}\n p1 = s32[8] parameter(1), sharding={manual}\n async-start = ((s32[8], s32[8]), (s32[8], s32[8]), u32[]) call-start(p0, p1), async_execution_thread=\"thread_1\", to_apply=called_computation\n ROOT async-done = (s32[8], s32[8]) call-done(async-start)\n}, execution_thread=\"thread_0\" \n)\";\n {\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(\n true, true,\n {true},\n {true})\n .Run(module.get(), {\"thread_0\"}));\n EXPECT_TRUE(changed);\n XLA_VLOG_LINES(1, module->ToString());\n auto* async_start = FindInstruction(module.get(), \"async-start\");\n ASSERT_NE(async_start, nullptr);\n EXPECT_THAT(\n async_start,\n op::Sharding(\"{{manual}, {manual}, {manual}, {manual}, {manual}}\"));\n auto* async_done = FindInstruction(module.get(), \"async-done\");\n ASSERT_NE(async_done, nullptr);\n EXPECT_THAT(async_done, op::Sharding(\"{{manual}, {manual}}\"));\n }\n {\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(\n true, true,\n {true},\n {true})\n .Run(module.get(), {\"thread_0\", \"thread_1\"}));\n EXPECT_FALSE(changed);\n }\n {\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(\n true, true,\n {true},\n {true})\n .Run(module.get()));\n EXPECT_FALSE(changed);\n }\n}\nTEST_F(ShardingPropagationTest, ShardAsWithShardBarrier) {\n const char* const hlo_string = R\"(\nHloModule pjit_f\nENTRY main.11 {\n Arg_0.1 = bf16[384,1408]{1,0} parameter(0), sharding={devices=[1,16,512]<=[8,16,64]T(1,0,2) last_tile_dim_replicate}\n broadcast.4 = bf16[8,384,1408]{2,1,0} broadcast(Arg_0.1), dimensions={1,2}\n custom-call.5 = bf16[8,384,1408]{2,1,0} custom-call(broadcast.4), custom_call_target=\"Sharding\", custom_call_has_side_effect=true, sharding={unknown shard_as 1}\n broadcast.2 = bf16[8,384,1408]{2,1,0} broadcast(Arg_0.1), dimensions={1,2}\n custom-call.3 = bf16[8,384,1408]{2,1,0} custom-call(broadcast.2), custom_call_target=\"Sharding\", sharding={devices=[8,1,1,1024]<=[8192] last_tile_dim_replicate}, backend_config=\"unspecified_dims=[1,2]\"\n custom-call.6 = bf16[8,384,1408]{2,1,0} custom-call(custom-call.3), custom_call_target=\"Sharding\", custom_call_has_side_effect=true, sharding={unknown shard_as 1}\n %shard-barrier-to = bf16[8,384,1408]{2,1,0} custom-call(%custom-call.6), custom_call_target=\"ShardBarrierTo\", 
custom_call_has_side_effect=true\n slice.7 = bf16[1,384,1408]{2,1,0} slice(shard-barrier-to), slice={[1:2], [0:384], [0:1408]}\n reshape.8 = bf16[384,1408]{1,0} reshape(slice.7)\n tuple.9 = (bf16[384,1408]{1,0}) tuple(reshape.8)\n get-tuple-element.10 = bf16[384,1408]{1,0} get-tuple-element(tuple.9), index=0, sharding={devices=[16,1,512]<=[8,16,64]T(1,0,2) last_tile_dim_replicate}\n ROOT tuple.13 = (bf16[384,1408]{1,0}, bf16[8,384,1408]{2,1,0}) tuple(get-tuple-element.10, custom-call.5)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(\n true, true,\n {true},\n {false, false})\n .Run(module.get()));\n EXPECT_TRUE(changed);\n XLA_VLOG_LINES(1, module->ToString());\n auto* broadcast_4 = FindInstruction(module.get(), \"broadcast.4\");\n ASSERT_NE(broadcast_4, nullptr);\n EXPECT_THAT(\n broadcast_4,\n op::Sharding(\"{devices=[8,1,16,64]<=[8192] last_tile_dim_replicate}\"));\n auto* copy = FindInstruction(module.get(), \"copy\");\n ASSERT_NE(copy, nullptr);\n EXPECT_THAT(\n copy,\n op::Sharding(\"{devices=[8,1,16,64]<=[8192] last_tile_dim_replicate}\"));\n}\nTEST_F(ShardingPropagationTest, ShardAsWithShardBarrier2) {\n const char* const hlo_string = R\"(\nHloModule module\nENTRY %elementwise {\n %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0)\n %custom-call.0 = f32[5,7,11,13]{3,2,1,0} custom-call(param0), custom_call_target=\"Sharding\", sharding={devices=[2,1,1,1,4]<=[8] last_tile_dim_replicate}, backend_config=\"unspecified_dims=[1,2,3]\"\n %shard-barrier-from = f32[5,7,11,13]{3,2,1,0} custom-call(%custom-call.0), custom_call_target=\"ShardBarrierFrom\", custom_call_has_side_effect=true\n %custom-call.2 = f32[5,7,11,13]{3,2,1,0} custom-call(shard-barrier-from), custom_call_target=\"Sharding\", custom_call_has_side_effect=true, sharding={unknown shard_as 1}\n %param1 = f32[5,7,11,13]{3,2,1,0} parameter(1)\n %custom-call.1 = f32[5,7,11,13]{3,2,1,0} custom-call(param1), custom_call_target=\"Sharding\", sharding={devices=[1,2,2,1,2]<=[2,4]T(1,0) last_tile_dim_replicate}, backend_config=\"unspecified_dims=[0]\"\n %custom-call.3 = f32[5,7,11,13]{3,2,1,0} custom-call(custom-call.1), custom_call_target=\"Sharding\", custom_call_has_side_effect=true, sharding={unknown shard_as 1}\n ROOT %tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple(%custom-call.0, %custom-call.3)\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(\n true, true,\n {true},\n {false, false})\n .Run(module.get()));\n EXPECT_TRUE(changed);\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_THAT(\n module->entry_computation()->root_instruction(),\n op::Sharding(\n \"{{devices=[2,2,2,1]<=[8]}, {devices=[1,2,2,1,2]<=[2,4]T(1,0) \"\n \"last_tile_dim_replicate}}\"));\n}\nTEST_F(ShardingPropagationTest, CallPropagation) {\n const absl::string_view hlo_string = R\"(\nHloModule module\ncalled_computation {\n p0 = bf16[20,2,68096,8512] parameter(0)\n %add_called_comp = bf16[20,2,68096,8512] add(p0, p0)\n ROOT tuple = (bf16[20,2,68096,8512]) tuple(add_called_comp)\n}\nENTRY main {\n %param0 = bf16[20,2,68096,8512] parameter(0)\n %add = bf16[20,2,68096,8512] add(param0, param0)\n ROOT %call = (bf16[20,2,68096,8512]) call(add), to_apply=%called_computation, sharding={{devices=[1,1,16,64]<=[64,16]T(1,0)}}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n 
bool changed,\n ShardingPropagation(\n true, true,\n {false},\n {false})\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* add = FindInstruction(module.get(), \"add\");\n ASSERT_NE(add, nullptr);\n EXPECT_THAT(add, op::Sharding(\"{devices=[1,1,16,64]<=[64,16]T(1,0)}\"));\n}\nTEST_F(ShardingPropagationTest, CallPropagationWithSPMDShardToFullShape) {\n const absl::string_view hlo_string = R\"(\nHloModule module\ncalled_computation {\n p0 = bf16[4096,4096] parameter(0)\n %add_called_comp = bf16[4096,4096] add(p0, p0)\n ROOT tuple = (bf16[4096,4096]) tuple(add_called_comp)\n}\nENTRY main {\n %param0 = bf16[4096,4096] parameter(0)\n %add = bf16[4096,4096] add(param0, param0)\n %custom-call.1 = bf16[4096,4096]{1,0} custom-call(add), custom_call_target=\"Sharding\", sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}\n %custom-call.2 = bf16[2048,4096]{1,0} custom-call(custom-call.1), custom_call_target=\"SPMDFullToShardShape\", sharding={manual}\n %custom-call.3 = bf16[2048,4096]{1,0} custom-call(custom-call.2), custom_call_target=\"Sharding\", sharding={manual}\n %custom-call.4 = bf16[4096,4096]{1,0} custom-call(bf16[2048,4096]{1,0} %custom-call.3), custom_call_target=\"SPMDShardToFullShape\", sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}\n ROOT %call = (bf16[4096,4096]) call(custom-call.4), to_apply=%called_computation, sharding={devices=[2,2]<=[4]}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(\n true, true,\n {false},\n {false})\n .Run(module.get()));\n XLA_VLOG_LINES(1, module->ToString());\n EXPECT_TRUE(changed);\n auto* custom_call_4 = FindInstruction(module.get(), \"custom-call.4\");\n ASSERT_NE(custom_call_4, nullptr);\n auto* operand = custom_call_4->operand(0);\n EXPECT_THAT(operand, op::Shape(\"bf16[2048,4096]\"));\n EXPECT_THAT(custom_call_4, op::Shape(\"bf16[4096,4096]\"));\n EXPECT_THAT(custom_call_4,\n op::Sharding(\"{devices=[2,1,2]<=[4] last_tile_dim_replicate}\"));\n}\nTEST_F(ShardingPropagationTest, ReplicateRngBitGeneratorSeed) {\n const char* const hlo_string = R\"(\nHloModule module\napply_or {\n x = u64[] parameter(0)\n y = u64[] parameter(1)\n ROOT x_or_y = or(x, y)\n}\nENTRY main {\n p = s32[2,2]{1,0} parameter(0), sharding={devices=[2,2]<=[4]}\n up = u64[2,2] convert(p)\n i = u64[] constant(0)\n seed = u64[2] reduce(up, i), dimensions={1}, to_apply=apply_or\n rbg = u32[2048,4096] rng-bit-generator(seed), algorithm=rng_default\n ROOT s = u32[2048,4096]{1,0} custom-call(rbg), custom_call_target=\"Sharding\", sharding={devices=[2,2]<=[4]}\n})\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n TF_ASSERT_OK_AND_ASSIGN(\n bool changed,\n ShardingPropagation(\n true, true,\n {true},\n {true})\n .Run(module.get()));\n EXPECT_TRUE(changed);\n XLA_VLOG_LINES(1, module->ToString());\n auto* instruction = FindInstruction(module.get(), \"seed\");\n EXPECT_TRUE(instruction->sharding().IsReplicated());\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/sharding_propagation.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/sharding_propagation_test.cc"},"Commit 
Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":277,"cells":{"ID":{"kind":"string","value":"4a0d1112-f66c-4f43-aa09-131a59dde473"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"save"},"File Path in Repository":{"kind":"string","value":"tensorflow/cc/experimental/libexport/save.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/cc/experimental/libexport/save_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/cc/experimental/libexport/save.h\"\n#include \"tensorflow/core/platform/env.h\"\nnamespace tensorflow {\nnamespace libexport {\nStatus Save(const std::string& export_dir) {\n TF_RETURN_IF_ERROR(Env::Default()->RecursivelyCreateDir(export_dir));\n return absl::OkStatus();\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/cc/experimental/libexport/save.h\"\n#include \"tensorflow/core/lib/core/status_test_util.h\"\n#include \"tensorflow/core/platform/env.h\"\n#include \"tensorflow/core/platform/path.h\"\n#include \"tensorflow/core/platform/test.h\"\nnamespace tensorflow {\nnamespace libexport {\nnamespace {\nTEST(SaveTest, TestDirectoryStructure) {\n const string base_dir = tensorflow::io::JoinPath(\n tensorflow::testing::TmpDir(), \"test_directory_structure\");\n TF_ASSERT_OK(Save(base_dir));\n TF_ASSERT_OK(Env::Default()->IsDirectory(base_dir));\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/experimental/libexport/save.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/experimental/libexport/save_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":278,"cells":{"ID":{"kind":"string","value":"10b07a94-816f-47d6-ab5a-942410b3ce6e"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"fuse_auto_input"},"File Path in Repository":{"kind":"string","value":"tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.h\"\n#include \n#include \n#include \n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/str_replace.h\"\n#include \"absl/types/any.h\"\n#include \"tensorflow/lite/delegates/gpu/common/model.h\"\n#include \"tensorflow/lite/delegates/gpu/common/model_transformer.h\"\n#include \"tensorflow/lite/delegates/gpu/common/types.h\"\n#include \"tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h\"\n#include \"tensorflow/lite/delegates/gpu/gl/node_shader.h\"\nnamespace tflite {\nnamespace gpu {\nnamespace gl {\nnamespace {\nstd::pair MakeValueReplacement(int n, int k) {\n return {absl::StrCat(\"value_\", n), absl::StrCat(\"value_\", k)};\n}\nstd::pair MakeDataReplacement(int n, int k) {\n return {absl::StrCat(\"input_data_\", n), absl::StrCat(\"input_data_\", k)};\n}\n} \nTransformResult FuseAutoInput::ApplyToNode(Node* node, GraphFloat32* graph) {\n auto& node_attr =\n std::any_cast(node->operation.attributes);\n auto& node_code = node_attr.code;\n if 
(node_code.input != IOStructure::AUTO) {\n return {TransformStatus::SKIPPED, \"\"};\n }\n uint3 workgroup = node_code.workgroup;\n auto node_outputs = graph->FindOutputs(node->id);\n std::vector> nodes_to_fuse;\n std::vector> input_values;\n int input_num = -1;\n for (auto input_value : graph->FindInputs(node->id)) {\n input_num++;\n const ValueId input_id = input_value->id;\n input_values.push_back({input_id, input_num});\n if (graph->FindConsumers(input_id).size() > 1) {\n continue; \n }\n Node* input_producer = graph->FindProducer(input_id);\n if (input_producer == nullptr) {\n continue; \n }\n if (graph->FindOutputs(input_producer->id).size() != 1) {\n continue; \n }\n auto& input_producer_attr = std::any_cast(\n input_producer->operation.attributes);\n if (input_producer_attr.code.output != IOStructure::AUTO) {\n continue;\n }\n if (input_producer_attr.code.workload != node_code.workload &&\n uint3() != input_producer_attr.code.workload) {\n continue;\n }\n if (input_producer_attr.code.workgroup != uint3()) {\n if (workgroup != uint3()) {\n continue;\n }\n workgroup = input_producer_attr.code.workgroup;\n }\n nodes_to_fuse.push_back({input_producer, input_num});\n input_values.pop_back(); \n }\n if (nodes_to_fuse.empty()) {\n return {TransformStatus::SKIPPED, \"\"};\n }\n {\n absl::flat_hash_set all_inputs;\n for (const auto& node_to_fuse : nodes_to_fuse) {\n for (const auto& input : graph->FindInputs(node_to_fuse.first->id)) {\n if (all_inputs.find(input->id) != all_inputs.end()) {\n return {TransformStatus::SKIPPED, \"\"};\n }\n all_inputs.insert(input->id);\n }\n }\n for (const auto& input : graph->FindInputs(node->id)) {\n if (all_inputs.find(input->id) != all_inputs.end()) {\n return {TransformStatus::SKIPPED, \"\"};\n }\n all_inputs.insert(input->id);\n }\n }\n for (auto value : graph->FindInputs(node->id)) {\n if (!graph->RemoveConsumer(node->id, value->id).ok()) {\n return {TransformStatus::INVALID, \"\"};\n }\n }\n std::string operation_type;\n std::string source_code;\n std::string values;\n std::swap(source_code, node_code.source_code);\n int extra_input_num = input_num;\n input_num = 0;\n for (auto input_and_num : nodes_to_fuse) {\n auto& input = input_and_num.first;\n auto& attr =\n std::any_cast(input->operation.attributes);\n auto super_inputs = graph->FindInputs(input->id);\n std::vector> replacements;\n for (int i = 0; i < super_inputs.size(); ++i) {\n int value_index = i == 0 ? 
input_and_num.second : ++extra_input_num;\n replacements.push_back(MakeValueReplacement(i, value_index));\n replacements.push_back(MakeDataReplacement(i, input_num));\n if (attr.code.input == IOStructure::AUTO) {\n absl::StrAppend(&values, \" value_\", value_index, \" = $input_data_\",\n input_num, \"[gid.x, gid.y, gid.z]$;\\n\");\n }\n if (!graph->AddConsumer(node->id, super_inputs[i]->id).ok()) {\n return {TransformStatus::INVALID, \"\"};\n }\n input_num++;\n }\n for (auto& param : attr.code.parameters) {\n param.name = absl::StrReplaceAll(param.name, replacements);\n }\n attr.code.source_code =\n absl::StrReplaceAll(attr.code.source_code, replacements);\n if (!MergeCode(&attr, &node_attr).ok()) {\n return {TransformStatus::INVALID, \"Unable to merge the code\"};\n }\n absl::StrAppend(&node_attr.code.source_code, \"{\\n\", attr.code.source_code,\n \"\\n}\");\n if (!operation_type.empty()) {\n operation_type += \",\";\n }\n operation_type += input->operation.type;\n if (!graph->DeleteNode(input->id).ok()) {\n return {TransformStatus::INVALID, \"\"};\n }\n }\n for (int i = 0; i < input_values.size(); i++) {\n if (node_code.input == IOStructure::AUTO) {\n absl::StrAppend(&values, \" value_\", input_values[i].second,\n \" = $input_data_\", input_num,\n \"[gid.x, gid.y, gid.z]$;\\n\");\n }\n if (!graph->AddConsumer(node->id, input_values[i].first).ok()) {\n return {TransformStatus::INVALID, \"\"};\n }\n input_num++;\n }\n node_code.input = IOStructure::ONLY_DEFINITIONS;\n absl::StrAppend(&node->operation.type, \"(\", operation_type, \")\");\n node_code.source_code =\n absl::StrCat(values, node_code.source_code, \"{\n node->operation.type, \"\\n\", source_code, \"\\n}\");\n return {TransformStatus::APPLIED, \"\"};\n}\n} \n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.h\"\n#include \n#include \n#include \n#include \"absl/types/any.h\"\n#include \"tensorflow/lite/delegates/gpu/common/model.h\"\n#include \"tensorflow/lite/delegates/gpu/common/model_transformer.h\"\n#include \"tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h\"\n#include \"tensorflow/lite/delegates/gpu/gl/node_shader.h\"\nnamespace tflite {\nnamespace gpu {\nnamespace gl {\nnamespace {\nTEST(FuseAutoInputTest, SkipsDiamond) {\n GraphFloat32 graph;\n auto* v0 = graph.NewValue();\n auto* v1 = graph.NewValue();\n auto* v2 = graph.NewValue();\n auto* v3 = graph.NewValue();\n auto* n1 = graph.NewNode();\n CompiledNodeAttributes a1;\n a1.code.output = IOStructure::AUTO;\n n1->operation.attributes = std::move(a1);\n ASSERT_OK(graph.AddConsumer(n1->id, v0->id));\n ASSERT_OK(graph.SetProducer(n1->id, v1->id));\n auto* n2 = graph.NewNode();\n CompiledNodeAttributes a2;\n a2.code.output = IOStructure::AUTO;\n n2->operation.attributes = std::move(a2);\n ASSERT_OK(graph.AddConsumer(n2->id, v0->id));\n ASSERT_OK(graph.SetProducer(n2->id, v2->id));\n auto* n3 = graph.NewNode();\n CompiledNodeAttributes a3;\n a3.code.input = IOStructure::AUTO;\n n3->operation.attributes = std::move(a3);\n ASSERT_OK(graph.AddConsumer(n3->id, v1->id));\n ASSERT_OK(graph.AddConsumer(n3->id, v2->id));\n ASSERT_OK(graph.SetProducer(n3->id, v3->id));\n FuseAutoInput fuse_auto_input;\n EXPECT_EQ(fuse_auto_input.ApplyToNode(n3, &graph).status,\n TransformStatus::SKIPPED);\n}\nTEST(FuseAutoInputTest, SkipsTriangle) {\n GraphFloat32 graph;\n auto* v0 = graph.NewValue();\n auto* v1 = graph.NewValue();\n auto* v2 = graph.NewValue();\n auto* n1 = graph.NewNode();\n 
CompiledNodeAttributes a1;\n a1.code.output = IOStructure::AUTO;\n n1->operation.attributes = std::move(a1);\n ASSERT_OK(graph.AddConsumer(n1->id, v0->id));\n ASSERT_OK(graph.SetProducer(n1->id, v1->id));\n auto* n2 = graph.NewNode();\n CompiledNodeAttributes a2;\n a2.code.input = IOStructure::AUTO;\n n2->operation.attributes = std::move(a2);\n ASSERT_OK(graph.AddConsumer(n2->id, v0->id));\n ASSERT_OK(graph.AddConsumer(n2->id, v1->id));\n ASSERT_OK(graph.SetProducer(n2->id, v2->id));\n FuseAutoInput fuse_auto_input;\n EXPECT_EQ(fuse_auto_input.ApplyToNode(n2, &graph).status,\n TransformStatus::SKIPPED);\n}\n} \n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":279,"cells":{"ID":{"kind":"string","value":"2e05ec21-ce2b-4ad2-803e-f2288754db8c"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/arolla"},"File Name":{"kind":"string","value":"backend_operator"},"File Path in Repository":{"kind":"string","value":"arolla/expr/operator_loader/backend_operator.cc"},"File Path for Unit Test":{"kind":"string","value":"arolla/expr/operator_loader/backend_operator_test.cc"},"Code":{"kind":"string","value":"#include \"arolla/expr/operator_loader/backend_operator.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/str_join.h\"\n#include \"absl/strings/string_view.h\"\n#include \"absl/types/span.h\"\n#include \"arolla/expr/basic_expr_operator.h\"\n#include \"arolla/expr/expr.h\"\n#include \"arolla/expr/expr_attributes.h\"\n#include \"arolla/expr/expr_node.h\"\n#include \"arolla/expr/expr_operator.h\"\n#include \"arolla/expr/expr_operator_signature.h\"\n#include \"arolla/expr/operator_loader/parameter_qtypes.h\"\n#include \"arolla/expr/operator_loader/qtype_constraint.h\"\n#include \"arolla/expr/operator_loader/qtype_inference.h\"\n#include \"arolla/util/fingerprint.h\"\n#include \"arolla/util/status_macros_backport.h\"\nnamespace arolla::operator_loader {\nusing ::arolla::expr::ExprAttributes;\nusing ::arolla::expr::ExprNodePtr;\nusing ::arolla::expr::ExprOperatorPtr;\nusing ::arolla::expr::ExprOperatorSignature;\nusing ::arolla::expr::GetPlaceholderKeys;\nabsl::StatusOr BackendOperator::Make(\n absl::string_view name, ExprOperatorSignature signature,\n absl::string_view doc, std::vector qtype_constraints,\n ExprNodePtr qtype_inference_expr) {\n RETURN_IF_ERROR(ValidateSignature(signature));\n absl::flat_hash_set parameter_names;\n for (const auto& param : signature.parameters) {\n parameter_names.insert(param.name);\n }\n std::set undefined_parameter_names;\n for (const auto& qtype_constraint : qtype_constraints) {\n for (auto&& placeholder_key :\n GetPlaceholderKeys(qtype_constraint.predicate_expr)) {\n if (!parameter_names.contains(placeholder_key)) {\n undefined_parameter_names.insert(std::move(placeholder_key));\n }\n }\n }\n for (auto&& placeholder_key : GetPlaceholderKeys(qtype_inference_expr)) {\n if (!parameter_names.contains(placeholder_key)) {\n 
undefined_parameter_names.insert(std::move(placeholder_key));\n }\n }\n if (!undefined_parameter_names.empty()) {\n return absl::InvalidArgumentError(\n \"unexpected parameters: P.\" +\n absl::StrJoin(undefined_parameter_names, \", P.\"));\n }\n ASSIGN_OR_RETURN(\n auto qtype_inference_fn,\n MakeQTypeInferenceFn(qtype_constraints, qtype_inference_expr));\n FingerprintHasher hasher(\"::arolla::operator_loader::BackendOperator\");\n hasher.Combine(name, signature, doc, qtype_inference_expr->fingerprint(),\n qtype_constraints.size());\n for (const auto& qtype_constraint : qtype_constraints) {\n hasher.Combine(qtype_constraint.predicate_expr->fingerprint(),\n qtype_constraint.error_message);\n }\n return std::make_shared(\n PrivateConstructorTag{}, name, std::move(signature), doc,\n std::move(hasher).Finish(), std::move(qtype_constraints),\n std::move(qtype_inference_expr), std::move(qtype_inference_fn));\n}\nBackendOperator::BackendOperator(PrivateConstructorTag, absl::string_view name,\n ExprOperatorSignature signature,\n absl::string_view doc, Fingerprint fingerprint,\n std::vector qtype_constraints,\n ExprNodePtr qtype_inference_expr,\n QTypeInferenceFn qtype_inference_fn)\n : ExprOperatorWithFixedSignature(name, std::move(signature), doc,\n fingerprint),\n qtype_constraints_(std::move(qtype_constraints)),\n qtype_inference_expr_(std::move(qtype_inference_expr)),\n qtype_inference_fn_(std::move(qtype_inference_fn)) {}\nabsl::StatusOr BackendOperator::InferAttributes(\n absl::Span inputs) const {\n RETURN_IF_ERROR(ValidateOpInputsCount(inputs));\n ASSIGN_OR_RETURN(auto parameter_qtypes,\n ExtractParameterQTypes(signature(), inputs));\n ASSIGN_OR_RETURN(auto* output_qtype, qtype_inference_fn_(parameter_qtypes));\n return ExprAttributes(output_qtype);\n}\nabsl::string_view BackendOperator::py_qvalue_specialization_key() const {\n return \"::arolla::operator_loader::BackendOperator\";\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"arolla/expr/operator_loader/backend_operator.h\"\n#include \n#include \n#include \n#include \n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/status_matchers.h\"\n#include \"absl/status/statusor.h\"\n#include \"arolla/array/array.h\"\n#include \"arolla/array/qtype/types.h\"\n#include \"arolla/dense_array/dense_array.h\"\n#include \"arolla/dense_array/qtype/types.h\"\n#include \"arolla/expr/eval/invoke.h\"\n#include \"arolla/expr/expr.h\"\n#include \"arolla/expr/expr_operator.h\"\n#include \"arolla/expr/expr_operator_signature.h\"\n#include \"arolla/expr/operator_loader/qtype_constraint.h\"\n#include \"arolla/memory/optional_value.h\"\n#include \"arolla/qtype/qtype.h\"\n#include \"arolla/qtype/qtype_traits.h\"\n#include \"arolla/qtype/tuple_qtype.h\"\n#include \"arolla/util/unit.h\"\n#include \"arolla/util/status_macros_backport.h\"\nnamespace arolla::operator_loader {\nnamespace {\nusing ::absl_testing::IsOkAndHolds;\nusing ::absl_testing::StatusIs;\nusing ::arolla::expr::CallOp;\nusing ::arolla::expr::ExprOperatorPtr;\nusing ::arolla::expr::ExprOperatorSignature;\nusing ::arolla::expr::Literal;\nusing ::arolla::expr::Placeholder;\nusing ::testing::HasSubstr;\nclass BackendOperatorTest : public ::testing::Test {\n protected:\n absl::StatusOr> MakeOp() {\n ASSIGN_OR_RETURN(auto qtype_constraint_predicate_expr_1,\n CallOp(\"core.not_equal\", {CallOp(\"qtype.get_scalar_qtype\",\n {Placeholder(\"x\")}),\n Literal(GetNothingQType())}));\n ASSIGN_OR_RETURN(auto 
qtype_constraint_predicate_expr_2,\n CallOp(\"core.not_equal\", {CallOp(\"qtype.get_scalar_qtype\",\n {Placeholder(\"y\")}),\n Literal(GetNothingQType())}));\n ASSIGN_OR_RETURN(\n auto qtype_constraint_predicate_expr_3,\n CallOp(\"core.not_equal\", {CallOp(\"qtype.broadcast_qtype_like\",\n {Placeholder(\"y\"), Placeholder(\"x\")}),\n Literal(GetNothingQType())}));\n std::vector qtype_constraints = {\n {qtype_constraint_predicate_expr_1,\n \"expected `x` to be a scalar based type, got {x}\"},\n {qtype_constraint_predicate_expr_2,\n \"expected `y` to be a UNIT based type, got {y}\"},\n {qtype_constraint_predicate_expr_3,\n \"incompatible types x:{x} and y:{y}\"},\n };\n ASSIGN_OR_RETURN(auto qtype_inference_expr,\n CallOp(\"qtype.broadcast_qtype_like\",\n {Placeholder(\"y\"), Placeholder(\"x\")}));\n ASSIGN_OR_RETURN(\n auto op, BackendOperator::Make(\n \"core.presence_and\", ExprOperatorSignature{{\"x\"}, {\"y\"}},\n \"presence-and-doc-string\", std::move(qtype_constraints),\n std::move(qtype_inference_expr)));\n return std::dynamic_pointer_cast(op);\n }\n};\nTEST_F(BackendOperatorTest, GetDoc) {\n ASSERT_OK_AND_ASSIGN(auto op, MakeOp());\n ASSERT_THAT(op.get()->doc(), \"presence-and-doc-string\");\n ASSERT_THAT(op->GetDoc(), IsOkAndHolds(\"presence-and-doc-string\"));\n}\nTEST_F(BackendOperatorTest, QTypeInference) {\n {\n ASSERT_OK_AND_ASSIGN(auto expr,\n CallOp(MakeOp(), {Literal(1.5f), Literal(kUnit)}));\n EXPECT_EQ(expr->qtype(), GetQType());\n }\n {\n ASSERT_OK_AND_ASSIGN(\n auto expr,\n CallOp(MakeOp(), {Literal(1.5f), Literal(OptionalValue())}));\n EXPECT_EQ(expr->qtype(), GetQType>());\n }\n}\nTEST_F(BackendOperatorTest, QTypeConstraint) {\n EXPECT_THAT(\n CallOp(MakeOp(), {Literal(MakeTupleFromFields()), Literal(kUnit)}),\n StatusIs(\n absl::StatusCode::kInvalidArgument,\n HasSubstr(\"expected `x` to be a scalar based type, got tuple<>\")));\n EXPECT_THAT(\n CallOp(MakeOp(), {Literal(1.5f), Literal(MakeTupleFromFields())}),\n StatusIs(absl::StatusCode::kInvalidArgument,\n HasSubstr(\"expected `y` to be a UNIT based type, got tuple<>\")));\n EXPECT_THAT(\n CallOp(MakeOp(), {Literal(Array()), Literal(DenseArray())}),\n StatusIs(\n absl::StatusCode::kInvalidArgument,\n HasSubstr(\n \"incompatible types x:ARRAY_FLOAT32 and y:DENSE_ARRAY_UNIT\")));\n}\nTEST_F(BackendOperatorTest, Eval) {\n ASSERT_OK_AND_ASSIGN(\n auto expr,\n CallOp(MakeOp(), {Literal(1.5f), Literal(OptionalValue())}));\n ASSERT_OK_AND_ASSIGN(auto result_tv, Invoke(expr, {}));\n ASSERT_OK_AND_ASSIGN(auto result, result_tv.As>());\n EXPECT_EQ(result.get(), std::nullopt);\n}\nTEST_F(BackendOperatorTest, UnexpectedParameters) {\n ASSERT_OK_AND_ASSIGN(auto op, MakeOp());\n auto& backend_op = dynamic_cast(*op);\n EXPECT_THAT(BackendOperator::Make(\"core.presence_and\",\n ExprOperatorSignature{{\"a\"}, {\"b\"}},\n \"docstring\", backend_op.qtype_constraints(),\n backend_op.qtype_inference_expr()),\n StatusIs(absl::StatusCode::kInvalidArgument,\n HasSubstr(\"unexpected parameters: P.x, P.y\")));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operator_loader/backend_operator.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operator_loader/backend_operator_test.cc"},"Commit 
Hash":{"kind":"string","value":"1ca990dbeca224035efdabffecc7f3738df6b52c"}}},{"rowIdx":280,"cells":{"ID":{"kind":"string","value":"74a88cec-4e14-40d9-854d-624c46d9da7a"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/arolla"},"File Name":{"kind":"string","value":"regex"},"File Path in Repository":{"kind":"string","value":"arolla/qtype/strings/regex.cc"},"File Path for Unit Test":{"kind":"string","value":"arolla/qtype/strings/regex_test.cc"},"Code":{"kind":"string","value":"#include \"arolla/qtype/strings/regex.h\"\n#include \n#include \n#include \"absl/base/nullability.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/string_view.h\"\n#include \"arolla/qtype/simple_qtype.h\"\n#include \"arolla/util/fingerprint.h\"\n#include \"arolla/util/repr.h\"\n#include \"re2/re2.h\"\nnamespace arolla {\nnamespace {\nclass RE2Regex final : public Regex {\n public:\n explicit RE2Regex(absl::string_view pattern) : re2_(pattern, RE2::Quiet) {}\n bool ok() const { return re2_.ok(); }\n absl::string_view error() const { return re2_.error(); }\n absl::string_view pattern() const final { return re2_.pattern(); }\n int NumberOfCapturingGroups() const final {\n return re2_.NumberOfCapturingGroups();\n }\n bool PartialMatch(absl::string_view text) const final {\n return re2_.PartialMatch(text, re2_);\n }\n bool PartialMatch(absl::string_view text, std::string* match) const final {\n return RE2::PartialMatch(text, re2_, match);\n }\n private:\n RE2 re2_;\n};\n} \nabsl::StatusOr> CompileRegex(\n absl::string_view pattern) {\n auto result = std::make_shared(pattern);\n if (result->ok()) {\n return result;\n }\n return absl::InvalidArgumentError(absl::StrCat(\n \"invalid regular expression: `\", pattern, \"`; \", result->error()));\n}\nvoid FingerprintHasherTraits::operator()(\n FingerprintHasher* hasher, const RegexPtr& value) const {\n if (value != nullptr) {\n hasher->Combine(value->pattern());\n }\n}\nReprToken ReprTraits::operator()(const RegexPtr& value) const {\n if (value == nullptr) {\n return {\"regex{}\"};\n }\n return {absl::StrCat(\"regex{`\", value->pattern(), \"`}\")};\n}\nAROLLA_DEFINE_SIMPLE_QTYPE(REGEX, RegexPtr)\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"arolla/qtype/strings/regex.h\"\n#include \n#include \"gmock/gmock.h\"\n#include \"gtest/gtest.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/status_matchers.h\"\n#include \"arolla/qtype/qtype_traits.h\"\n#include \"arolla/qtype/typed_value.h\"\n#include \"arolla/util/fingerprint.h\"\n#include \"arolla/util/repr.h\"\nusing ::absl_testing::StatusIs;\nusing ::testing::HasSubstr;\nnamespace arolla {\nnamespace {\nTEST(Regex, NoCapturingGroups) {\n ASSERT_OK_AND_ASSIGN(auto regex, CompileRegex(\"\\\\d+ bottles of beer\"));\n ASSERT_NE(regex, nullptr);\n EXPECT_EQ(regex->NumberOfCapturingGroups(), 0);\n EXPECT_TRUE(regex->PartialMatch(\"100 bottles of beer\"));\n std::string match;\n EXPECT_FALSE(regex->PartialMatch(\"100 bottles of beer\", &match));\n}\nTEST(Regex, OneCapturingGroup) {\n ASSERT_OK_AND_ASSIGN(auto regex, CompileRegex(\"(\\\\d+) bottles of beer\"));\n ASSERT_NE(regex, nullptr);\n EXPECT_EQ(regex->NumberOfCapturingGroups(), 1);\n EXPECT_TRUE(regex->PartialMatch(\"100 bottles of beer\"));\n std::string match;\n EXPECT_TRUE(regex->PartialMatch(\"100 bottles of beer\", &match));\n EXPECT_EQ(match, \"100\");\n}\nTEST(Regex, ManyCapturingGroup) {\n 
ASSERT_OK_AND_ASSIGN(auto regex, CompileRegex(\"(\\\\d+) (bottles) (of) beer\"));\n ASSERT_NE(regex, nullptr);\n EXPECT_EQ(regex->NumberOfCapturingGroups(), 3);\n EXPECT_TRUE(regex->PartialMatch(\"100 bottles of beer\"));\n std::string match;\n EXPECT_TRUE(regex->PartialMatch(\"100 bottles of beer\", &match));\n EXPECT_EQ(match, \"100\");\n}\nTEST(Regex, Repr) {\n ASSERT_OK_AND_ASSIGN(auto regex1, CompileRegex(\"abc\"));\n ASSERT_OK_AND_ASSIGN(auto regex2, CompileRegex(\"a.c\"));\n EXPECT_EQ(regex1->pattern(), \"abc\");\n EXPECT_EQ(regex2->pattern(), \"a.c\");\n EXPECT_EQ(Repr(RegexPtr{}), \"regex{}\");\n EXPECT_EQ(Repr(regex1), \"regex{`abc`}\");\n EXPECT_EQ(Repr(regex2), \"regex{`a.c`}\");\n}\nTEST(Regex, Fingerprint) {\n ASSERT_OK_AND_ASSIGN(auto regex1_1, CompileRegex(\"abc\"));\n ASSERT_OK_AND_ASSIGN(auto regex1_2, CompileRegex(\"abc\"));\n ASSERT_OK_AND_ASSIGN(auto regex2_1, CompileRegex(\"a.c\"));\n ASSERT_OK_AND_ASSIGN(auto regex2_2, CompileRegex(\"a.c\"));\n auto fingerprint0_1 = FingerprintHasher(\"salt\").Combine(RegexPtr{}).Finish();\n auto fingerprint0_2 = FingerprintHasher(\"salt\").Combine(RegexPtr{}).Finish();\n auto fingerprint1_1 = FingerprintHasher(\"salt\").Combine(regex1_1).Finish();\n auto fingerprint1_2 = FingerprintHasher(\"salt\").Combine(regex1_2).Finish();\n auto fingerprint2_1 = FingerprintHasher(\"salt\").Combine(regex2_1).Finish();\n auto fingerprint2_2 = FingerprintHasher(\"salt\").Combine(regex2_2).Finish();\n EXPECT_EQ(fingerprint0_1, fingerprint0_2);\n EXPECT_EQ(fingerprint1_1, fingerprint1_2);\n EXPECT_EQ(fingerprint2_1, fingerprint2_2);\n EXPECT_NE(fingerprint0_1, fingerprint1_1);\n EXPECT_NE(fingerprint1_1, fingerprint2_1);\n EXPECT_NE(fingerprint2_1, fingerprint0_1);\n}\nTEST(Regex, QType) {\n EXPECT_EQ(GetQType()->name(), \"REGEX\");\n EXPECT_EQ(GetQType()->type_info(), typeid(RegexPtr));\n ASSERT_OK_AND_ASSIGN(auto regex, CompileRegex(\"a.c\"));\n auto qvalue = TypedValue::FromValue(regex);\n EXPECT_EQ(qvalue.Repr(), \"regex{`a.c`}\");\n}\nTEST(Regex, CompilationError) {\n EXPECT_THAT(CompileRegex(\"ab\\\\αcd\"),\n StatusIs(absl::StatusCode::kInvalidArgument,\n HasSubstr(\"invalid regular expression: `ab\\\\αcd`;\")));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/strings/regex.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/strings/regex_test.cc"},"Commit Hash":{"kind":"string","value":"1ca990dbeca224035efdabffecc7f3738df6b52c"}}},{"rowIdx":281,"cells":{"ID":{"kind":"string","value":"16498d21-1d10-4b45-ae7a-9b43a041a5b6"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"memory_space_propagation"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/memory_space_propagation.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/memory_space_propagation_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/memory_space_propagation.h\"\n#include \n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\nnamespace xla {\nabsl::StatusOr MemorySpacePropagation::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n bool modified = false;\n TF_ASSIGN_OR_RETURN(auto dataflow_analysis,\n HloDataflowAnalysis::Run(*module, false,\n true));\n dataflow_analysis_ = 
std::move(dataflow_analysis);\n for (HloComputation* computation :\n module->MakeNonfusionComputations(execution_threads)) {\n for (HloInstruction* instruction : computation->instructions()) {\n if (instruction->opcode() == HloOpcode::kFusion) {\n for (int operand_idx = 0;\n operand_idx < instruction->fused_parameters().size();\n ++operand_idx) {\n ShapeUtil::ForEachLeafShape(\n instruction->operand(operand_idx)->shape(),\n [&](const Shape& sub_shape, const ShapeIndex& index) {\n int64_t memory_space = sub_shape.layout().memory_space();\n modified |=\n Propagate(index, instruction->fused_parameter(operand_idx),\n memory_space);\n });\n }\n ShapeUtil::ForEachLeafShape(\n instruction->shape(),\n [&](const Shape& sub_shape, const ShapeIndex& index) {\n int64_t memory_space = sub_shape.layout().memory_space();\n modified |= Propagate(index, instruction->fused_expression_root(),\n memory_space);\n });\n }\n }\n }\n return modified;\n}\nbool MemorySpacePropagation::Propagate(ShapeIndexView index,\n const HloInstruction* callee_instruction,\n int64_t memory_space) const {\n bool modified = false;\n const HloValue& value = dataflow_analysis_->GetUniqueValueAt(\n callee_instruction, ShapeIndex(index));\n for (const HloPosition& position : value.positions()) {\n HloInstruction* instruction = position.instruction;\n Shape* shape = ShapeUtil::GetMutableSubshape(instruction->mutable_shape(),\n position.index);\n if (shape->layout().memory_space() == memory_space) {\n continue;\n }\n shape->mutable_layout()->set_memory_space(memory_space);\n modified = true;\n if (instruction->opcode() == HloOpcode::kFusion) {\n Propagate(position.index, instruction->fused_expression_root(),\n memory_space);\n }\n const HloInstruction* parent_fusion =\n instruction->parent()->FusionInstruction();\n if (instruction == instruction->parent()->root_instruction() &&\n parent_fusion->parent()->IsFusionComputation()) {\n Propagate(position.index, parent_fusion, memory_space);\n }\n if (instruction->opcode() == HloOpcode::kParameter &&\n parent_fusion->parent()->IsFusionComputation()) {\n const HloInstruction* fusion_operand =\n parent_fusion->operand(instruction->parameter_number());\n Propagate(position.index, fusion_operand, memory_space);\n }\n }\n for (const HloUse& use : value.GetUses()) {\n if (use.instruction->opcode() == HloOpcode::kFusion) {\n modified |= Propagate(\n use.operand_index,\n use.instruction->fused_parameter(use.operand_number), memory_space);\n }\n }\n return modified;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/memory_space_propagation.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\nnamespace xla {\nnamespace {\nclass MemorySpacePropagationTest : public HloTestBase {\n public:\n MemorySpacePropagationTest()\n : HloTestBase(),\n verifier_(false, false) {\n }\n absl::Status Verify(HloModule* module) {\n return verifier_.Run(module).status();\n }\n private:\n HloVerifier verifier_;\n};\nTEST_F(MemorySpacePropagationTest, NoMemorySpace) {\n absl::string_view hlo_string = R\"(\n HloModule NoMemorySpace\n %fused_computation {\n %param_1.3 = s32[1]{0:T(128)} parameter(1)\n %constant.2 = s32[]{:T(128)} constant(-2147483648)\n %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5\n %param_2.3 = s32[5]{0:T(128)} parameter(2)\n %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0\n %maximum.1 = 
s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)\n %param_0.1 = s32[6]{0:T(128)} parameter(0)\n ROOT %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)\n }\n ENTRY %entry {\n %param0 = s32[6]{0:T(128)} parameter(0)\n %param1 = s32[1]{0:T(128)} parameter(1)\n %param2 = s32[5]{0:T(128)} parameter(2)\n %arg0 = s32[6]{0:T(128)} copy(%param0)\n %arg1 = s32[1]{0:T(128)} copy(%param1)\n %arg2 = s32[5]{0:T(128)} copy(%param2)\n %fusion = s32[6]{0:T(128)} fusion(s32[6]{0:T(128)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)} %arg2), kind=kLoop, calls=%fused_computation\n ROOT %root = s32[6]{0:T(128)} copy(%fusion)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnVerifiedModule(hlo_string));\n MemorySpacePropagation memory_space_propagation;\n EXPECT_FALSE(memory_space_propagation.Run(module.get()).value());\n TF_ASSERT_OK_AND_ASSIGN(auto ref, ParseAndReturnVerifiedModule(hlo_string));\n EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));\n}\nTEST_F(MemorySpacePropagationTest, NonTupleOutput) {\n absl::string_view hlo_string = R\"(\n HloModule NonTupleOutput\n %fused_computation {\n %param_1.3 = s32[1]{0:T(128)} parameter(1)\n %constant.2 = s32[]{:T(128)} constant(-2147483648)\n %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5\n %param_2.3 = s32[5]{0:T(128)} parameter(2)\n %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0\n %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)\n %param_0.1 = s32[6]{0:T(128)} parameter(0)\n ROOT %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)\n }\n ENTRY %entry {\n %param0 = s32[6]{0:T(128)} parameter(0)\n %param1 = s32[1]{0:T(128)} parameter(1)\n %param2 = s32[5]{0:T(128)} parameter(2)\n %arg0 = s32[6]{0:T(128)S(1)} copy(%param0)\n %arg1 = s32[1]{0:T(128)} copy(%param1)\n %arg2 = s32[5]{0:T(128)S(1)} copy(%param2)\n %fusion = s32[6]{0:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation\n ROOT %root = s32[6]{0:T(128)} copy(%fusion)\n }\n )\";\n absl::string_view expected_hlo_string = R\"(\n HloModule NonTupleOutput\n %fused_computation {\n %param_1.3 = s32[1]{0:T(128)} parameter(1)\n %constant.2 = s32[]{:T(128)} constant(-2147483648)\n %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5\n %param_2.3 = s32[5]{0:T(128)S(1)} parameter(2)\n %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0\n %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)\n %param_0.1 = s32[6]{0:T(128)S(1)} parameter(0)\n ROOT %add.0 = s32[6]{0:T(128)S(1)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)\n }\n ENTRY %entry {\n %param0 = s32[6]{0:T(128)} parameter(0)\n %param1 = s32[1]{0:T(128)} parameter(1)\n %param2 = s32[5]{0:T(128)} parameter(2)\n %arg0 = s32[6]{0:T(128)S(1)} copy(%param0)\n %arg1 = s32[1]{0:T(128)} copy(%param1)\n %arg2 = s32[5]{0:T(128)S(1)} copy(%param2)\n %fusion = s32[6]{0:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation\n ROOT %root = s32[6]{0:T(128)} copy(%fusion)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnUnverifiedModule(hlo_string));\n MemorySpacePropagation 
memory_space_propagation;\n EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());\n TF_EXPECT_OK(Verify(module.get()));\n TF_ASSERT_OK_AND_ASSIGN(auto ref,\n ParseAndReturnVerifiedModule(expected_hlo_string));\n EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));\n}\nTEST_F(MemorySpacePropagationTest, TupleOutput) {\n absl::string_view hlo_string = R\"(\n HloModule TupleOutput\n %fused_computation {\n %param_1.3 = s32[1]{0:T(128)} parameter(1)\n %constant.2 = s32[]{:T(128)} constant(-2147483648)\n %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5\n %param_2.3 = s32[5]{0:T(128)} parameter(2)\n %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0\n %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)\n %param_0.1 = s32[6]{0:T(128)} parameter(0)\n %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)\n %multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)\n ROOT %tuple = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) tuple(%add.0, %multiply.0)\n }\n ENTRY %entry {\n %param0 = s32[6]{0:T(128)} parameter(0)\n %param1 = s32[1]{0:T(128)} parameter(1)\n %param2 = s32[5]{0:T(128)} parameter(2)\n %arg0 = s32[6]{0:T(128)S(1)} copy(%param0)\n %arg1 = s32[1]{0:T(128)} copy(%param1)\n %arg2 = s32[5]{0:T(128)S(1)} copy(%param2)\n %fusion = (s32[6]{0:T(128)S(1)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation\n %gte0 = s32[6]{0:T(128)S(1)} get-tuple-element(%fusion), index=0\n %gte1 = s32[6]{0:T(128)} get-tuple-element(%fusion), index=1\n ROOT %root = s32[6]{0:T(128)} add(%gte0, %gte1)\n }\n )\";\n absl::string_view expected_hlo_string = R\"(\n HloModule TupleOutput\n %fused_computation {\n %param_1.3 = s32[1]{0:T(128)} parameter(1)\n %constant.2 = s32[]{:T(128)} constant(-2147483648)\n %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5\n %param_2.3 = s32[5]{0:T(128)S(1)} parameter(2)\n %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0\n %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)\n %param_0.1 = s32[6]{0:T(128)S(1)} parameter(0)\n %add.0 = s32[6]{0:T(128)S(1)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)\n %multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)\n ROOT %tuple = (s32[6]{0:T(128)S(1)}, s32[6]{0:T(128)}) tuple(%add.0, %multiply.0)\n }\n ENTRY %entry {\n %param0 = s32[6]{0:T(128)} parameter(0)\n %param1 = s32[1]{0:T(128)} parameter(1)\n %param2 = s32[5]{0:T(128)} parameter(2)\n %arg0 = s32[6]{0:T(128)S(1)} copy(%param0)\n %arg1 = s32[1]{0:T(128)} copy(%param1)\n %arg2 = s32[5]{0:T(128)S(1)} copy(%param2)\n %fusion = (s32[6]{0:T(128)S(1)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation\n %gte0 = s32[6]{0:T(128)S(1)} get-tuple-element(%fusion), index=0\n %gte1 = s32[6]{0:T(128)} get-tuple-element(%fusion), index=1\n ROOT %root = s32[6]{0:T(128)} add(%gte0, %gte1)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnUnverifiedModule(hlo_string));\n MemorySpacePropagation memory_space_propagation;\n EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());\n 
TF_EXPECT_OK(Verify(module.get()));\n TF_ASSERT_OK_AND_ASSIGN(auto ref,\n ParseAndReturnVerifiedModule(expected_hlo_string));\n EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));\n}\nTEST_F(MemorySpacePropagationTest, NestedInputFusion) {\n absl::string_view hlo_string = R\"(\n HloModule NestedFusion\n %bitcast_fusion {\n %bf_param = s32[3,2]{0,1:T(128)} parameter(0)\n ROOT %bitcast = s32[6]{0:T(128)} bitcast(%bf_param)\n }\n %fused_computation {\n %param_1.3 = s32[1]{0:T(128)} parameter(1)\n %constant.2 = s32[]{:T(128)} constant(-2147483648)\n %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5\n %param_2.3 = s32[5]{0:T(128)} parameter(2)\n %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0\n %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)\n %param_0.1 = s32[3,2]{0,1:T(128)} parameter(0)\n %fusion.1 = s32[6]{0:T(128)} fusion(%param_0.1), kind=kLoop, calls=bitcast_fusion\n ROOT %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %fusion.1)\n }\n ENTRY %entry {\n %param0 = s32[3,2]{0,1:T(128)} parameter(0)\n %param1 = s32[1]{0:T(128)} parameter(1)\n %param2 = s32[5]{0:T(128)} parameter(2)\n %arg0 = s32[3,2]{0,1:T(128)S(1)} copy(%param0)\n %arg1 = s32[1]{0:T(128)} copy(%param1)\n %arg2 = s32[5]{0:T(128)S(1)} copy(%param2)\n %fusion = s32[6]{0:T(128)S(1)} fusion(s32[3,2]{0,1:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation\n ROOT %root = s32[6]{0:T(128)} copy(%fusion)\n }\n )\";\n absl::string_view expected_hlo_string = R\"(\n HloModule NestedFusion\n %bitcast_fusion {\n %bf_param = s32[3,2]{0,1:T(128)S(1)} parameter(0)\n ROOT %bitcast = s32[6]{0:T(128)} bitcast(%bf_param)\n }\n %fused_computation {\n %param_1.3 = s32[1]{0:T(128)} parameter(1)\n %constant.2 = s32[]{:T(128)} constant(-2147483648)\n %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5\n %param_2.3 = s32[5]{0:T(128)S(1)} parameter(2)\n %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0\n %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)\n %param_0.1 = s32[3,2]{0,1:T(128)S(1)} parameter(0)\n %fusion.1 = s32[6]{0:T(128)} fusion(%param_0.1), kind=kLoop, calls=bitcast_fusion\n ROOT %add.0 = s32[6]{0:T(128)S(1)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %fusion.1)\n }\n ENTRY %entry {\n %param0 = s32[3,2]{0,1:T(128)} parameter(0)\n %param1 = s32[1]{0:T(128)} parameter(1)\n %param2 = s32[5]{0:T(128)} parameter(2)\n %arg0 = s32[3,2]{0,1:T(128)S(1)} copy(%param0)\n %arg1 = s32[1]{0:T(128)} copy(%param1)\n %arg2 = s32[5]{0:T(128)S(1)} copy(%param2)\n %fusion = s32[6]{0:T(128)S(1)} fusion(s32[3,2]{0,1:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation\n ROOT %root = s32[6]{0:T(128)} copy(%fusion)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnUnverifiedModule(hlo_string));\n MemorySpacePropagation memory_space_propagation;\n EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());\n TF_EXPECT_OK(Verify(module.get()));\n TF_ASSERT_OK_AND_ASSIGN(auto ref,\n ParseAndReturnVerifiedModule(expected_hlo_string));\n EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));\n}\nTEST_F(MemorySpacePropagationTest, NestedOutputFusion) {\n absl::string_view hlo_string = R\"(\n HloModule NestedFusion\n 
%bitcast_fusion {\n %bf_param = s32[6]{0:T(128)} parameter(0)\n ROOT %bitcast = s32[3,2]{0,1:T(128)} bitcast(%bf_param)\n }\n %fused_computation {\n %param_1.3 = s32[1]{0:T(128)} parameter(1)\n %constant.2 = s32[]{:T(128)} constant(-2147483648)\n %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5\n %param_2.3 = s32[5]{0:T(128)} parameter(2)\n %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0\n %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)\n %param_0.1 = s32[6]{0:T(128)} parameter(0)\n %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)\n ROOT %fusion.1 = s32[3,2]{0,1:T(128)} fusion(%add.0), kind=kLoop, calls=bitcast_fusion\n }\n ENTRY %entry {\n %param0 = s32[6]{0:T(128)} parameter(0)\n %param1 = s32[1]{0:T(128)} parameter(1)\n %param2 = s32[5]{0:T(128)} parameter(2)\n %arg0 = s32[6]{0:T(128)S(1)} copy(%param0)\n %arg1 = s32[1]{0:T(128)} copy(%param1)\n %arg2 = s32[5]{0:T(128)S(1)} copy(%param2)\n %fusion = s32[3,2]{0,1:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation\n ROOT %root = s32[3,2]{0,1:T(128)} copy(%fusion)\n }\n )\";\n absl::string_view expected_hlo_string = R\"(\n HloModule NestedFusion\n %bitcast_fusion {\n %bf_param = s32[6]{0:T(128)} parameter(0)\n ROOT %bitcast = s32[3,2]{0,1:T(128)S(1)} bitcast(%bf_param)\n }\n %fused_computation {\n %param_1.3 = s32[1]{0:T(128)} parameter(1)\n %constant.2 = s32[]{:T(128)} constant(-2147483648)\n %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5\n %param_2.3 = s32[5]{0:T(128)S(1)} parameter(2)\n %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0\n %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)\n %param_0.1 = s32[6]{0:T(128)S(1)} parameter(0)\n %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)S(1)} %param_0.1)\n ROOT %fusion.1 = s32[3,2]{0,1:T(128)S(1)} fusion(%add.0), kind=kLoop, calls=bitcast_fusion\n }\n ENTRY %entry {\n %param0 = s32[6]{0:T(128)} parameter(0)\n %param1 = s32[1]{0:T(128)} parameter(1)\n %param2 = s32[5]{0:T(128)} parameter(2)\n %arg0 = s32[6]{0:T(128)S(1)} copy(%param0)\n %arg1 = s32[1]{0:T(128)} copy(%param1)\n %arg2 = s32[5]{0:T(128)S(1)} copy(%param2)\n %fusion = s32[3,2]{0,1:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation\n ROOT %root = s32[3,2]{0,1:T(128)} copy(%fusion)\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnUnverifiedModule(hlo_string));\n MemorySpacePropagation memory_space_propagation;\n EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());\n TF_EXPECT_OK(Verify(module.get()));\n TF_ASSERT_OK_AND_ASSIGN(auto ref,\n ParseAndReturnVerifiedModule(expected_hlo_string));\n EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));\n}\nTEST_F(MemorySpacePropagationTest, BitcastInFusion) {\n absl::string_view hlo_string = R\"(\n HloModule TupleOutput\n %fused_computation {\n %param_1.3 = s32[1]{0:T(128)} parameter(1)\n %constant.2 = s32[]{:T(128)} constant(-2147483648)\n %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5\n %param_2.3 = s32[5]{0:T(128)} parameter(2)\n %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, 
s32[]{:T(128)} %constant.2), padding=1_0\n %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)\n %param_0.1 = s32[6]{0:T(128)} parameter(0)\n %bitcast.0 = s32[6]{0:T(128)} bitcast(s32[6]{0:T(128)} %param_0.1)\n %multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)\n ROOT %tuple = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) tuple(%bitcast.0, %multiply.0)\n }\n ENTRY %entry {\n %param0 = s32[6]{0:T(128)} parameter(0)\n %param1 = s32[1]{0:T(128)} parameter(1)\n %param2 = s32[5]{0:T(128)} parameter(2)\n %arg0 = s32[6]{0:T(128)S(1)} copy(%param0)\n %arg1 = s32[1]{0:T(128)} copy(%param1)\n %arg2 = s32[5]{0:T(128)S(1)} copy(%param2)\n ROOT %fusion = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation\n }\n )\";\n absl::string_view expected_hlo_string = R\"(\n HloModule TupleOutput\n %fused_computation {\n %param_1.3 = s32[1]{0:T(128)} parameter(1)\n %constant.2 = s32[]{:T(128)} constant(-2147483648)\n %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5\n %param_2.3 = s32[5]{0:T(128)S(1)} parameter(2)\n %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)S(1)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0\n %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)\n %param_0.1 = s32[6]{0:T(128)S(1)} parameter(0)\n %bitcast.0 = s32[6]{0:T(128)} bitcast(s32[6]{0:T(128)S(1)} %param_0.1)\n %multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)S(1)} %param_0.1)\n ROOT %tuple = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) tuple(%bitcast.0, %multiply.0)\n }\n ENTRY %entry {\n %param0 = s32[6]{0:T(128)} parameter(0)\n %param1 = s32[1]{0:T(128)} parameter(1)\n %param2 = s32[5]{0:T(128)} parameter(2)\n %arg0 = s32[6]{0:T(128)S(1)} copy(%param0)\n %arg1 = s32[1]{0:T(128)} copy(%param1)\n %arg2 = s32[5]{0:T(128)S(1)} copy(%param2)\n ROOT %fusion = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation\n }\n )\";\n TF_ASSERT_OK_AND_ASSIGN(auto module,\n ParseAndReturnUnverifiedModule(hlo_string));\n MemorySpacePropagation memory_space_propagation;\n EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());\n TF_EXPECT_OK(Verify(module.get()));\n TF_ASSERT_OK_AND_ASSIGN(auto ref,\n ParseAndReturnVerifiedModule(expected_hlo_string));\n EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_propagation.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_propagation_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":282,"cells":{"ID":{"kind":"string","value":"9cb97eb8-0ab8-44ff-92e5-3ba9b6eeb3a6"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"cudnn_vectorize_convolutions"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/gpu/transforms/cudnn_vectorize_convolutions.cc"},"File Path for Unit 
Test":{"kind":"string","value":"third_party/xla/xla/service/gpu/transforms/cudnn_vectorize_convolutions_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/gpu/transforms/cudnn_vectorize_convolutions.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/container/flat_hash_set.h\"\n#include \"absl/container/inlined_vector.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"absl/strings/string_view.h\"\n#include \"xla/client/xla_builder.h\"\n#include \"xla/client/xla_computation.h\"\n#include \"xla/hlo/ir/hlo_casting_utils.h\"\n#include \"xla/hlo/ir/hlo_clone_context.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/primitive_util.h\"\n#include \"xla/service/gpu/backend_configs.pb.h\"\n#include \"xla/service/gpu/cublas_cudnn.h\"\n#include \"xla/service/gpu/cudnn_support_utils.h\"\n#include \"xla/service/gpu/stream_executor_util.h\"\n#include \"xla/service/hlo_module_config.h\"\n#include \"xla/shape.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/stream_executor/device_description.h\"\n#include \"xla/stream_executor/dnn.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/logging.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nstatic std::vector GetRelevantConvs(\n HloComputation* comp) {\n std::vector convs;\n for (HloInstruction* instr : comp->instructions()) {\n if (instr->opcode() != HloOpcode::kCustomCall ||\n (instr->custom_call_target() != kCudnnConvForwardCallTarget &&\n instr->custom_call_target() !=\n kCudnnConvBiasActivationForwardCallTarget) ||\n instr->operand_count() < 2) {\n continue;\n }\n PrimitiveType input_ty = instr->operand(0)->shape().element_type();\n PrimitiveType output_ty = instr->shape().tuple_shapes(0).element_type();\n if (input_ty == output_ty && (input_ty == S8 || input_ty == U8)) {\n convs.push_back(Cast(instr));\n }\n }\n return convs;\n}\nstatic absl::StatusOr BuilderToHloComputation(\n XlaBuilder& b, XlaOp root, HloComputation* sibling_computation) {\n TF_ASSIGN_OR_RETURN(XlaComputation comp, b.Build(root));\n TF_ASSIGN_OR_RETURN(ProgramShape program_shape, comp.GetProgramShape());\n HloModuleConfig config(program_shape);\n TF_ASSIGN_OR_RETURN(auto new_module,\n HloModule::CreateFromProto(comp.proto(), config));\n HloModule* dest_module = sibling_computation->parent();\n HloCloneContext context(dest_module);\n return dest_module->DeepCloneComputation(new_module->entry_computation(),\n &context);\n}\nstatic XlaOp SplitAtDim(XlaOp instr, int64_t dim, int64_t vect_size) {\n XlaBuilder& b = *instr.builder();\n Shape shape = b.GetShape(instr).value();\n DimensionVector new_dims(shape.dimensions().begin(),\n shape.dimensions().end());\n CHECK_EQ(new_dims[dim] % vect_size, 0);\n new_dims[dim] /= vect_size;\n new_dims.insert(new_dims.begin() + dim + 1, vect_size);\n return Reshape(instr, new_dims);\n}\nstatic Shape SplitShapeAtDim(Shape shape, int64_t dim, int64_t vect_size) {\n DimensionVector new_dims(shape.dimensions().begin(),\n shape.dimensions().end());\n CHECK_EQ(new_dims[dim] % vect_size, 0);\n new_dims[dim] /= vect_size;\n new_dims.insert(new_dims.begin() + dim + 1, vect_size);\n return ShapeUtil::MakeShape(shape.element_type(), new_dims);\n}\nstatic XlaOp MoveDim(XlaOp instr, int64_t src, int64_t dst) {\n XlaBuilder& b = 
*instr.builder();\n int64_t rank = b.GetShape(instr)->dimensions_size();\n DimensionVector idxs(rank);\n absl::c_iota(idxs, 0);\n if (src < dst) {\n idxs.insert(idxs.begin() + dst, src);\n idxs.erase(idxs.begin() + src);\n } else {\n idxs.erase(idxs.begin() + src);\n idxs.insert(idxs.begin() + dst, src);\n }\n return Transpose(instr, idxs);\n}\nstatic XlaOp RevectorizeInstr(XlaOp instr, int64_t dim, int64_t vect_dim,\n int64_t vect_size) {\n XlaBuilder& b = *instr.builder();\n Shape shape = b.GetShape(instr).value();\n auto size = [&](int64_t d) { return shape.dimensions(d); };\n CHECK_LE(size(vect_dim), vect_size);\n CHECK_EQ(vect_size % size(vect_dim), 0);\n int64_t split_factor = vect_size / size(vect_dim);\n CHECK_EQ(size(dim) % split_factor, 0);\n instr = SplitAtDim(instr, dim, split_factor);\n if (vect_dim > dim) {\n vect_dim++;\n }\n instr = MoveDim(instr, dim + 1, vect_dim);\n if (vect_dim > dim) {\n vect_dim--;\n }\n return Collapse(instr, {vect_dim, vect_dim + 1});\n}\nstatic XlaOp UnrevectorizeInstr(XlaOp instr, int64_t dim, int64_t vect_dim,\n int64_t orig_vect_size) {\n XlaBuilder& b = *instr.builder();\n Shape shape = b.GetShape(instr).value();\n auto size = [&](int64_t d) { return shape.dimensions(d); };\n CHECK_GE(size(vect_dim), orig_vect_size);\n CHECK_EQ(size(vect_dim) % orig_vect_size, 0);\n instr = SplitAtDim(instr, vect_dim, orig_vect_size);\n if (dim > vect_dim) {\n dim++;\n }\n instr = MoveDim(instr, vect_dim, dim + 1);\n if (dim > vect_dim) {\n dim--;\n }\n return Collapse(instr, {dim, dim + 1});\n}\nstatic ConvolutionDimensionNumbers VectorizeDnums(\n ConvolutionDimensionNumbers dnums, bool reordered_filter) {\n int64_t input_vect_dim = dnums.input_feature_dimension();\n if (dnums.input_batch_dimension() > input_vect_dim) {\n dnums.set_input_batch_dimension(dnums.input_batch_dimension() + 1);\n }\n for (int64_t& d : *dnums.mutable_input_spatial_dimensions()) {\n if (d > input_vect_dim) {\n ++d;\n }\n }\n if (!reordered_filter) {\n int64_t kernel_vect_dim = dnums.kernel_input_feature_dimension();\n if (dnums.kernel_output_feature_dimension() > kernel_vect_dim) {\n dnums.set_kernel_output_feature_dimension(\n dnums.kernel_output_feature_dimension() + 1);\n }\n for (int64_t& d : *dnums.mutable_kernel_spatial_dimensions()) {\n if (d > kernel_vect_dim) {\n ++d;\n }\n }\n }\n int64_t output_vect_dim = dnums.output_feature_dimension();\n if (dnums.output_batch_dimension() > output_vect_dim) {\n dnums.set_output_batch_dimension(dnums.output_batch_dimension() + 1);\n }\n for (int64_t& d : *dnums.mutable_output_spatial_dimensions()) {\n if (d > output_vect_dim) {\n ++d;\n }\n }\n return dnums;\n}\nabsl::Status ReorderInt8NchwVect(HloCustomCallInstruction* conv,\n XlaOp* operands) {\n bool has_bias = conv->operand_count() > 2;\n VLOG(1) << \"Reordering filter\" << (has_bias ? 
\" and bias\" : \"\")\n << \" (replacement for cudnnReorderFilterAndBias)\";\n auto builder = operands->builder();\n ConvolutionDimensionNumbers dnums = conv->convolution_dimension_numbers();\n TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,\n conv->backend_config());\n CudnnConvBackendConfig& config =\n *gpu_config.mutable_cudnn_conv_backend_config();\n config.set_reordered_int8_nchw_vect(true);\n TF_RETURN_IF_ERROR(conv->set_backend_config(gpu_config));\n TF_ASSIGN_OR_RETURN(Shape filter_shape, builder->GetShape(operands[1]));\n TF_ASSIGN_OR_RETURN(auto reorder, CudnnInferTransposeForFilterReordering(\n filter_shape, dnums));\n XlaOp reshape = Reshape(reorder.transpose_shape, operands[1]);\n XlaOp transpose = Transpose(reshape, reorder.permutation);\n operands[1] = Reshape(reorder.result_shape, transpose);\n dnums.set_kernel_output_feature_dimension(0);\n dnums.set_kernel_input_feature_dimension(1);\n dnums.set_kernel_spatial_dimensions(0, 2);\n dnums.set_kernel_spatial_dimensions(1, 3);\n conv->set_convolution_dimension_numbers(dnums);\n if (has_bias) {\n TF_ASSIGN_OR_RETURN(Shape bias_shape, builder->GetShape(operands[2]));\n TF_ASSIGN_OR_RETURN(reorder,\n CudnnInferTransposeForBiasReordering(bias_shape));\n reshape = Reshape(reorder.transpose_shape, operands[2]);\n transpose = Transpose(reshape, reorder.permutation);\n operands[2] = Reshape(reorder.result_shape, transpose);\n }\n return absl::OkStatus();\n}\nstatic absl::StatusOr TryRevectorizeConv(\n const se::CudaComputeCapability& compute_capability,\n const se::dnn::VersionInfo& cudnn_version, HloCustomCallInstruction* conv,\n int vect_size) {\n const Shape& input_shape = conv->operand(0)->shape();\n const Shape& kernel_shape = conv->operand(1)->shape();\n const Shape& output_shape = conv->shape().tuple_shapes(0);\n const ConvolutionDimensionNumbers* dnums =\n &conv->convolution_dimension_numbers();\n std::optional input_vect_dim;\n std::optional kernel_vect_dim;\n std::optional output_vect_dim;\n std::tie(input_vect_dim, kernel_vect_dim, output_vect_dim) =\n FindVectorizedFeatureDims(*dnums, input_shape, kernel_shape,\n output_shape);\n if (!input_vect_dim.has_value() || !kernel_vect_dim.has_value() ||\n !output_vect_dim.has_value()) {\n return false;\n }\n int64_t input_feat_size =\n input_shape.dimensions(dnums->input_feature_dimension());\n int64_t output_feat_size =\n output_shape.dimensions(dnums->output_feature_dimension());\n int64_t input_vect_size = input_shape.dimensions(*input_vect_dim);\n int64_t output_vect_size = output_shape.dimensions(*output_vect_dim);\n if (vect_size % input_vect_size != 0 || vect_size % output_vect_size != 0 ||\n input_feat_size % (vect_size / input_vect_size) != 0 ||\n output_feat_size % (vect_size / output_vect_size) != 0) {\n return false;\n }\n if (primitive_util::IsIntegralType(input_shape.element_type())) {\n TF_ASSIGN_OR_RETURN(bool supported_target_vectorization,\n CudnnSupportsOptimizedIntegerConvolution(\n compute_capability, *conv, vect_size));\n if (!supported_target_vectorization) {\n VLOG(3) << \"Skipping re-vectorization of conv to vector size: \"\n << vect_size << \": \" << conv->ToString();\n return false;\n }\n }\n VLOG(1) << \"Re-vectorizing conv channels from \"\n << input_shape.dimensions(*input_vect_dim) << \" to \" << vect_size\n << \": \" << conv->ToString();\n XlaBuilder b(absl::StrCat(conv->name(), \".revectorized\"));\n b.SetOpMetadata(conv->metadata());\n XlaOp filter = Parameter(&b, 1, conv->operand(1)->shape(), \"filter\");\n absl::InlinedVector new_operands = {\n 
RevectorizeInstr(Parameter(&b, 0, conv->operand(0)->shape(), \"input\"),\n dnums->input_feature_dimension(), *input_vect_dim,\n vect_size),\n RevectorizeInstr(filter, dnums->kernel_input_feature_dimension(),\n *kernel_vect_dim, vect_size),\n };\n if (conv->operand_count() > 2) {\n new_operands.push_back(Parameter(&b, 2, conv->operand(2)->shape(), \"bias\"));\n }\n if (conv->operand_count() > 3) {\n new_operands.push_back(RevectorizeInstr(\n Parameter(&b, 3, conv->operand(3)->shape(), \"side_input\"),\n dnums->input_feature_dimension(), *input_vect_dim, vect_size));\n }\n if (conv->operand_count() > 4) {\n return InvalidArgument(\n \"Don't understand a conv with more than 4 arguments: %s\",\n conv->ToString());\n }\n const auto& debug_options = conv->GetModule()->config().debug_options();\n bool use_reordering =\n input_shape.element_type() == xla::S8 && vect_size == 32 &&\n debug_options.xla_gpu_enable_cudnn_int8x32_convolution_reordering() &&\n cudnn_version >= se::dnn::VersionInfo{8, 3, 0};\n if (use_reordering) {\n int64_t kernel_vect_size = kernel_shape.dimensions(*kernel_vect_dim);\n if (kernel_vect_size == 4 || kernel_vect_size == 32) {\n new_operands[1] = filter;\n }\n TF_RETURN_IF_ERROR(ReorderInt8NchwVect(conv, new_operands.data()));\n dnums = &conv->convolution_dimension_numbers();\n }\n DimensionVector new_output_dims(output_shape.dimensions().begin(),\n output_shape.dimensions().end());\n new_output_dims[dnums->output_feature_dimension()] /=\n (vect_size / output_vect_size);\n new_output_dims[*output_vect_dim] = vect_size;\n XlaOp new_conv = CustomCallWithConvDnums(\n &b, conv->custom_call_target(), new_operands,\n ShapeUtil::MakeTupleShape(\n {ShapeUtil::MakeShape(output_shape.element_type(), new_output_dims),\n ShapeUtil::MakeShape(U8, {0})}),\n {},\n conv->raw_backend_config_string(), false,\n {}, nullptr,\n conv->window(),\n *dnums);\n XlaOp new_conv_result = GetTupleElement(new_conv, 0);\n XlaOp new_conv_scratch = GetTupleElement(new_conv, 1);\n XlaOp new_conv_result_unrevectorized = UnrevectorizeInstr(\n new_conv_result, dnums->output_feature_dimension(), *output_vect_dim,\n output_shape.dimensions(*output_vect_dim));\n TF_ASSIGN_OR_RETURN(\n HloComputation * new_conv_comp,\n BuilderToHloComputation(\n b, Tuple(&b, {new_conv_result_unrevectorized, new_conv_scratch}),\n conv->parent()));\n auto new_conv_comp_instrs = new_conv_comp->instructions();\n auto new_conv_it =\n absl::c_find_if(new_conv_comp_instrs, [](HloInstruction* instr) {\n return instr->opcode() == HloOpcode::kCustomCall;\n });\n if (new_conv_it != new_conv_comp_instrs.end()) {\n new_conv_comp->parent()->SetAndUniquifyInstrName(*new_conv_it,\n conv->name());\n }\n VLOG(1) << \"Re-vectorized conv to \" << new_conv_comp->ToString();\n TF_RETURN_IF_ERROR(conv->parent()->ReplaceWithNewInstruction(\n conv, HloInstruction::CreateCall(conv->shape(), conv->operands(),\n new_conv_comp)));\n return true;\n}\nstatic absl::StatusOr TryVectorizeConv(\n const se::CudaComputeCapability& compute_capability,\n const se::dnn::VersionInfo& cudnn_version, HloCustomCallInstruction* conv,\n int64_t vect_size) {\n const Shape& input_shape = conv->operand(0)->shape();\n const Shape& output_shape = conv->shape().tuple_shapes(0);\n const ConvolutionDimensionNumbers* dnums =\n &conv->convolution_dimension_numbers();\n int64_t in_channels =\n input_shape.dimensions(dnums->input_feature_dimension());\n int64_t out_channels =\n output_shape.dimensions(dnums->output_feature_dimension());\n if (in_channels % vect_size != 0 || out_channels % 
vect_size != 0) {\n return false;\n }\n if (input_shape.dimensions_size() >\n 2 + dnums->input_spatial_dimensions_size()) {\n return false;\n }\n if (primitive_util::IsIntegralType(input_shape.element_type())) {\n TF_ASSIGN_OR_RETURN(bool supported_target_vectorization,\n CudnnSupportsOptimizedIntegerConvolution(\n compute_capability, *conv, vect_size));\n if (!supported_target_vectorization) {\n VLOG(3) << \"Skipping vectorization of conv to vector size: \" << vect_size\n << \": \" << conv->ToString();\n return false;\n }\n }\n VLOG(1) << \"Vectorizing conv channels by \" << vect_size << \": \"\n << conv->ToString();\n XlaBuilder b(absl::StrCat(conv->name(), \".revectorized\"));\n b.SetOpMetadata(conv->metadata());\n XlaOp filter = Parameter(&b, 1, conv->operand(1)->shape(), \"filter\");\n absl::InlinedVector new_operands = {\n SplitAtDim(Parameter(&b, 0, conv->operand(0)->shape(), \"input\"),\n dnums->input_feature_dimension(), vect_size),\n SplitAtDim(filter, dnums->kernel_input_feature_dimension(), vect_size),\n };\n if (conv->operand_count() > 2) {\n new_operands.push_back(Parameter(&b, 2, conv->operand(2)->shape(), \"bias\"));\n }\n if (conv->operand_count() > 3) {\n new_operands.push_back(\n SplitAtDim(Parameter(&b, 3, conv->operand(3)->shape(), \"side_input\"),\n dnums->output_feature_dimension(), vect_size));\n }\n if (conv->operand_count() > 4) {\n return InvalidArgument(\n \"Don't understand a conv with more than 4 arguments: %s\",\n conv->ToString());\n }\n const auto& debug_options = conv->GetModule()->config().debug_options();\n bool use_reordering =\n input_shape.element_type() == xla::S8 && vect_size == 32 &&\n debug_options.xla_gpu_enable_cudnn_int8x32_convolution_reordering() &&\n cudnn_version >= se::dnn::VersionInfo{8, 3, 0};\n if (use_reordering) {\n new_operands[1] = filter;\n TF_RETURN_IF_ERROR(ReorderInt8NchwVect(conv, new_operands.data()));\n dnums = &conv->convolution_dimension_numbers();\n }\n Shape new_output_shape = SplitShapeAtDim(\n output_shape, dnums->output_feature_dimension(), vect_size);\n XlaOp new_conv = CustomCallWithConvDnums(\n &b, conv->custom_call_target(), new_operands,\n ShapeUtil::MakeTupleShape(\n {new_output_shape, ShapeUtil::MakeShape(U8, {0})}),\n {},\n conv->raw_backend_config_string(), false,\n {}, nullptr,\n conv->window(),\n VectorizeDnums(*dnums, use_reordering));\n XlaOp new_conv_result = GetTupleElement(new_conv, 0);\n XlaOp new_conv_scratch = GetTupleElement(new_conv, 1);\n XlaOp conv_result_collapsed =\n Collapse(new_conv_result, {dnums->output_feature_dimension(),\n dnums->output_feature_dimension() + 1});\n TF_ASSIGN_OR_RETURN(\n HloComputation * new_conv_comp,\n BuilderToHloComputation(\n b, Tuple(&b, {conv_result_collapsed, new_conv_scratch}),\n conv->parent()));\n VLOG(1) << \"Vectorized conv to: \" << new_conv_comp->ToString();\n TF_RETURN_IF_ERROR(conv->parent()->ReplaceWithNewInstruction(\n conv, HloInstruction::CreateCall(conv->shape(), conv->operands(),\n new_conv_comp)));\n return true;\n}\n} \nabsl::StatusOr CudnnVectorizeConvolutions::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n bool changed = false;\n for (HloComputation* comp :\n module->MakeNonfusionComputations(execution_threads)) {\n for (HloCustomCallInstruction* conv : GetRelevantConvs(comp)) {\n bool local_changed = false;\n if (compute_capability_.IsAtLeast(7, 5)) {\n TF_ASSIGN_OR_RETURN(\n local_changed,\n TryRevectorizeConv(compute_capability_, cudnn_version_, conv, 32));\n if (!local_changed) {\n TF_ASSIGN_OR_RETURN(\n 
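/* No existing vector dimension to grow, so try splitting the channels directly into int8x32. */\n 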
local_changed,\n TryVectorizeConv(compute_capability_, cudnn_version_, conv, 32));\n }\n }\n if (!local_changed) {\n TF_ASSIGN_OR_RETURN(\n local_changed,\n TryVectorizeConv(compute_capability_, cudnn_version_, conv, 4));\n }\n changed |= local_changed;\n }\n }\n return changed;\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/gpu/transforms/cudnn_vectorize_convolutions.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/algorithm/container.h\"\n#include \"absl/status/statusor.h\"\n#include \"xla/service/call_inliner.h\"\n#include \"xla/service/gpu/backend_configs.pb.h\"\n#include \"xla/service/gpu/cublas_cudnn.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/service/pattern_matcher_gmock.h\"\n#include \"xla/stream_executor/device_description.h\"\n#include \"xla/stream_executor/dnn.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/util.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla {\nnamespace gpu {\nnamespace {\nnamespace m = ::xla::match;\nclass CudnnVectorizeConvolutionsTest : public HloTestBase {\n protected:\n absl::StatusOr Run(std::pair compute_capability,\n HloModule* module) {\n CudnnVectorizeConvolutions pass(\n se::CudaComputeCapability{compute_capability.first,\n compute_capability.second},\n se::dnn::VersionInfo(8, 3, 0));\n TF_ASSIGN_OR_RETURN(bool changed, RunHloPass(&pass, module));\n CallInliner inliner;\n TF_RETURN_IF_ERROR(RunHloPass(&inliner, module).status());\n return changed;\n }\n};\nTEST_F(CudnnVectorizeConvolutionsTest, VectorizeTo4) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[10,20,30,40] parameter(0)\n filter = s8[2,2,40,44] parameter(1)\n ROOT result = (s8[10,20,30,44], u8[0]) custom-call(input, filter),\n window={size=2x2}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\",\n backend_config=\"{bar: 0}\"\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));\n EXPECT_TRUE(changed);\n SCOPED_TRACE(module->ToString());\n auto* root = module->entry_computation()->root_instruction();\n const HloInstruction* conv = nullptr;\n ASSERT_THAT(\n root,\n GmockMatch(m::Tuple(\n m::Reshape(m::GetTupleElement(\n m::CustomCall(&conv, {kCudnnConvForwardCallTarget},\n m::Reshape(m::Parameter(0))\n .WithShape(S8, {10, 20, 30, 10, 4}),\n m::Reshape(m::Parameter(1))\n .WithShape(S8, {2, 2, 10, 4, 44}))\n .WithConvDnums(\"b01f?_01i?o->b01f?\"))\n .WithShape(S8, {10, 20, 30, 11, 4})),\n m::Op())));\n EXPECT_EQ(conv->raw_backend_config_string(), \"{bar: 0}\");\n}\nTEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4UnsupportedFilterType) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[10,20,30,40] parameter(0)\n filter = f32[2,2,40,44] parameter(1)\n ROOT result = (s8[10,20,30,44], u8[0]) custom-call(input, filter),\n window={size=2x2}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\",\n backend_config=\"{bar: 0}\"\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(CudnnVectorizeConvolutionsTest, VectorizeTo4NCHW) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[10,48,20,30] parameter(0)\n filter = s8[48,44,2,2] parameter(1)\n ROOT result = (s8[10,44,20,30], 
u8[0]) custom-call(input, filter),\n window={size=2x2}, dim_labels=bf01_io01->bf01,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));\n EXPECT_TRUE(changed);\n SCOPED_TRACE(module->ToString());\n auto* root = module->entry_computation()->root_instruction();\n const HloInstruction* conv = nullptr;\n ASSERT_THAT(\n root,\n GmockMatch(m::Tuple(\n m::Reshape(m::GetTupleElement(\n m::CustomCall(&conv, {kCudnnConvForwardCallTarget},\n m::Reshape(m::Parameter(0))\n .WithShape(S8, {10, 12, 4, 20, 30}),\n m::Reshape(m::Parameter(1))\n .WithShape(S8, {12, 4, 44, 2, 2}))\n .WithConvDnums(\"bf?01_i?o01->bf?01\"))\n .WithShape(S8, {10, 11, 4, 20, 30})),\n m::Op())));\n}\nTEST_F(CudnnVectorizeConvolutionsTest, IncrementAllDnums) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[16,16,16,16] parameter(0)\n filter = s8[16,16,3,3] parameter(1)\n ROOT result = (s8[16,16,16,16], u8[0]) custom-call(input, filter),\n window={size=2x2}, dim_labels=fb01_i01o->fb01,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));\n EXPECT_TRUE(changed);\n SCOPED_TRACE(module->ToString());\n auto* root = module->entry_computation()->root_instruction();\n const HloInstruction* conv = nullptr;\n ASSERT_THAT(\n root,\n GmockMatch(m::Tuple(\n m::Reshape(m::GetTupleElement(\n m::CustomCall(&conv, {kCudnnConvForwardCallTarget},\n m::Reshape(m::Parameter(0))\n .WithShape(S8, {4, 4, 16, 16, 16}),\n m::Reshape(m::Parameter(1))\n .WithShape(S8, {4, 4, 16, 3, 3}))\n .WithConvDnums(\"f?b01_i?01o->f?b01\"))\n .WithShape(S8, {4, 4, 16, 16, 16})),\n m::Op())));\n}\nTEST_F(CudnnVectorizeConvolutionsTest, FilterDnums) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[1,20,9,9] parameter(0)\n filter = s8[3,3,20,32] parameter(1)\n ROOT result = (s8[1,32,9,9], u8[0]) custom-call(s8[1,20,9,9] input, s8[3,3,20,32] filter),\n window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));\n EXPECT_TRUE(changed);\n SCOPED_TRACE(module->ToString());\n auto* root = module->entry_computation()->root_instruction();\n const HloInstruction* conv = nullptr;\n ASSERT_THAT(\n root,\n GmockMatch(m::Tuple(\n m::Reshape(m::GetTupleElement(\n m::CustomCall(&conv, {kCudnnConvForwardCallTarget},\n m::Reshape(m::Parameter(0))\n .WithShape(S8, {1, 5, 4, 9, 9}),\n m::Reshape(m::Parameter(1))\n .WithShape(S8, {3, 3, 5, 4, 32}))\n .WithConvDnums(\"bf?01_01i?o->bf?01\"))\n .WithShape(S8, {1, 8, 4, 9, 9})),\n m::Op())));\n}\nTEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[10,20,30,41] parameter(0)\n filter = s8[2,2,41,44] parameter(1)\n ROOT result = (s8[10,20,30,44], u8[0]) custom-call(input, filter),\n window={size=2x2}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n CudnnVectorizeConvolutions pass(\n {7, 5},\n se::dnn::VersionInfo{8, 3, 0});\n TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));\n SCOPED_TRACE(module->ToString());\n EXPECT_FALSE(changed);\n}\nTEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4IfOutputIsS32) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n 
HloModule TestModule\n ENTRY TestComputation {\n input = s8[10,20,30,41] parameter(0)\n filter = s8[2,2,41,44] parameter(1)\n ROOT result = (s32[10,20,30,44], u8[0]) custom-call(input, filter),\n window={size=2x2}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));\n SCOPED_TRACE(module->ToString());\n EXPECT_FALSE(changed);\n}\nTEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4IfOutputIsF32) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[10,20,30,41] parameter(0)\n filter = s8[2,2,41,44] parameter(1)\n ROOT result = (f32[10,20,30,44], u8[0]) custom-call(input, filter),\n window={size=2x2}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));\n SCOPED_TRACE(module->ToString());\n EXPECT_FALSE(changed);\n}\nTEST_F(CudnnVectorizeConvolutionsTest, VectorizeTo32) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[10,20,30,64] parameter(0)\n filter = s8[2,2,64,128] parameter(1)\n ROOT result = (s8[10,20,30,128], u8[0]) custom-call(input, filter),\n window={size=2x2}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));\n EXPECT_TRUE(changed);\n SCOPED_TRACE(module->ToString());\n auto* root = module->entry_computation()->root_instruction();\n const HloInstruction* conv = nullptr;\n ASSERT_THAT(\n root,\n GmockMatch(m::Tuple(\n m::Reshape(\n m::GetTupleElement(\n m::CustomCall(\n &conv, {kCudnnConvForwardCallTarget},\n m::Reshape(m::Parameter(0))\n .WithShape(S8, {10, 20, 30, 2, 32}),\n m::Reshape(\n m::Transpose(\n m::Reshape(m::Parameter(1))\n .WithShape(S8, {2, 2, 2, 8, 4, 16, 4, 2}))\n .WithShape(S8, {2, 2, 2, 16, 2, 8, 4, 4})\n .WithPredicate([](const HloInstruction* instr) {\n return absl::c_equal(\n instr->dimensions(),\n std::vector{2, 0, 1, 5, 7, 3, 6,\n 4});\n }))\n .WithShape(S8, {128, 2, 2, 2, 32})))\n .WithShape(S8, {10, 20, 30, 4, 32})),\n m::Op())));\n EXPECT_TRUE(conv->backend_config()\n ->cudnn_conv_backend_config()\n .reordered_int8_nchw_vect());\n}\nTEST_F(CudnnVectorizeConvolutionsTest, BiasAndSideInput) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[10,20,30,64] parameter(0)\n filter = s8[2,2,64,128] parameter(1)\n bias = f32[128] parameter(2)\n side_input = s8[10,20,30,64] parameter(3)\n ROOT result = (s8[10,20,30,128], u8[0]) custom-call(input, filter, bias, side_input),\n window={size=2x2}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));\n EXPECT_TRUE(changed);\n SCOPED_TRACE(module->ToString());\n auto* root = module->entry_computation()->root_instruction();\n const HloInstruction* conv = nullptr;\n ASSERT_THAT(\n root,\n GmockMatch(m::Tuple(\n m::Reshape(\n m::GetTupleElement(\n m::CustomCall(\n &conv, {kCudnnConvForwardCallTarget},\n m::Reshape(m::Parameter(0))\n .WithShape(S8, {10, 20, 30, 2, 32}),\n m::Reshape(m::Transpose(m::Reshape(m::Parameter(1))))\n .WithShape(S8, {128, 2, 2, 2, 32}),\n m::Reshape(\n m::Transpose(m::Reshape(m::Parameter(2))\n .WithShape(F32, {4, 4, 2, 4}))\n .WithShape(F32, {4, 2, 4, 4})\n .WithPredicate([](const 
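/* the predicate pins the exact transpose permutation, not just the shape */\n 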
HloInstruction* instr) {\n return absl::c_equal(\n instr->dimensions(),\n std::vector{0, 2, 1, 3});\n }))\n .WithShape(F32, {128}),\n m::Reshape(m::Parameter(3))\n .WithShape(S8, {10, 20, 30, 2, 32})))\n .WithShape(S8, {10, 20, 30, 4, 32})),\n m::Op())));\n EXPECT_TRUE(conv->backend_config()\n ->cudnn_conv_backend_config()\n .reordered_int8_nchw_vect());\n}\nTEST_F(CudnnVectorizeConvolutionsTest, InputNHWC_OutputNCHW) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[10,20,30,64] parameter(0)\n filter = s8[2,2,64,128] parameter(1)\n bias = f32[128] parameter(2)\n side_input = s8[10,128,20,30] parameter(3)\n ROOT result = (s8[10,128,20,30], u8[0]) custom-call(input, filter, bias, side_input),\n window={size=2x2}, dim_labels=b01f_01io->bf01,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));\n EXPECT_TRUE(changed);\n SCOPED_TRACE(module->ToString());\n auto* root = module->entry_computation()->root_instruction();\n const HloInstruction* conv = nullptr;\n ASSERT_THAT(\n root,\n GmockMatch(m::Tuple(\n m::Reshape(\n m::GetTupleElement(\n m::CustomCall(\n &conv, {kCudnnConvForwardCallTarget},\n m::Reshape(m::Parameter(0))\n .WithShape(S8, {10, 20, 30, 2, 32}),\n m::Reshape(m::Transpose(m::Reshape(m::Parameter(1))))\n .WithShape(S8, {128, 2, 2, 2, 32}),\n m::Reshape(\n m::Transpose(m::Reshape(m::Parameter(2))\n .WithShape(F32, {4, 4, 2, 4}))\n .WithShape(F32, {4, 2, 4, 4})\n .WithPredicate([](const HloInstruction* instr) {\n return absl::c_equal(\n instr->dimensions(),\n std::vector{0, 2, 1, 3});\n }))\n .WithShape(F32, {128}),\n m::Reshape(m::Parameter(3))\n .WithShape(S8, {10, 4, 32, 20, 30})))\n .WithShape(S8, {10, 4, 32, 20, 30})),\n m::Op())));\n EXPECT_TRUE(conv->backend_config()\n ->cudnn_conv_backend_config()\n .reordered_int8_nchw_vect());\n}\nTEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo32) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[10,20,30,64] parameter(0)\n filter = s8[2,2,64,128] parameter(1)\n ROOT result = (s8[10,20,30,128], u8[0]) custom-call(input, filter),\n window={size=2x2}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 0}, module.get()));\n EXPECT_TRUE(changed);\n SCOPED_TRACE(module->ToString());\n auto* root = module->entry_computation()->root_instruction();\n const HloInstruction* conv = nullptr;\n ASSERT_THAT(\n root,\n GmockMatch(m::Tuple(\n m::Reshape(m::GetTupleElement(\n m::CustomCall(&conv, {kCudnnConvForwardCallTarget},\n m::Reshape(m::Parameter(0))\n .WithShape(S8, {10, 20, 30, 16, 4}),\n m::Reshape(m::Parameter(1))\n .WithShape(S8, {2, 2, 16, 4, 128})))\n .WithShape(S8, {10, 20, 30, 32, 4})),\n m::Op())));\n EXPECT_FALSE(conv->backend_config()\n ->cudnn_conv_backend_config()\n .reordered_int8_nchw_vect());\n}\nTEST_F(CudnnVectorizeConvolutionsTest, Vectorize4To32) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[10,20,30,16,4] parameter(0)\n filter = s8[3,5,16,192,4] parameter(1)\n bias = f32[64] parameter(2)\n side_input = s8[10,20,30,16,4] parameter(3)\n ROOT result = (s8[10,20,30,48,4], u8[0]) custom-call(input, filter, bias, side_input),\n window={size=3x5}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(bool 
changed, Run({7, 5}, module.get()));\n EXPECT_TRUE(changed);\n SCOPED_TRACE(module->ToString());\n auto* root = module->entry_computation()->root_instruction();\n const HloInstruction* conv = nullptr;\n auto conv_pat =\n m::GetTupleElement(\n m::CustomCall(\n &conv, {kCudnnConvForwardCallTarget},\n m::Reshape(m::Transpose(m::Reshape(m::Parameter(0))\n .WithShape(S8, {10, 20, 30, 2, 8, 4}))\n .WithShape(S8, {10, 20, 30, 2, 8, 4}))\n .WithShape(S8, {10, 20, 30, 2, 32}),\n m::Reshape(\n m::Transpose(m::Reshape(m::Parameter(1))\n .WithShape(S8, {3, 5, 2, 8, 24, 4, 2, 4}))\n .WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4})\n .WithPredicate([](const HloInstruction* instr) {\n return absl::c_equal(\n instr->dimensions(),\n std::vector{2, 0, 1, 4, 6, 3, 5, 7});\n }))\n .WithShape(S8, {192, 2, 3, 5, 32}),\n m::Reshape(m::Transpose(m::Reshape(m::Parameter(2)))),\n m::Reshape(m::Transpose(m::Reshape(m::Parameter(3))\n .WithShape(S8, {10, 20, 30, 2, 8, 4}))\n .WithShape(S8, {10, 20, 30, 2, 8, 4}))\n .WithShape(S8, {10, 20, 30, 2, 32}))\n .WithConvDnums(\"b01f?_oi01?->b01f?\"))\n .WithShape(S8, {10, 20, 30, 6, 32});\n ASSERT_THAT(root, GmockMatch(m::Tuple(\n m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape(\n S8, {10, 20, 30, 6, 8, 4}))\n .WithShape(S8, {10, 20, 30, 6, 8, 4}))\n .WithShape(S8, {10, 20, 30, 48, 4}),\n m::Op())));\n EXPECT_TRUE(conv->backend_config()\n ->cudnn_conv_backend_config()\n .reordered_int8_nchw_vect());\n}\nTEST_F(CudnnVectorizeConvolutionsTest, Vectorize4To32NCHW) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[10,16,20,30,4] parameter(0)\n filter = s8[16,128,2,2,4] parameter(1)\n bias = f32[64] parameter(2)\n side_input = s8[10,16,20,30,4] parameter(3)\n ROOT result = (s8[10,32,20,30,4], u8[0]) custom-call(input, filter, bias, side_input),\n window={size=2x2}, dim_labels=bf01_io01->bf01,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));\n EXPECT_TRUE(changed);\n SCOPED_TRACE(module->ToString());\n auto* root = module->entry_computation()->root_instruction();\n const HloInstruction* conv = nullptr;\n auto conv_pat =\n m::GetTupleElement(\n m::CustomCall(\n &conv, {kCudnnConvForwardCallTarget},\n m::Reshape(m::Transpose(m::Reshape(m::Parameter(0))\n .WithShape(S8, {10, 2, 8, 20, 30, 4}))\n .WithShape(S8, {10, 2, 20, 30, 8, 4}))\n .WithShape(S8, {10, 2, 20, 30, 32}),\n m::Reshape(\n m::Transpose(m::Reshape(m::Parameter(1))\n .WithShape(S8, {2, 8, 16, 4, 2, 2, 2, 4}))\n .WithShape(S8, {2, 2, 2, 16, 2, 8, 4, 4})\n .WithPredicate([](const HloInstruction* instr) {\n return absl::c_equal(\n instr->dimensions(),\n std::vector{0, 5, 6, 2, 4, 1, 3, 7});\n }))\n .WithShape(S8, {128, 2, 2, 2, 32}),\n m::Reshape(m::Transpose(m::Reshape(m::Parameter(2)))),\n m::Reshape(m::Transpose(m::Reshape(m::Parameter(3))\n .WithShape(S8, {10, 2, 8, 20, 30, 4}))\n .WithShape(S8, {10, 2, 20, 30, 8, 4}))\n .WithShape(S8, {10, 2, 20, 30, 32}))\n .WithConvDnums(\"bf01_oi01->bf01\"))\n .WithShape(S8, {10, 4, 20, 30, 32});\n ASSERT_THAT(root, GmockMatch(m::Tuple(\n m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape(\n S8, {10, 4, 20, 30, 8, 4}))\n .WithShape(S8, {10, 4, 8, 20, 30, 4}))\n .WithShape(S8, {10, 32, 20, 30, 4}),\n m::Op())));\n EXPECT_TRUE(conv->backend_config()\n ->cudnn_conv_backend_config()\n .reordered_int8_nchw_vect());\n}\nTEST_F(CudnnVectorizeConvolutionsTest, Vectorize4To32VectorDimFirst) {\n auto module = 
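/* the ? in dim_labels marks the vector dimension, here placed first */\n 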
ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[4,10,20,30,16] parameter(0)\n filter = s8[4,3,5,16,192] parameter(1)\n bias = f32[64] parameter(2)\n side_input = s8[4,10,20,30,16] parameter(3)\n ROOT result = (s8[4,10,20,30,48], u8[0]) custom-call(input, filter, bias, side_input),\n window={size=3x5}, dim_labels=?b01f_?01io->?b01f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));\n EXPECT_TRUE(changed);\n SCOPED_TRACE(module->ToString());\n auto* root = module->entry_computation()->root_instruction();\n const HloInstruction* conv = nullptr;\n auto conv_pat =\n m::GetTupleElement(\n m::CustomCall(\n &conv, {kCudnnConvForwardCallTarget},\n m::Reshape(m::Transpose(m::Reshape(m::Parameter(0))\n .WithShape(S8, {4, 10, 20, 30, 2, 8}))\n .WithShape(S8, {8, 4, 10, 20, 30, 2}))\n .WithShape(S8, {32, 10, 20, 30, 2}),\n m::Reshape(\n m::Transpose(m::Reshape(m::Parameter(1))\n .WithShape(S8, {4, 3, 5, 2, 8, 24, 4, 2}))\n .WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4})\n .WithPredicate([](const HloInstruction* instr) {\n return absl::c_equal(\n instr->dimensions(),\n std::vector{3, 1, 2, 5, 7, 4, 6, 0});\n }))\n .WithShape(S8, {192, 2, 3, 5, 32}),\n m::Reshape(m::Transpose(m::Reshape(m::Parameter(2)))),\n m::Reshape(m::Transpose(m::Reshape(m::Parameter(3))\n .WithShape(S8, {4, 10, 20, 30, 2, 8}))\n .WithShape(S8, {8, 4, 10, 20, 30, 2}))\n .WithShape(S8, {32, 10, 20, 30, 2}))\n .WithConvDnums(\"?b01f_oi01->?b01f\"))\n .WithShape(S8, {32, 10, 20, 30, 6});\n ASSERT_THAT(root, GmockMatch(m::Tuple(\n m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape(\n S8, {8, 4, 10, 20, 30, 6}))\n .WithShape(S8, {4, 10, 20, 30, 6, 8}))\n .WithShape(S8, {4, 10, 20, 30, 48}),\n m::Op())));\n EXPECT_TRUE(conv->backend_config()\n ->cudnn_conv_backend_config()\n .reordered_int8_nchw_vect());\n}\nTEST_F(CudnnVectorizeConvolutionsTest, NoVectorize4To32) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[10,20,30,16,4] parameter(0)\n filter = s8[2,2,16,128,4] parameter(1)\n bias = f32[10] parameter(2)\n side_input = s8[10,20,30,16,4] parameter(3)\n ROOT result = (s8[10,20,30,32,4], u8[0]) custom-call(input, filter, bias, side_input),\n window={size=2x2}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 0}, module.get()));\n EXPECT_FALSE(changed);\n}\nTEST_F(CudnnVectorizeConvolutionsTest, Vectorize16To32) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[10,20,30,4,16] parameter(0)\n filter = s8[3,5,4,192,16] parameter(1)\n ROOT result = (s8[10,20,30,12,16], u8[0]) custom-call(input, filter),\n window={size=3x5}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));\n EXPECT_TRUE(changed);\n SCOPED_TRACE(module->ToString());\n auto* root = module->entry_computation()->root_instruction();\n const HloInstruction* conv = nullptr;\n auto filter_pat =\n m::Reshape(\n m::Transpose(\n m::Reshape(m::Parameter(1)).WithShape(S8, {3, 5, 2, 2, 192, 16}))\n .WithShape(S8, {3, 5, 2, 192, 2, 16}))\n .WithShape(S8, {3, 5, 2, 192, 32});\n auto conv_pat =\n m::GetTupleElement(\n m::CustomCall(\n &conv, {kCudnnConvForwardCallTarget},\n m::Reshape(\n m::Transpose(m::Reshape(m::Parameter(0))\n 
.WithShape(S8, {10, 20, 30, 2, 2, 16}))\n .WithShape(S8, {10, 20, 30, 2, 2, 16}))\n .WithShape(S8, {10, 20, 30, 2, 32}),\n m::Reshape(\n m::Transpose(m::Reshape(filter_pat)\n .WithShape(S8, {3, 5, 2, 24, 4, 2, 8, 4}))\n .WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4}))\n .WithShape(S8, {192, 2, 3, 5, 32}))\n .WithConvDnums(\"b01f_oi01->b01f\"))\n .WithShape(S8, {10, 20, 30, 6, 32});\n ASSERT_THAT(root, GmockMatch(m::Tuple(\n m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape(\n S8, {10, 20, 30, 6, 2, 16}))\n .WithShape(S8, {10, 20, 30, 6, 2, 16}))\n .WithShape(S8, {10, 20, 30, 12, 16}),\n m::Op())));\n EXPECT_TRUE(conv->backend_config()\n ->cudnn_conv_backend_config()\n .reordered_int8_nchw_vect());\n}\nTEST_F(CudnnVectorizeConvolutionsTest, VectorizeMixedTo32) {\n auto module = ParseAndReturnVerifiedModule(R\"(\n HloModule TestModule\n ENTRY TestComputation {\n input = s8[10,20,30,8,8] parameter(0)\n filter = s8[3,5,2,192,32] parameter(1)\n ROOT result = (s8[10,20,30,96,2], u8[0]) custom-call(input, filter),\n window={size=3x5}, dim_labels=b01f_01io->b01f,\n custom_call_target=\"__cudnn$convForward\"\n })\")\n .value();\n TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));\n EXPECT_TRUE(changed);\n SCOPED_TRACE(module->ToString());\n auto* root = module->entry_computation()->root_instruction();\n const HloInstruction* conv = nullptr;\n auto conv_pat =\n m::GetTupleElement(\n m::CustomCall(\n &conv, {kCudnnConvForwardCallTarget},\n m::Reshape(m::Transpose(m::Reshape(m::Parameter(0))\n .WithShape(S8, {10, 20, 30, 2, 4, 8}))\n .WithShape(S8, {10, 20, 30, 2, 4, 8}))\n .WithShape(S8, {10, 20, 30, 2, 32}),\n m::Reshape(\n m::Transpose(m::Reshape(m::Parameter(1))\n .WithShape(S8, {3, 5, 2, 24, 4, 2, 8, 4}))\n .WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4}))\n .WithShape(S8, {192, 2, 3, 5, 32}))\n .WithConvDnums(\"b01f_oi01->b01f\"))\n .WithShape(S8, {10, 20, 30, 6, 32});\n ASSERT_THAT(root, GmockMatch(m::Tuple(\n m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape(\n S8, {10, 20, 30, 6, 16, 2}))\n .WithShape(S8, {10, 20, 30, 6, 16, 2}))\n .WithShape(S8, {10, 20, 30, 96, 2}),\n m::Op())));\n EXPECT_TRUE(conv->backend_config()\n ->cudnn_conv_backend_config()\n .reordered_int8_nchw_vect());\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_vectorize_convolutions.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_vectorize_convolutions_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":283,"cells":{"ID":{"kind":"string","value":"376cd3aa-a22b-41e5-b02a-66981383442d"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"quantize_nodes"},"File Path in Repository":{"kind":"string","value":"tensorflow/tools/graph_transforms/quantize_nodes.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/tools/graph_transforms/quantize_nodes_test.cc"},"Code":{"kind":"string","value":"#define EIGEN_USE_THREADS\n#include \"tensorflow/core/common_runtime/constant_folding.h\"\n#include \"tensorflow/core/common_runtime/graph_constructor.h\"\n#include \"tensorflow/core/common_runtime/threadpool_device.h\"\n#include \"tensorflow/core/graph/node_builder.h\"\n#include 
\"tensorflow/core/graph/subgraph.h\"\n#include \"tensorflow/core/kernels/quantization_utils.h\"\n#include \"tensorflow/core/platform/init_main.h\"\n#include \"tensorflow/core/public/session.h\"\n#include \"tensorflow/tools/graph_transforms/transform_utils.h\"\nnamespace tensorflow {\nnamespace graph_transforms {\nstruct QuantizedOpInfo {\n string float_name;\n std::vector attrs_to_copy;\n std::vector> dtypes_to_set;\n DataType input_bit_depth;\n DataType output_bit_depth;\n std::set unquantized_inputs;\n enum { CONTIGUOUS_MIN_MAX, SEPARATE_MIN_MAX } min_max_order;\n};\nconst std::vector& GetQuantizedOpList() {\n static const std::vector op_list = {\n {\"Add\",\n {},\n {{\"T1\", DT_QUINT8}, {\"T2\", DT_QUINT8}, {\"Toutput\", DT_QINT32}},\n DT_QUINT8,\n DT_QINT32,\n {},\n QuantizedOpInfo::CONTIGUOUS_MIN_MAX},\n {\"AvgPool\",\n {\"ksize\", \"strides\", \"padding\"},\n {{\"T\", DT_QUINT8}},\n DT_QUINT8,\n DT_QUINT8,\n {},\n QuantizedOpInfo::CONTIGUOUS_MIN_MAX},\n {\"BiasAdd\",\n {},\n {{\"T1\", DT_QUINT8}, {\"T2\", DT_QUINT8}, {\"out_type\", DT_QINT32}},\n DT_QUINT8,\n DT_QINT32,\n {},\n QuantizedOpInfo::CONTIGUOUS_MIN_MAX},\n {\"Concat\",\n {\"N\"},\n {{\"T\", DT_QUINT8}},\n DT_QUINT8,\n DT_QUINT8,\n {0},\n QuantizedOpInfo::SEPARATE_MIN_MAX},\n {\"Conv2D\",\n {\"strides\", \"padding\"},\n {{\"Tinput\", DT_QUINT8}, {\"Tfilter\", DT_QUINT8}, {\"out_type\", DT_QINT32}},\n DT_QUINT8,\n DT_QINT32,\n {},\n QuantizedOpInfo::CONTIGUOUS_MIN_MAX},\n {\"MatMul\",\n {\"transpose_a\", \"transpose_b\"},\n {{\"T1\", DT_QUINT8}, {\"T2\", DT_QUINT8}, {\"Toutput\", DT_QINT32}},\n DT_QUINT8,\n DT_QINT32,\n {},\n QuantizedOpInfo::CONTIGUOUS_MIN_MAX},\n {\"MaxPool\",\n {\"ksize\", \"strides\", \"padding\"},\n {{\"T\", DT_QUINT8}},\n DT_QUINT8,\n DT_QUINT8,\n {},\n QuantizedOpInfo::CONTIGUOUS_MIN_MAX},\n {\"Mul\",\n {},\n {{\"T1\", DT_QUINT8}, {\"T2\", DT_QUINT8}, {\"Toutput\", DT_QINT32}},\n DT_QUINT8,\n DT_QINT32,\n {},\n QuantizedOpInfo::CONTIGUOUS_MIN_MAX},\n {\"Relu\",\n {},\n {{\"Tinput\", DT_QUINT8}},\n DT_QUINT8,\n DT_QUINT8,\n {},\n QuantizedOpInfo::CONTIGUOUS_MIN_MAX},\n {\"ResizeBilinear\",\n {\"align_corners\"},\n {{\"T\", DT_QUINT8}},\n DT_QUINT8,\n DT_QUINT8,\n {1},\n QuantizedOpInfo::CONTIGUOUS_MIN_MAX},\n {\"Relu6\",\n {},\n {{\"Tinput\", DT_QUINT8}},\n DT_QUINT8,\n DT_QUINT8,\n {},\n QuantizedOpInfo::CONTIGUOUS_MIN_MAX},\n {\"Reshape\",\n {},\n {{\"T\", DT_QUINT8}},\n DT_QUINT8,\n DT_QUINT8,\n {1},\n QuantizedOpInfo::CONTIGUOUS_MIN_MAX},\n };\n return op_list;\n}\nnamespace {\nstring UniqueNodeNameFromInput(const string& input_name) {\n string prefix;\n string node_name;\n string suffix;\n NodeNamePartsFromInput(input_name, &prefix, &node_name, &suffix);\n string result;\n if (prefix == \"^\") {\n result += \"__hat__\";\n }\n result += node_name;\n if (!suffix.empty()) {\n result += \"__port__\" + suffix.substr(1, suffix.size() - 1);\n }\n return result;\n}\nStatus ExtractRangeFromParams(const TransformFuncContext& context,\n const string& min_name, const string& max_name,\n float* min_value, float* max_value,\n bool* has_range) {\n const bool has_min = (context.params.count(min_name) != 0);\n const bool has_max = (context.params.count(max_name) != 0);\n *has_range = (has_min || has_max);\n if (!*has_range) {\n return OkStatus();\n }\n if (!has_min || !has_max) {\n return errors::InvalidArgument(\"You must pass both \", min_name, \" and \",\n max_name, \" into quantize_nodes\");\n }\n TF_RETURN_IF_ERROR(context.GetOneFloatParameter(min_name, 0.0f, min_value));\n 
TF_RETURN_IF_ERROR(context.GetOneFloatParameter(max_name, 0.0f, max_value));\n return OkStatus();\n}\n} \nStatus MergeDuplicateNodes(const GraphDef& input_graph_def,\n const TransformFuncContext& context,\n GraphDef* output_graph_def) {\n std::set input_names(context.input_names.begin(),\n context.input_names.end());\n std::set output_names(context.output_names.begin(),\n context.output_names.end());\n GraphDef current_graph_def = input_graph_def;\n bool any_duplicates_found;\n do {\n any_duplicates_found = false;\n std::map> hashed_nodes;\n for (const NodeDef& node : current_graph_def.node()) {\n NodeDef nameless_node = node;\n if (!input_names.count(node.name()) && !output_names.count(node.name())) {\n nameless_node.set_name(\"\");\n }\n const uint64 hash = HashNodeDef(nameless_node);\n hashed_nodes[hash].push_back(&node);\n }\n std::map inputs_to_rename;\n GraphDef merged_graph_def;\n for (const std::pair>&\n hashed_node_info : hashed_nodes) {\n const std::vector& hash_node_list =\n hashed_node_info.second;\n for (int i = 0; i < hash_node_list.size(); ++i) {\n const NodeDef* current_node = hash_node_list[i];\n const OpDef* op_def = nullptr;\n TF_RETURN_IF_ERROR(\n OpRegistry::Global()->LookUpOpDef(current_node->op(), &op_def));\n const bool is_duplicate = ((!op_def->is_stateful()) && (i > 0));\n if (is_duplicate) {\n const string original_name = hash_node_list[0]->name();\n inputs_to_rename[current_node->name() + \":*\"] = original_name;\n any_duplicates_found = true;\n } else {\n NodeDef* new_node = merged_graph_def.mutable_node()->Add();\n *new_node = *current_node;\n }\n }\n }\n TF_RETURN_IF_ERROR(RenameNodeInputs(merged_graph_def, inputs_to_rename,\n std::unordered_set(),\n &current_graph_def));\n } while (any_duplicates_found);\n *output_graph_def = current_graph_def;\n return OkStatus();\n}\nStatus RemoveRedundantQuantizations(const GraphDef& input_graph_def,\n const TransformFuncContext& context,\n GraphDef* output_graph_def) {\n std::set graph_outputs;\n for (const string& output_name : context.output_names) {\n graph_outputs.insert(NodeNameFromInput(output_name));\n }\n std::map inputs_to_rename;\n GraphDef replaced_graph_def;\n TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(\n input_graph_def, \n {\"QuantizeV2\",\n {\n {\"Dequantize\"},\n {\"Min\"},\n {\"Max\"},\n }\n }, \n [&inputs_to_rename, &graph_outputs](const NodeMatch& match,\n const std::set& input_nodes,\n const std::set& output_nodes,\n std::vector* new_nodes) {\n const NodeDef& quantize_node = match.node;\n const NodeDef& dequantize_node = match.inputs[0].node;\n inputs_to_rename[quantize_node.name() + \":0\"] =\n dequantize_node.input(0);\n inputs_to_rename[quantize_node.name() + \":1\"] =\n dequantize_node.input(1);\n inputs_to_rename[quantize_node.name() + \":2\"] =\n dequantize_node.input(2);\n if (output_nodes.count(dequantize_node.name()) ||\n graph_outputs.count(dequantize_node.name())) {\n CopyOriginalMatch(match, new_nodes);\n }\n return OkStatus();\n },\n {true}, &replaced_graph_def));\n return RenameNodeInputs(replaced_graph_def, inputs_to_rename,\n std::unordered_set(), output_graph_def);\n}\nStatus QuantizePlaceholders(const GraphDef& input_graph_def,\n const TransformFuncContext& context,\n GraphDef* output_graph_def) {\n float input_min;\n float input_max;\n bool has_input_range;\n TF_RETURN_IF_ERROR(ExtractRangeFromParams(context, \"input_min\", \"input_max\",\n &input_min, &input_max,\n &has_input_range));\n if (!has_input_range) {\n *output_graph_def = input_graph_def;\n return OkStatus();\n }\n 
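/* Two rename passes are needed: the first points every consumer of the placeholder at the new Dequantize node, while a temporary rename_suffix alias protects the Dequantize's own input from being rewritten; the second pass strips the suffix so that input resolves back to the placeholder. */\n 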
std::map inputs_to_rename_first_pass;\n std::map inputs_to_rename_second_pass;\n GraphDef placeholder_graph_def;\n placeholder_graph_def.Clear();\n for (const NodeDef& node : input_graph_def.node()) {\n if (node.op() != \"Placeholder\") {\n *(placeholder_graph_def.mutable_node()->Add()) = node;\n } else {\n string namespace_prefix = node.name() + \"_eightbit\";\n NodeDef quantized_placeholder;\n quantized_placeholder = node;\n SetNodeAttr(\"dtype\", DT_QUINT8, &quantized_placeholder);\n *(placeholder_graph_def.mutable_node()->Add()) = quantized_placeholder;\n NodeDef min_node;\n min_node.set_op(\"Const\");\n min_node.set_name(namespace_prefix + \"/min\");\n SetNodeAttr(\"dtype\", DT_FLOAT, &min_node);\n Tensor min_tensor(DT_FLOAT, {});\n min_tensor.flat()(0) = input_min;\n SetNodeTensorAttr(\"value\", min_tensor, &min_node);\n *(placeholder_graph_def.mutable_node()->Add()) = min_node;\n NodeDef max_node;\n max_node.set_op(\"Const\");\n max_node.set_name(namespace_prefix + \"/max\");\n SetNodeAttr(\"dtype\", DT_FLOAT, &max_node);\n Tensor max_tensor(DT_FLOAT, {});\n max_tensor.flat()(0) = input_max;\n SetNodeTensorAttr(\"value\", max_tensor, &max_node);\n *(placeholder_graph_def.mutable_node()->Add()) = max_node;\n const string rename_suffix = \"__RENAMED_PLACEHOLDER__\";\n NodeDef dequantize_node;\n dequantize_node.set_op(\"Dequantize\");\n dequantize_node.set_name(namespace_prefix + \"/dequantize\");\n SetNodeAttr(\"T\", DT_QUINT8, &dequantize_node);\n SetNodeAttr(\"mode\", \"MIN_FIRST\", &dequantize_node);\n AddNodeInput(node.name() + rename_suffix, &dequantize_node);\n AddNodeInput(min_node.name(), &dequantize_node);\n AddNodeInput(max_node.name(), &dequantize_node);\n *(placeholder_graph_def.mutable_node()->Add()) = dequantize_node;\n inputs_to_rename_first_pass[node.name()] = dequantize_node.name();\n inputs_to_rename_second_pass[node.name() + rename_suffix] = node.name();\n }\n }\n GraphDef first_pass_graph_def;\n TF_RETURN_IF_ERROR(\n RenameNodeInputs(placeholder_graph_def, inputs_to_rename_first_pass,\n std::unordered_set(), &first_pass_graph_def));\n TF_RETURN_IF_ERROR(\n RenameNodeInputs(first_pass_graph_def, inputs_to_rename_second_pass,\n std::unordered_set(), output_graph_def));\n return OkStatus();\n}\nStatus ConvertFakeQuantsToRequantize(const GraphDef& input_graph_def,\n const TransformFuncContext& context,\n GraphDef* output_graph_def) {\n TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(\n input_graph_def, \n {\"FakeQuantWithMinMaxVars\",\n {\n {\"*\"},\n {\"Const\"},\n {\"Const\"},\n }\n }, \n [](const NodeMatch& match, const std::set& input_nodes,\n const std::set& output_nodes,\n std::vector* new_nodes) {\n const NodeDef& fake_quant_node = match.node;\n const NodeDef& original_op_node = match.inputs[0].node;\n const NodeDef& fake_quant_min_node = match.inputs[1].node;\n const NodeDef& fake_quant_max_node = match.inputs[2].node;\n string namespace_prefix = fake_quant_node.name() + \"_eightbit\";\n new_nodes->push_back(original_op_node);\n new_nodes->push_back(fake_quant_min_node);\n new_nodes->push_back(fake_quant_max_node);\n NodeDef quantize_node;\n quantize_node.set_op(\"QuantizeV2\");\n quantize_node.set_name(namespace_prefix + \"/quantize\");\n SetNodeAttr(\"T\", DT_QINT32, &quantize_node);\n SetNodeAttr(\"mode\", \"MIN_FIRST\", &quantize_node);\n AddNodeInput(fake_quant_node.input(0), &quantize_node);\n AddNodeInput(fake_quant_min_node.name(), &quantize_node);\n AddNodeInput(fake_quant_max_node.name(), &quantize_node);\n new_nodes->push_back(quantize_node);\n NodeDef 
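/* rescale the qint32 result of QuantizeV2 into quint8, using the fake-quant min/max constants as the requested output range */\n 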
// Replaces each FakeQuantWithMinMaxVars (with Const min/max) by an explicit
// QuantizeV2 -> Requantize -> Dequantize chain, so the recorded range can be
// folded into the later quantized rewrite.
Status ConvertFakeQuantsToRequantize(const GraphDef& input_graph_def,
                                     const TransformFuncContext& context,
                                     GraphDef* output_graph_def) {
  TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
      input_graph_def,
      {"FakeQuantWithMinMaxVars",
       {
           {"*"},
           {"Const"},
           {"Const"},
       }},
      [](const NodeMatch& match, const std::set<string>& input_nodes,
         const std::set<string>& output_nodes,
         std::vector<NodeDef>* new_nodes) {
        const NodeDef& fake_quant_node = match.node;
        const NodeDef& original_op_node = match.inputs[0].node;
        const NodeDef& fake_quant_min_node = match.inputs[1].node;
        const NodeDef& fake_quant_max_node = match.inputs[2].node;
        string namespace_prefix = fake_quant_node.name() + "_eightbit";
        new_nodes->push_back(original_op_node);
        new_nodes->push_back(fake_quant_min_node);
        new_nodes->push_back(fake_quant_max_node);
        NodeDef quantize_node;
        quantize_node.set_op("QuantizeV2");
        quantize_node.set_name(namespace_prefix + "/quantize");
        SetNodeAttr("T", DT_QINT32, &quantize_node);
        SetNodeAttr("mode", "MIN_FIRST", &quantize_node);
        AddNodeInput(fake_quant_node.input(0), &quantize_node);
        AddNodeInput(fake_quant_min_node.name(), &quantize_node);
        AddNodeInput(fake_quant_max_node.name(), &quantize_node);
        new_nodes->push_back(quantize_node);
        NodeDef requantize_node;
        requantize_node.set_op("Requantize");
        requantize_node.set_name(namespace_prefix + "/requantize");
        SetNodeAttr("Tinput", DT_QINT32, &requantize_node);
        SetNodeAttr("out_type", DT_QUINT8, &requantize_node);
        AddNodeInput(quantize_node.name() + ":0", &requantize_node);
        AddNodeInput(quantize_node.name() + ":1", &requantize_node);
        AddNodeInput(quantize_node.name() + ":2", &requantize_node);
        AddNodeInput(fake_quant_min_node.name(), &requantize_node);
        AddNodeInput(fake_quant_max_node.name(), &requantize_node);
        new_nodes->push_back(requantize_node);
        // The Dequantize takes over the FakeQuant's name, so downstream
        // consumers are preserved unchanged.
        NodeDef dequantize_node;
        dequantize_node.set_op("Dequantize");
        dequantize_node.set_name(fake_quant_node.name());
        SetNodeAttr("T", DT_QUINT8, &dequantize_node);
        SetNodeAttr("mode", "MIN_FIRST", &dequantize_node);
        AddNodeInput(requantize_node.name() + ":0", &dequantize_node);
        AddNodeInput(requantize_node.name() + ":1", &dequantize_node);
        AddNodeInput(requantize_node.name() + ":2", &dequantize_node);
        new_nodes->push_back(dequantize_node);
        return OkStatus();
      },
      {}, output_graph_def));
  return OkStatus();
}

// Collapses the Requantize -> Dequantize -> QuantizeV2 -> Requantize chains
// that the FakeQuant conversion leaves behind into a single Requantize fed
// directly from the original quantized op.
Status MergeAdjacentRequantizes(const GraphDef& input_graph_def,
                                const TransformFuncContext& context,
                                GraphDef* output_graph_def) {
  TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
      input_graph_def,
      {"Requantize",
       {
           {"QuantizeV2",
            {
                {"Dequantize",
                 {
                     {"Requantize",
                      {
                          {"*"},
                          {"*"},
                          {"*"},
                          {"RequantizationRange"},
                          {"RequantizationRange"},
                      }},
                     {"Requantize"},
                     {"Requantize"},
                 }},
                {"Const"},
                {"Const"},
            }},
           {"QuantizeV2"},
           {"QuantizeV2"},
           {"Const"},
           {"Const"},
       }},
      [](const NodeMatch& match, const std::set<string>& input_nodes,
         const std::set<string>& output_nodes,
         std::vector<NodeDef>* new_nodes) {
        const NodeDef& fake_requantize_node = match.node;
        const NodeDef& original_op_node =
            match.inputs[0].inputs[0].inputs[0].inputs[0].node;
        const NodeDef& fake_requantize_min_node = match.inputs[3].node;
        const NodeDef& fake_requantize_max_node = match.inputs[4].node;
        new_nodes->push_back(original_op_node);
        new_nodes->push_back(fake_requantize_min_node);
        new_nodes->push_back(fake_requantize_max_node);
        NodeDef requantize_node;
        requantize_node = fake_requantize_node;
        requantize_node.mutable_input()->Clear();
        AddNodeInput(original_op_node.name() + ":0", &requantize_node);
        AddNodeInput(original_op_node.name() + ":1", &requantize_node);
        AddNodeInput(original_op_node.name() + ":2", &requantize_node);
        AddNodeInput(fake_requantize_min_node.name(), &requantize_node);
        AddNodeInput(fake_requantize_max_node.name(), &requantize_node);
        new_nodes->push_back(requantize_node);
        return OkStatus();
      },
      {}, output_graph_def));
  return OkStatus();
}
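// A rough standalone sketch of what a Requantize step computes: re-express a
// 32-bit code with range [in_min, in_max] as an 8-bit code with range
// [out_min, out_max], by going through the real number line. The int32 range
// convention used here is an assumption; TF's kernels use fixed-point
// arithmetic and differ in rounding, so treat this as illustration only.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

uint8_t RequantizeSketch(int32_t q, double in_min, double in_max,
                         double out_min, double out_max) {
  // Recover the real value the 32-bit code stands for...
  const double in_scale = (in_max - in_min) / 4294967295.0;
  const double real = (q - static_cast<double>(INT32_MIN)) * in_scale + in_min;
  // ...then quantize it into the 8-bit output range.
  const double clamped = std::min(out_max, std::max(out_min, real));
  return static_cast<uint8_t>(
      std::lround((clamped - out_min) * 255.0 / (out_max - out_min)));
}

int main() {
  // 6.0 encoded in a [-50, 50] int32 range, requantized into [0, 18] uint8.
  const double in_min = -50.0, in_max = 50.0;
  const double in_scale = (in_max - in_min) / 4294967295.0;
  const int32_t q32 = static_cast<int32_t>(
      std::llround((6.0 - in_min) / in_scale) + INT32_MIN);
  const uint8_t q8 = RequantizeSketch(q32, in_min, in_max, 0.0, 18.0);
  std::printf("int32 %d -> uint8 %u (~%.3f)\n", q32, q8, q8 * 18.0 / 255.0);
  return 0;
}
// End of sketch.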
// Moves FakeQuantWithMinMaxVars nodes earlier in the graph, past chains of up
// to max_depth single-input ops, so the recorded range sits closer to the
// tensor it actually describes.
Status HoistFakeQuants(const GraphDef& input_graph_def,
                       const TransformFuncContext& context,
                       GraphDef* output_graph_def) {
  GraphDef current_graph_def = input_graph_def;
  const int max_depth = 3;
  for (int depth = max_depth; depth > 0; --depth) {
    OpTypePattern pattern = {"*"};
    for (int i = 0; i < depth; ++i) {
      pattern = {"*", {pattern}};
    }
    pattern = {"FakeQuantWithMinMaxVars", {pattern, {"Const"}, {"Const"}}};
    GraphDef hoisted_graph_def;
    TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
        current_graph_def, pattern,
        [depth](const NodeMatch& match, const std::set<string>& input_nodes,
                const std::set<string>& output_nodes,
                std::vector<NodeDef>* new_nodes) {
          const NodeDef& fake_quant_node = match.node;
          const NodeDef& fake_quant_min_node = match.inputs[1].node;
          const NodeDef& fake_quant_max_node = match.inputs[2].node;
          std::vector<NodeDef> linear_nodes;
          NodeMatch current_match = match;
          for (int i = 0; i <= depth; ++i) {
            linear_nodes.push_back(current_match.inputs[0].node);
            current_match = current_match.inputs[0];
          }
          NodeDef new_fake_quant_node;
          new_fake_quant_node = fake_quant_node;
          new_fake_quant_node.set_name(fake_quant_node.name() + "_hoisted");
          new_fake_quant_node.set_input(
              0, linear_nodes[linear_nodes.size() - 2].input(0));
          new_nodes->push_back(new_fake_quant_node);
          new_nodes->push_back(fake_quant_min_node);
          new_nodes->push_back(fake_quant_max_node);
          linear_nodes[linear_nodes.size() - 2].set_input(
              0, new_fake_quant_node.name());
          // The last node in the chain inherits the FakeQuant's name, so its
          // downstream consumers stay valid.
          linear_nodes.front().set_name(fake_quant_node.name());
          for (const NodeDef& linear_node : linear_nodes) {
            new_nodes->push_back(linear_node);
          }
          return OkStatus();
        },
        {}, &hoisted_graph_def));
    current_graph_def = hoisted_graph_def;
  }
  *output_graph_def = current_graph_def;
  return OkStatus();
}
// The core transform: rewrites every supported float op into its Quantized*
// equivalent, wrapping each input in a dynamic-range QuantizeV2 subgraph and
// each output in a Dequantize, then runs the cleanup passes above.
Status QuantizeNodes(const GraphDef& input_graph_def,
                     const TransformFuncContext& context,
                     GraphDef* output_graph_def) {
  std::set<string> ops_to_ignore;
  if (context.params.count("ignore_op") > 0) {
    for (const string& name : context.params.at("ignore_op")) {
      ops_to_ignore.insert(name);
    }
  }
  const std::vector<QuantizedOpInfo>& op_list = GetQuantizedOpList();
  string op_pattern;
  bool is_first = true;
  std::map<string, QuantizedOpInfo> op_map;
  for (const QuantizedOpInfo& op_info : op_list) {
    if (ops_to_ignore.count(op_info.float_name) == 0) {
      strings::StrAppend(&op_pattern, (is_first ? "" : "|"),
                         op_info.float_name);
      op_map.insert({op_info.float_name, op_info});
      is_first = false;
    }
  }
  GraphDef placeholder_graph_def;
  TF_RETURN_IF_ERROR(
      QuantizePlaceholders(input_graph_def, context, &placeholder_graph_def));
  TF_RETURN_IF_ERROR(IsGraphValid(placeholder_graph_def));
  GraphDef hoisted_graph_def;
  TF_RETURN_IF_ERROR(
      HoistFakeQuants(placeholder_graph_def, context, &hoisted_graph_def));
  TF_RETURN_IF_ERROR(IsGraphValid(hoisted_graph_def));
  GraphDef converted_graph_def;
  TF_RETURN_IF_ERROR(ConvertFakeQuantsToRequantize(hoisted_graph_def, context,
                                                   &converted_graph_def));
  TF_RETURN_IF_ERROR(IsGraphValid(converted_graph_def));
  float fallback_min;
  float fallback_max;
  bool has_fallback_range;
  TF_RETURN_IF_ERROR(ExtractRangeFromParams(
      context, "fallback_min", "fallback_max", &fallback_min, &fallback_max,
      &has_fallback_range));
  GraphDef quantized_graph_def;
  TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
      converted_graph_def, {op_pattern},
      [&op_map, fallback_min, fallback_max, has_fallback_range](
          const NodeMatch& match, const std::set<string>& input_nodes,
          const std::set<string>& output_nodes,
          std::vector<NodeDef>* new_nodes) {
        const NodeDef& float_node = match.node;
        const QuantizedOpInfo& op_info = op_map[float_node.op()];
        DataTypeVector input_types;
        DataTypeVector output_types;
        TF_RETURN_IF_ERROR(
            GetInOutTypes(float_node, &input_types, &output_types));
        bool are_all_float = true;
        for (int i = 0; i < float_node.input_size(); ++i) {
          if (op_info.unquantized_inputs.count(i)) {
            continue;
          }
          if (i >= input_types.size()) {
            LOG(ERROR) << "input_types has incorrect size "
                       << input_types.size() << " <= " << i
                       << ". Assuming everything else is floats.";
          }
          if (i < input_types.size() && input_types[i] != DT_FLOAT) {
            are_all_float = false;
          }
        }
        for (const DataType& output_type : output_types) {
          if (output_type != DT_FLOAT) {
            are_all_float = false;
          }
        }
        if (!are_all_float) {
          // Non-float instances of these ops (e.g. integer Reshapes) are
          // left untouched.
          CopyOriginalMatch(match, new_nodes);
          return OkStatus();
        }
        string namespace_prefix = float_node.name() + "_eightbit";
        std::vector<string> quantized_input_names;
        for (int i = 0; i < float_node.input_size(); ++i) {
          if (op_info.unquantized_inputs.count(i)) {
            continue;
          }
          const string& input_name = float_node.input(i);
          string unique_input_name =
              namespace_prefix + "/" + UniqueNodeNameFromInput(input_name);
          // Compute the input's range at runtime (Reshape to 1-D, then
          // Min/Max over it) and quantize with that dynamic range.
          NodeDef reshape_dims;
          reshape_dims.set_op("Const");
          reshape_dims.set_name(unique_input_name + "/reshape_dims");
          AddNodeInput("^" + NodeNameFromInput(input_name), &reshape_dims);
          SetNodeAttr("dtype", DT_INT32, &reshape_dims);
          Tensor reshape_dims_tensor(DT_INT32, {1});
          reshape_dims_tensor.flat<int32>()(0) = -1;
          SetNodeTensorAttr<int32>("value", reshape_dims_tensor,
                                   &reshape_dims);
          new_nodes->push_back(reshape_dims);
          NodeDef reduction_dims;
          reduction_dims.set_op("Const");
          reduction_dims.set_name(unique_input_name + "/reduction_dims");
          AddNodeInput("^" + NodeNameFromInput(input_name), &reduction_dims);
          SetNodeAttr("dtype", DT_INT32, &reduction_dims);
          Tensor reduction_dims_tensor(DT_INT32, {1});
          reduction_dims_tensor.flat<int32>()(0) = 0;
          SetNodeTensorAttr<int32>("value", reduction_dims_tensor,
                                   &reduction_dims);
          new_nodes->push_back(reduction_dims);
          NodeDef reshape_node;
          reshape_node.set_op("Reshape");
          reshape_node.set_name(unique_input_name + "/reshape");
          SetNodeAttr("T", DT_FLOAT, &reshape_node);
          AddNodeInput(input_name, &reshape_node);
          AddNodeInput(reshape_dims.name(), &reshape_node);
          new_nodes->push_back(reshape_node);
          NodeDef min_node;
          min_node.set_op("Min");
          min_node.set_name(unique_input_name + "/min");
          SetNodeAttr("T", DT_FLOAT, &min_node);
          SetNodeAttr("keep_dims", false, &min_node);
          AddNodeInput(reshape_node.name(), &min_node);
          AddNodeInput(reduction_dims.name(), &min_node);
          new_nodes->push_back(min_node);
          NodeDef max_node;
          max_node.set_op("Max");
          max_node.set_name(unique_input_name + "/max");
          SetNodeAttr("T", DT_FLOAT, &max_node);
          SetNodeAttr("keep_dims", false, &max_node);
          AddNodeInput(reshape_node.name(), &max_node);
          AddNodeInput(reduction_dims.name(), &max_node);
          new_nodes->push_back(max_node);
          NodeDef quantize_node;
          quantize_node.set_op("QuantizeV2");
          quantize_node.set_name(unique_input_name + "/quantize");
          SetNodeAttr("T", DT_QUINT8, &quantize_node);
          SetNodeAttr("mode", "MIN_FIRST", &quantize_node);
          AddNodeInput(input_name, &quantize_node);
          AddNodeInput(min_node.name(), &quantize_node);
          AddNodeInput(max_node.name(), &quantize_node);
          new_nodes->push_back(quantize_node);
          quantized_input_names.push_back(quantize_node.name());
        }
        NodeDef quantized_main_node;
        quantized_main_node.set_op("Quantized" + float_node.op());
        quantized_main_node.set_name(float_node.name() + "/eightbit");
        for (const string& attr_to_copy : op_info.attrs_to_copy) {
          CopyNodeAttr(float_node, attr_to_copy, attr_to_copy,
                       &quantized_main_node);
        }
        for (const std::pair<string, DataType>& dtype_to_set :
             op_info.dtypes_to_set) {
          SetNodeAttr(dtype_to_set.first, dtype_to_set.second,
                      &quantized_main_node);
        }
        int quantized_input_index = 0;
        for (int i = 0; i < float_node.input_size(); ++i) {
          if (op_info.unquantized_inputs.count(i)) {
            AddNodeInput(float_node.input(i), &quantized_main_node);
          } else {
            const string& quantized_input_name =
                quantized_input_names[quantized_input_index];
            AddNodeInput(quantized_input_name + ":0", &quantized_main_node);
            ++quantized_input_index;
          }
        }
        if (op_info.min_max_order == QuantizedOpInfo::CONTIGUOUS_MIN_MAX) {
          for (const string& quantized_input_name : quantized_input_names) {
            AddNodeInput(quantized_input_name + ":1", &quantized_main_node);
            AddNodeInput(quantized_input_name + ":2", &quantized_main_node);
          }
        } else {
          for (const string& quantized_input_name : quantized_input_names) {
            AddNodeInput(quantized_input_name + ":1", &quantized_main_node);
          }
          for (const string& quantized_input_name : quantized_input_names) {
            AddNodeInput(quantized_input_name + ":2", &quantized_main_node);
          }
        }
        new_nodes->push_back(quantized_main_node);
        string eight_bit_node_name;
        if (op_info.output_bit_depth == DT_QINT32) {
          // 32-bit outputs need to be requantized down to eight bits, either
          // with a user-supplied fallback range or a dynamic
          // RequantizationRange op.
          string requantize_min_input;
          string requantize_max_input;
          if (has_fallback_range) {
            NodeDef fallback_min_node;
            fallback_min_node.set_op("Const");
            fallback_min_node.set_name(quantized_main_node.name() +
                                       "/fallback_min");
            SetNodeAttr("dtype", DT_FLOAT, &fallback_min_node);
            Tensor fallback_min_tensor(DT_FLOAT, {});
            fallback_min_tensor.flat<float>()(0) = fallback_min;
            SetNodeTensorAttr<float>("value", fallback_min_tensor,
                                     &fallback_min_node);
            new_nodes->push_back(fallback_min_node);
            NodeDef fallback_max_node;
            fallback_max_node.set_op("Const");
            fallback_max_node.set_name(quantized_main_node.name() +
                                       "/fallback_max");
            SetNodeAttr("dtype", DT_FLOAT, &fallback_max_node);
            Tensor fallback_max_tensor(DT_FLOAT, {});
            fallback_max_tensor.flat<float>()(0) = fallback_max;
            SetNodeTensorAttr<float>("value", fallback_max_tensor,
                                     &fallback_max_node);
            new_nodes->push_back(fallback_max_node);
            requantize_min_input = fallback_min_node.name();
            requantize_max_input = fallback_max_node.name();
          } else {
            NodeDef requant_range_node;
            requant_range_node.set_op("RequantizationRange");
            requant_range_node.set_name(quantized_main_node.name() +
                                        "/requant_range");
            SetNodeAttr("Tinput", DT_QINT32, &requant_range_node);
            AddNodeInput(quantized_main_node.name() + ":0",
                         &requant_range_node);
            AddNodeInput(quantized_main_node.name() + ":1",
                         &requant_range_node);
            AddNodeInput(quantized_main_node.name() + ":2",
                         &requant_range_node);
            new_nodes->push_back(requant_range_node);
            requantize_min_input = requant_range_node.name() + ":0";
            requantize_max_input = requant_range_node.name() + ":1";
          }
          NodeDef requantize_node;
          requantize_node.set_op("Requantize");
          requantize_node.set_name(quantized_main_node.name() + "/requantize");
          SetNodeAttr("Tinput", DT_QINT32, &requantize_node);
          SetNodeAttr("out_type", DT_QUINT8, &requantize_node);
          AddNodeInput(quantized_main_node.name() + ":0", &requantize_node);
          AddNodeInput(quantized_main_node.name() + ":1", &requantize_node);
          AddNodeInput(quantized_main_node.name() + ":2", &requantize_node);
          AddNodeInput(requantize_min_input, &requantize_node);
          AddNodeInput(requantize_max_input, &requantize_node);
          new_nodes->push_back(requantize_node);
          eight_bit_node_name = requantize_node.name();
        } else {
          eight_bit_node_name = quantized_main_node.name();
        }
        // The Dequantize takes the float node's name, so consumers of the
        // original op keep working without rewiring.
        NodeDef dequantize_node;
        dequantize_node.set_op("Dequantize");
        dequantize_node.set_name(float_node.name());
        SetNodeAttr("T", DT_QUINT8, &dequantize_node);
        SetNodeAttr("mode", "MIN_FIRST", &dequantize_node);
        AddNodeInput(eight_bit_node_name + ":0", &dequantize_node);
        AddNodeInput(eight_bit_node_name + ":1", &dequantize_node);
        AddNodeInput(eight_bit_node_name + ":2", &dequantize_node);
        new_nodes->push_back(dequantize_node);
        return OkStatus();
      },
      {}, &quantized_graph_def));
  TF_RETURN_IF_ERROR(IsGraphValid(quantized_graph_def));
  // Cleanup passes: fold adjacent requantizes, deduplicate the per-input
  // min/max subgraphs, and drop Quantize ops fed directly by Dequantizes.
  GraphDef merged_graph_def;
  TF_RETURN_IF_ERROR(MergeAdjacentRequantizes(quantized_graph_def, context,
                                              &merged_graph_def));
  TF_RETURN_IF_ERROR(IsGraphValid(merged_graph_def));
  GraphDef deduped_graph_def;
  TF_RETURN_IF_ERROR(
      MergeDuplicateNodes(merged_graph_def, context, &deduped_graph_def));
  TF_RETURN_IF_ERROR(IsGraphValid(deduped_graph_def));
  TF_RETURN_IF_ERROR(RemoveRedundantQuantizations(deduped_graph_def, context,
                                                  output_graph_def));
  TF_RETURN_IF_ERROR(IsGraphValid(*output_graph_def));
  return OkStatus();
}

REGISTER_GRAPH_TRANSFORM("quantize_nodes", QuantizeNodes);

REGISTER_GRAPH_TRANSFORM("merge_duplicate_nodes", MergeDuplicateNodes);

}  // namespace graph_transforms
}  // namespace tensorflow
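// A minimal sketch of driving the registered transform directly from C++,
// mirroring what the transform_graph tool does for
// `quantize_nodes(fallback_min=-10, fallback_max=10)`. The file names and the
// tensor names ("model.pb", "input", "softmax") are hypothetical, and this is
// an illustrative driver under those assumptions, not a tool shipped with TF.
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"

namespace tensorflow {
namespace graph_transforms {
Status QuantizeNodes(const GraphDef& input_graph_def,
                     const TransformFuncContext& context,
                     GraphDef* output_graph_def);
}  // namespace graph_transforms
}  // namespace tensorflow

int main() {
  using tensorflow::GraphDef;
  GraphDef float_graph;
  TF_CHECK_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
                                          "model.pb", &float_graph));
  tensorflow::graph_transforms::TransformFuncContext context;
  context.input_names = {"input"};
  context.output_names = {"softmax"};
  context.params = {{"fallback_min", {"-10.0"}}, {"fallback_max", {"10.0"}}};
  GraphDef quantized_graph;
  TF_CHECK_OK(tensorflow::graph_transforms::QuantizeNodes(
      float_graph, context, &quantized_graph));
  TF_CHECK_OK(tensorflow::WriteBinaryProto(tensorflow::Env::Default(),
                                           "quantized.pb", quantized_graph));
  return 0;
}
// End of sketch.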
Unit Test - (Ground Truth):

#define EIGEN_USE_THREADS
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"

namespace tensorflow {
namespace graph_transforms {

// Declarations of the transforms under test, defined in quantize_nodes.cc.
Status QuantizeNodes(const GraphDef& input_graph_def,
                     const TransformFuncContext& context,
                     GraphDef* output_graph_def);
Status RemoveRedundantQuantizations(const GraphDef& input_graph_def,
                                    const TransformFuncContext& context,
                                    GraphDef* output_graph_def);
Status QuantizePlaceholders(const GraphDef& input_graph_def,
                            const TransformFuncContext& context,
                            GraphDef* output_graph_def);
Status ConvertFakeQuantsToRequantize(const GraphDef& input_graph_def,
                                     const TransformFuncContext& context,
                                     GraphDef* output_graph_def);
Status MergeAdjacentRequantizes(const GraphDef& input_graph_def,
                                const TransformFuncContext& context,
                                GraphDef* output_graph_def);
Status HoistFakeQuants(const GraphDef& input_graph_def,
                       const TransformFuncContext& context,
                       GraphDef* output_graph_def);
Status MergeDuplicateNodes(const GraphDef& input_graph_def,
                           const TransformFuncContext& context,
                           GraphDef* output_graph_def);

class QuantizeNodesTest : public ::testing::Test {
 protected:
  // Runs the float graph and the transformed graph on the same inputs and
  // checks that the outputs agree to within `threshold`.
  void TestTransformedVersusFloatGraph(
      const TransformFunc& transform_function, const GraphDef& float_graph_def,
      const std::vector<std::pair<string, Tensor>>& float_inputs,
      const std::vector<std::pair<string, Tensor>>& transformed_inputs,
      const std::vector<string>& output_names,
      const TransformFuncContext& in_context, double threshold,
      GraphDef* transformed_graph_def) {
    std::unique_ptr<Session> float_session(NewSession(SessionOptions()));
    TF_ASSERT_OK(float_session->Create(float_graph_def));
    std::vector<Tensor> float_outputs;
    TF_ASSERT_OK(
        float_session->Run(float_inputs, output_names, {}, &float_outputs));
    TransformFuncContext context(in_context);
    std::vector<string> input_names;
    for (const std::pair<string, Tensor> float_input : float_inputs) {
      context.input_names.push_back(float_input.first);
    }
    context.output_names = output_names;
    TF_ASSERT_OK(
        transform_function(float_graph_def, context, transformed_graph_def));
    std::unique_ptr<Session> transformed_session(
        NewSession(SessionOptions()));
    TF_ASSERT_OK(transformed_session->Create(*transformed_graph_def));
    std::vector<Tensor> transformed_outputs;
    TF_ASSERT_OK(transformed_session->Run(transformed_inputs, output_names, {},
                                          &transformed_outputs));
    const int output_count = output_names.size();
    EXPECT_EQ(output_count, float_outputs.size());
    EXPECT_EQ(output_count, transformed_outputs.size());
    for (int i = 0; i < output_count; ++i) {
      test::ExpectTensorNear<float>(float_outputs[i], transformed_outputs[i],
                                    threshold);
    }
  }

  // Quantizes the graph and additionally checks that no float version of a
  // quantizable op survives the rewrite.
  void TestQuantizedVersusFloatGraph(
      const GraphDef& float_graph_def,
      const std::vector<std::pair<string, Tensor>>& inputs,
      const std::vector<string>& output_names) {
    GraphDef quantized_graph_def;
    TestTransformedVersusFloatGraph(QuantizeNodes, float_graph_def, inputs,
                                    inputs, output_names, {}, 1.0,
                                    &quantized_graph_def);
    const std::set<string> quantizable_ops = {
        "Add",   "BiasAdd",        "Concat",  "Conv2D",  "MatMul", "Relu",
        "Relu6", "ResizeBilinear", "AvgPool", "MaxPool", "Mul"};
    for (const NodeDef& node : quantized_graph_def.node()) {
      EXPECT_EQ(0, quantizable_ops.count(node.op()))
          << "Found quantizable node " << node.op() << " for node named "
          << node.name();
    }
  }

  void TestGraphWithInputRange(
      const GraphDef& float_graph_def,
      const std::vector<std::pair<string, Tensor>>& float_inputs,
      const std::vector<string>& output_names, float range_min,
      float range_max) {
    TransformFuncContext context;
    context.params["input_min"] = {strings::StrCat(range_min)};
    context.params["input_max"] = {strings::StrCat(range_max)};
    std::vector<std::pair<string, Tensor>> quantized_inputs;
    for (const std::pair<string, Tensor>& float_input : float_inputs) {
      const Tensor& float_tensor = float_input.second;
      Tensor quantized_tensor(DT_QUINT8, float_tensor.shape());
      FloatTensorToQuantizedInPlace<quint8>(float_tensor, range_min, range_max,
                                            &quantized_tensor);
      quantized_inputs.push_back({float_input.first, quantized_tensor});
    }
    GraphDef quantized_graph_def;
    TestTransformedVersusFloatGraph(
        QuantizeNodes, float_graph_def, float_inputs, quantized_inputs,
        output_names, context, 1.0, &quantized_graph_def);
  }

  void TestGraphWithFallbackRange(
      const GraphDef& float_graph_def,
      const std::vector<std::pair<string, Tensor>>& float_inputs,
      const std::vector<string>& output_names, float range_min,
      float range_max, GraphDef* quantized_graph_def) {
    TransformFuncContext context;
    context.params["fallback_min"] = {strings::StrCat(range_min)};
    context.params["fallback_max"] = {strings::StrCat(range_max)};
    TestTransformedVersusFloatGraph(QuantizeNodes, float_graph_def,
                                    float_inputs, float_inputs, output_names,
                                    context, 2.0, quantized_graph_def);
  }
  void TestIgnoreOps(std::initializer_list<string> ops_to_ignore) {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    auto const_op = [&](const string& name, const TensorShape& shape,
                        std::initializer_list<float> values) {
      Tensor tensor(DT_FLOAT, shape);
      test::FillValues<float>(&tensor, values);
      return Const(root.WithOpName(name), Input::Initializer(tensor));
    };
    int m = 1;
    int n = 1;
    int k = 1;
    Output a_op = const_op("a_op", {m, k}, {2});
    Output b_op = const_op("b_op", {k, n}, {3});
    Output c_op = const_op("c_op", {m, k}, {1});
    Output d_op = const_op("d_op", {k, n}, {4});
    Output mat_mul_op = MatMul(root.WithOpName("mat_mul_op"), a_op, b_op);
    Output mul_op = Mul(root.WithOpName("mul"), c_op, d_op);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TransformFuncContext context;
    if (ops_to_ignore.size() > 0) {
      context.params["ignore_op"] = ops_to_ignore;
    }
    GraphDef quantized_graph_def;
    TestTransformedVersusFloatGraph(QuantizeNodes, float_graph_def, {}, {},
                                    {"mat_mul_op", "mul"}, context, 1.0,
                                    &quantized_graph_def);
    for (const string& op_name : ops_to_ignore) {
      bool exists_in_quantized_graph = false;
      for (const NodeDef& node : quantized_graph_def.node()) {
        if (node.op() == op_name) {
          exists_in_quantized_graph = true;
          break;
        }
      }
      EXPECT_TRUE(exists_in_quantized_graph)
          << "Op " << op_name
          << " should not have been replaced by a quantized version";
    }
  }

  void TestQuantizeMatMul(int m, int n, int k,
                          const std::vector<float>& a_values,
                          const std::vector<float>& b_values) {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    Tensor a_tensor(DT_FLOAT, TensorShape({m, k}));
    test::FillValues<float>(&a_tensor, a_values);
    Output a_op = Const(root.WithOpName("a_op"), Input::Initializer(a_tensor));
    Tensor b_tensor(DT_FLOAT, TensorShape({k, n}));
    test::FillValues<float>(&b_tensor, b_values);
    Output b_op = Const(root.WithOpName("b_op"), Input::Initializer(b_tensor));
    Output mat_mul_op = MatMul(root.WithOpName("mat_mul_op"), a_op, b_op);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"mat_mul_op"});
  }

  void TestQuantizeMatMulTiny() {
    TestQuantizeMatMul(1, 1, 1, {2}, {3});
    TestQuantizeMatMul(1, 2, 1, {1}, {2, 3});
    TestQuantizeMatMul(1, 1, 2, {1, 1}, {1, 1});
    TestQuantizeMatMul(1, 1, 2, {0, 0}, {1, 1});
    TestQuantizeMatMul(1, 1, 2, {1, 2}, {1, 2});
  }

  void TestQuantizeMatMulSmall() {
    TestQuantizeMatMul(2, 4, 3, {1, 2, 3, 4, 5, 6},
                       {7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
  }

  void TestQuantizeMul() {
    using namespace ::tensorflow::ops;
    std::vector<int64> x_shape({10, 100});
    const size_t x_num_elements = TensorShape(x_shape).num_elements();
    std::vector<float> x_values(x_num_elements);
    for (int i = 0; i < x_num_elements; ++i) {
      x_values[i] = (i % 256) / 256.0f;
    }
    std::vector<int64> y_shape({100});
    const size_t y_num_elements = TensorShape(y_shape).num_elements();
    std::vector<float> y_values(y_num_elements);
    for (int i = 0; i < y_num_elements; ++i) {
      y_values[i] = ((i + 23) % 123) - 50;
    }
    Scope root = Scope::NewRootScope();
    Tensor x_float_tensor(DT_FLOAT, TensorShape(x_shape));
    test::FillValues<float>(&x_float_tensor, x_values);
    Output x = Const(root.WithOpName("x"), Input::Initializer(x_float_tensor));
    Tensor y_float_tensor(DT_FLOAT, TensorShape(y_shape));
    test::FillValues<float>(&y_float_tensor, y_values);
    Output y = Const(root.WithOpName("y"), Input::Initializer(y_float_tensor));
    Mul mul = Mul(root.WithOpName("mul"), x, y);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"mul"});
  }
  void TestQuantizeAdd() {
    using namespace ::tensorflow::ops;
    std::vector<int64> x_shape({10, 100});
    const size_t x_num_elements = TensorShape(x_shape).num_elements();
    std::vector<float> x_values(x_num_elements);
    for (int i = 0; i < x_num_elements; ++i) {
      x_values[i] = (i % 256) / 256.0f;
    }
    std::vector<int64> y_shape({100});
    const size_t y_num_elements = TensorShape(y_shape).num_elements();
    std::vector<float> y_values(y_num_elements);
    for (int i = 0; i < y_num_elements; ++i) {
      y_values[i] = ((i + 23) % 123) - 50;
    }
    Scope root = Scope::NewRootScope();
    Tensor x_float_tensor(DT_FLOAT, TensorShape(x_shape));
    test::FillValues<float>(&x_float_tensor, x_values);
    Output x = Const(root.WithOpName("x"), Input::Initializer(x_float_tensor));
    Tensor y_float_tensor(DT_FLOAT, TensorShape(y_shape));
    test::FillValues<float>(&y_float_tensor, y_values);
    Output y = Const(root.WithOpName("y"), Input::Initializer(y_float_tensor));
    Add add = Add(root.WithOpName("add"), x, y);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"add"});
  }

  void TestQuantizeConv2D(int depth, int input_width, int input_height,
                          int input_batch_count, int filter_size,
                          int filter_count, int stride, const string& padding,
                          const std::vector<float>& input_values,
                          const std::vector<float>& filter_values) {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    Tensor input_tensor(DT_FLOAT,
                        TensorShape({input_batch_count, input_height,
                                     input_width, depth}));
    test::FillValues<float>(&input_tensor, input_values);
    Output input_op =
        Const(root.WithOpName("input_op"), Input::Initializer(input_tensor));
    Tensor filter_tensor(
        DT_FLOAT,
        TensorShape({filter_size, filter_size, depth, filter_count}));
    test::FillValues<float>(&filter_tensor, filter_values);
    Output filter_op =
        Const(root.WithOpName("filter_op"), Input::Initializer(filter_tensor));
    Output conv_op = Conv2D(root.WithOpName("conv_op"), input_op, filter_op,
                            {1, stride, stride, 1}, padding);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"conv_op"});
  }

  void TestQuantizeBiasAdd() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    Tensor input_tensor(DT_FLOAT, TensorShape({1, 1, 2, 6}));
    test::FillIota<float>(&input_tensor, 1);
    Output input_op =
        Const(root.WithOpName("input_op"), Input::Initializer(input_tensor));
    Tensor offset_tensor(DT_FLOAT, TensorShape({6}));
    test::FillIota<float>(&offset_tensor, 1);
    Output offset_op =
        Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor));
    Output bias_add_op =
        BiasAdd(root.WithOpName("bias_add_op"), input_op, offset_op);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"bias_add_op"});
  }

  void TestQuantizeConcat() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    Tensor shape_tensor(DT_INT32, TensorShape({}));
    test::FillValues<int32>(&shape_tensor, {0});
    Output shape_op =
        Const(root.WithOpName("shape_op"), Input::Initializer(shape_tensor));
    Tensor a_tensor(DT_FLOAT, TensorShape({2, 2, 3}));
    test::FillValues<float>(&a_tensor,
                            {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
    Output a_op = Const(root.WithOpName("a_op"), Input::Initializer(a_tensor));
    Tensor b_tensor(DT_FLOAT, TensorShape({2, 2, 3}));
    test::FillValues<float>(&b_tensor,
                            {13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
    Output b_op = Const(root.WithOpName("b_op"), Input::Initializer(b_tensor));
    Output concat_op =
        Concat(root.WithOpName("concat_op"), {a_op, b_op}, shape_op);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"concat_op"});
  }
  void TestQuantizeRelu() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    Tensor constant_tensor(DT_FLOAT, TensorShape({1, 2, 6, 1}));
    test::FillValues<float>(&constant_tensor,
                            {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
    Output constant_op = Const(root.WithOpName("constant_op"),
                               Input::Initializer(constant_tensor));
    Output relu_op = Relu(root.WithOpName("relu_op"), constant_op);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"relu_op"});
  }

  void TestQuantizeRelu6() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    Tensor constant_tensor(DT_FLOAT, TensorShape({1, 2, 6, 1}));
    test::FillValues<float>(&constant_tensor,
                            {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
    Output constant_op = Const(root.WithOpName("constant_op"),
                               Input::Initializer(constant_tensor));
    Output relu6_op = Relu6(root.WithOpName("relu6_op"), constant_op);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"relu6_op"});
  }

  void TestQuantizeMaxPool() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    Tensor constant_tensor(DT_FLOAT, TensorShape({1, 2, 6, 1}));
    test::FillValues<float>(&constant_tensor,
                            {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
    Output constant_op = Const(root.WithOpName("constant_op"),
                               Input::Initializer(constant_tensor));
    Output max_pool_op = MaxPool(root.WithOpName("max_pool_op"), constant_op,
                                 {1, 2, 2, 1}, {1, 1, 1, 1}, "SAME");
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"max_pool_op"});
  }

  void TestQuantizeAvgPool() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    Tensor constant_tensor(DT_FLOAT, TensorShape({1, 2, 6, 1}));
    test::FillValues<float>(&constant_tensor,
                            {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
    Output constant_op = Const(root.WithOpName("constant_op"),
                               Input::Initializer(constant_tensor));
    Output avg_pool_op = AvgPool(root.WithOpName("avg_pool_op"), constant_op,
                                 {1, 2, 2, 1}, {1, 1, 1, 1}, "SAME");
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"avg_pool_op"});
  }

  void TestQuantizeReshape() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    Tensor constant_tensor(DT_FLOAT, TensorShape({4, 5}));
    test::FillValues<float>(&constant_tensor,
                            {1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
                             11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
    Output constant_op = Const(root.WithOpName("constant_op"),
                               Input::Initializer(constant_tensor));
    Output reshape_op =
        Reshape(root.WithOpName("reshape_op"), constant_op, {10, 2});
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TestQuantizedVersusFloatGraph(float_graph_def, {}, {"reshape_op"});
  }
  void TestRemoveRedundantQuantization() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    Tensor quantized_tensor(DT_QUINT8, TensorShape({}));
    test::FillValues<quint8>(&quantized_tensor, {0});
    Output quantized_op = Const(root.WithOpName("quantized_op"),
                                Input::Initializer(quantized_tensor));
    Tensor quantized_min_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&quantized_min_tensor, {2.0f});
    Output quantized_min_op = Const(root.WithOpName("quantized_min_op"),
                                    Input::Initializer(quantized_min_tensor));
    Tensor quantized_max_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&quantized_max_tensor, {2.0f});
    Output quantized_max_op = Const(root.WithOpName("quantized_max_op"),
                                    Input::Initializer(quantized_min_tensor));
    Output dequantize_op =
        Dequantize(root.WithOpName("dequantize_op"), quantized_op,
                   quantized_min_op, quantized_max_op);
    Tensor dequantize_reshape_dims_tensor(DT_INT32, TensorShape({1}));
    test::FillValues<int32>(&dequantize_reshape_dims_tensor, {-1});
    Output dequantize_reshape_dims =
        Const(root.WithOpName("dequantize_reshape_dims"),
              Input::Initializer(dequantize_reshape_dims_tensor));
    Tensor dequantize_reduction_dims_tensor(DT_INT32, TensorShape({}));
    test::FillValues<int32>(&dequantize_reduction_dims_tensor, {0});
    Output dequantize_reduction_dims =
        Const(root.WithOpName("dequantize_reduction_dims"),
              Input::Initializer(dequantize_reduction_dims_tensor));
    Output dequantize_reshape =
        Reshape(root.WithOpName("dequantize_reshape"), dequantize_op,
                dequantize_reshape_dims);
    Output dequantize_min =
        Min(root.WithOpName("dequantize_min"), dequantize_reshape,
            dequantize_reduction_dims, Min::Attrs().KeepDims(false));
    Output dequantize_max =
        Max(root.WithOpName("dequantize_max"), dequantize_reshape,
            dequantize_reduction_dims, Max::Attrs().KeepDims(false));
    QuantizeV2 quantize_op(root.WithOpName("quantize_op"), dequantize_op,
                           dequantize_min, dequantize_max, DT_QUINT8,
                           QuantizeV2::Attrs().Mode("MIN_FIRST"));
    Output final_dequantize =
        Dequantize(root.WithOpName("final_dequantize"), quantize_op.output,
                   quantize_op.output_min, quantize_op.output_max);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    GraphDef removed_graph_def;
    TestTransformedVersusFloatGraph(
        RemoveRedundantQuantizations, float_graph_def, {}, {},
        {"final_dequantize"}, {}, 1.0, &removed_graph_def);
    std::map<string, const NodeDef*> node_map;
    MapNamesToNodes(removed_graph_def, &node_map);
    EXPECT_EQ(1, node_map.count("final_dequantize"));
    EXPECT_EQ("quantized_op", node_map.at("final_dequantize")->input(0));
  }
  void TestRemoveRedundantQuantizationWithBiasAdd() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    Tensor quantized_tensor(DT_QUINT8, TensorShape({1, 6}));
    test::FillValues<quint8>(&quantized_tensor, {0, 0, 0, 0, 0, 0});
    Output quantized_op = Const(root.WithOpName("quantized_op"),
                                Input::Initializer(quantized_tensor));
    Tensor quantized_min_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&quantized_min_tensor, {2.0f});
    Output quantized_min_op = Const(root.WithOpName("quantized_min_op"),
                                    Input::Initializer(quantized_min_tensor));
    Tensor quantized_max_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&quantized_max_tensor, {2.0f});
    Output quantized_max_op = Const(root.WithOpName("quantized_max_op"),
                                    Input::Initializer(quantized_min_tensor));
    Tensor offset_tensor(DT_QUINT8, TensorShape({6}));
    test::FillValues<quint8>(&offset_tensor, {1, 2, 3, 4, 5, 6});
    Output offset_op =
        Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor));
    Tensor offset_min_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&offset_min_tensor, {0.0f});
    Output offset_min_op = Const(root.WithOpName("offset_min_op"),
                                 Input::Initializer(offset_min_tensor));
    Tensor offset_max_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&offset_max_tensor, {255.0f});
    Output offset_max_op = Const(root.WithOpName("offset_max_op"),
                                 Input::Initializer(offset_max_tensor));
    QuantizedBiasAdd quantized_bias_add_op(
        root.WithOpName("bias_add_op"), quantized_op, offset_op,
        quantized_min_op, quantized_max_op, offset_min_op, offset_max_op,
        DT_QINT32);
    RequantizationRange requantization_range_op(
        root.WithOpName("requantization_range_op"),
        quantized_bias_add_op.output, quantized_bias_add_op.min_out,
        quantized_bias_add_op.max_out);
    Requantize requantize_op(
        root.WithOpName("requantize_op"), quantized_bias_add_op.output,
        quantized_bias_add_op.min_out, quantized_bias_add_op.max_out,
        requantization_range_op.output_min, requantization_range_op.output_max,
        DT_QUINT8);
    Output dequantize_op =
        Dequantize(root.WithOpName("dequantize_op"), requantize_op.output,
                   requantize_op.output_min, requantize_op.output_max);
    Tensor dequantize_reshape_dims_tensor(DT_INT32, TensorShape({1}));
    test::FillValues<int32>(&dequantize_reshape_dims_tensor, {-1});
    Output dequantize_reshape_dims =
        Const(root.WithOpName("dequantize_reshape_dims"),
              Input::Initializer(dequantize_reshape_dims_tensor));
    Tensor dequantize_reduction_dims_tensor(DT_INT32, TensorShape({}));
    test::FillValues<int32>(&dequantize_reduction_dims_tensor, {0});
    Output dequantize_reduction_dims =
        Const(root.WithOpName("dequantize_reduction_dims"),
              Input::Initializer(dequantize_reduction_dims_tensor));
    Output dequantize_reshape =
        Reshape(root.WithOpName("dequantize_reshape"), dequantize_op,
                dequantize_reshape_dims);
    Output dequantize_min =
        Min(root.WithOpName("dequantize_min"), dequantize_reshape,
            dequantize_reduction_dims, Min::Attrs().KeepDims(false));
    Output dequantize_max =
        Max(root.WithOpName("dequantize_max"), dequantize_reshape,
            dequantize_reduction_dims, Max::Attrs().KeepDims(false));
    QuantizeV2 quantize_op(root.WithOpName("quantize_op"), dequantize_op,
                           dequantize_min, dequantize_max, DT_QUINT8,
                           QuantizeV2::Attrs().Mode("MIN_FIRST"));
    Output final_dequantize =
        Dequantize(root.WithOpName("final_dequantize"), quantize_op.output,
                   quantize_op.output_min, quantize_op.output_max);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    GraphDef removed_graph_def;
    TestTransformedVersusFloatGraph(
        RemoveRedundantQuantizations, float_graph_def, {}, {},
        {"final_dequantize"}, {}, 1.0, &removed_graph_def);
    std::map<string, const NodeDef*> node_map;
    MapNamesToNodes(removed_graph_def, &node_map);
    EXPECT_EQ(1, node_map.count("final_dequantize"));
    EXPECT_EQ("requantize_op", node_map.at("final_dequantize")->input(0));
  }

  void TestQuantizeResizeBilinear() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    Tensor size_tensor(DT_INT32, TensorShape({2}));
    test::FillValues<int32>(&size_tensor, {256, 256});
    Output constant_op = Const(root.WithOpName("size_tensor_op"),
                               Input::Initializer(size_tensor));
    Output placeholder_op =
        Placeholder(root.WithOpName("placeholder_op"), DT_FLOAT);
    Output resize_bilinear_op = ResizeBilinear(
        root.WithOpName("resize_bilinear_op"), placeholder_op, constant_op);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    Tensor input_tensor(DT_FLOAT, {1, 128, 128, 3});
    test::FillFn<float>(&input_tensor, [](int) { return 100.0f; });
    TestQuantizedVersusFloatGraph(float_graph_def,
                                  {{"placeholder_op", input_tensor}},
                                  {"resize_bilinear_op"});
  }
  void TestRemoveRedundantQuantizationWithMultipleOutputs() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    Tensor quantized_tensor(DT_QUINT8, TensorShape({1, 6}));
    test::FillValues<quint8>(&quantized_tensor, {0, 0, 0, 0, 0, 0});
    Output quantized_op = Const(root.WithOpName("quantized_op"),
                                Input::Initializer(quantized_tensor));
    Tensor quantized_min_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&quantized_min_tensor, {2.0f});
    Output quantized_min_op = Const(root.WithOpName("quantized_min_op"),
                                    Input::Initializer(quantized_min_tensor));
    Tensor quantized_max_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&quantized_max_tensor, {2.0f});
    Output quantized_max_op = Const(root.WithOpName("quantized_max_op"),
                                    Input::Initializer(quantized_min_tensor));
    Tensor offset_tensor(DT_QUINT8, TensorShape({6}));
    test::FillValues<quint8>(&offset_tensor, {1, 2, 3, 4, 5, 6});
    Output offset_op =
        Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor));
    Tensor offset_min_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&offset_min_tensor, {0.0f});
    Output offset_min_op = Const(root.WithOpName("offset_min_op"),
                                 Input::Initializer(offset_min_tensor));
    Tensor offset_max_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&offset_max_tensor, {255.0f});
    Output offset_max_op = Const(root.WithOpName("offset_max_op"),
                                 Input::Initializer(offset_max_tensor));
    QuantizedBiasAdd quantized_bias_add_op(
        root.WithOpName("bias_add_op"), quantized_op, offset_op,
        quantized_min_op, quantized_max_op, offset_min_op, offset_max_op,
        DT_QINT32);
    RequantizationRange requantization_range_op(
        root.WithOpName("requantization_range_op"),
        quantized_bias_add_op.output, quantized_bias_add_op.min_out,
        quantized_bias_add_op.max_out);
    Requantize requantize_op(
        root.WithOpName("requantize_op"), quantized_bias_add_op.output,
        quantized_bias_add_op.min_out, quantized_bias_add_op.max_out,
        requantization_range_op.output_min, requantization_range_op.output_max,
        DT_QUINT8);
    Output dequantize_op =
        Dequantize(root.WithOpName("dequantize_op"), requantize_op.output,
                   requantize_op.output_min, requantize_op.output_max);
    Tensor dequantize_reshape_dims_tensor(DT_INT32, TensorShape({1}));
    test::FillValues<int32>(&dequantize_reshape_dims_tensor, {-1});
    Output dequantize_reshape_dims =
        Const(root.WithOpName("dequantize_reshape_dims"),
              Input::Initializer(dequantize_reshape_dims_tensor));
    Tensor dequantize_reduction_dims_tensor(DT_INT32, TensorShape({}));
    test::FillValues<int32>(&dequantize_reduction_dims_tensor, {0});
    Output dequantize_reduction_dims =
        Const(root.WithOpName("dequantize_reduction_dims"),
              Input::Initializer(dequantize_reduction_dims_tensor));
    Output dequantize_reshape =
        Reshape(root.WithOpName("dequantize_reshape"), dequantize_op,
                dequantize_reshape_dims);
    Output dequantize_min =
        Min(root.WithOpName("dequantize_min"), dequantize_reshape,
            dequantize_reduction_dims, Min::Attrs().KeepDims(false));
    Output dequantize_max =
        Max(root.WithOpName("dequantize_max"), dequantize_reshape,
            dequantize_reduction_dims, Max::Attrs().KeepDims(false));
    QuantizeV2 quantize_op(root.WithOpName("quantize_op"), dequantize_op,
                           dequantize_min, dequantize_max, DT_QUINT8,
                           QuantizeV2::Attrs().Mode("MIN_FIRST"));
    Output final_dequantize =
        Dequantize(root.WithOpName("final_dequantize"), quantize_op.output,
                   quantize_op.output_min, quantize_op.output_max);
    Output relu_op = Relu(root.WithOpName("relu_op"), dequantize_op);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    GraphDef removed_graph_def;
    TestTransformedVersusFloatGraph(
        RemoveRedundantQuantizations, float_graph_def, {}, {},
        {"final_dequantize", "relu_op"}, {}, 1.0, &removed_graph_def);
    std::map<string, int> op_type_count;
    for (const NodeDef& node : removed_graph_def.node()) {
      ++op_type_count[node.op()];
    }
    EXPECT_EQ(2, op_type_count["Dequantize"]);
  }

  void TestQuantizePlaceholders() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    Output placeholder_op =
        Placeholder(root.WithOpName("placeholder_op"), DT_FLOAT);
    Output relu_op = Relu(root.WithOpName("relu_op"), placeholder_op);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    TransformFuncContext context;
    context.input_names = {"placeholder_op"};
    context.output_names = {"relu_op"};
    context.params = {{"input_min", {"-10.0"}}, {"input_max", {"10.0"}}};
    GraphDef quantized_graph_def;
    TF_ASSERT_OK(
        QuantizePlaceholders(float_graph_def, context, &quantized_graph_def));
    Tensor input_tensor(DT_FLOAT, {});
    input_tensor.flat<float>()(0) = 5.0f;
    TestQuantizedVersusFloatGraph(
        float_graph_def, {{"placeholder_op", input_tensor}}, {"relu_op"});
    std::map<string, const NodeDef*> node_map;
    MapNamesToNodes(quantized_graph_def, &node_map);
    EXPECT_NE("placeholder_op", node_map.at("relu_op")->input(0));
    EXPECT_EQ("Placeholder", node_map.at("placeholder_op")->op());
    EXPECT_EQ(DT_QUINT8,
              node_map.at("placeholder_op")->attr().at("dtype").type());
  }

  void TestInputRange() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    const int width = 100;
    Tensor a_data(DT_FLOAT, TensorShape({1, width}));
    test::FillIota<float>(&a_data, 1.0f);
    Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
    Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
    Output bias_add =
        BiasAdd(root.WithOpName("bias_add"), a_const, placeholder);
    GraphDef graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&graph_def));
    Tensor placeholder_tensor(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&placeholder_tensor, 1.0f);
    TestGraphWithInputRange(graph_def, {{"placeholder", placeholder_tensor}},
                            {"bias_add"}, 0.0f, 100.0f);
  }

  void TestFallbackRange() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    const int width = 100;
    Tensor a_data(DT_FLOAT, TensorShape({1, width}));
    test::FillIota<float>(&a_data, 1.0f);
    Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
    Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
    Output bias_add =
        BiasAdd(root.WithOpName("bias_add"), a_const, placeholder);
    GraphDef graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&graph_def));
    Tensor placeholder_tensor(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&placeholder_tensor, 1.0f);
    GraphDef quantized_graph_def;
    TestGraphWithFallbackRange(graph_def,
                               {{"placeholder", placeholder_tensor}},
                               {"bias_add"}, 0.0f, 200.0f,
                               &quantized_graph_def);
    for (const NodeDef& node : quantized_graph_def.node()) {
      EXPECT_NE("RequantizationRange", node.op());
    }
  }
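  // The FakeQuant-related tests below use a [0, 18] range. As a mental model,
  // FakeQuantWithMinMaxVars snaps each float to the nearest of 256 evenly
  // spaced levels in [min, max]; the standalone approximation below ignores
  // TF's nudging of the range to keep zero exactly representable, so it is an
  // illustration rather than the op's exact semantics.
  //
  //   #include <algorithm>
  //   #include <cmath>
  //   #include <cstdio>
  //
  //   float FakeQuantSketch(float v, float min, float max) {
  //     const float scale = (max - min) / 255.0f;
  //     const float clamped = std::min(max, std::max(min, v));
  //     return min + std::round((clamped - min) / scale) * scale;
  //   }
  //
  //   int main() {
  //     for (float v : {0.0f, 1.7f, 9.33f, 18.0f, 25.0f}) {
  //       std::printf("%.2f -> %.4f\n", v, FakeQuantSketch(v, 0.0f, 18.0f));
  //     }
  //     return 0;
  //   }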
  void TestConvertFakeQuantsToRequantize() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    Tensor input_tensor(DT_FLOAT, TensorShape({1, 1, 2, 6}));
    test::FillIota<float>(&input_tensor, 1);
    Output input_op =
        Const(root.WithOpName("input_op"), Input::Initializer(input_tensor));
    Tensor offset_tensor(DT_FLOAT, TensorShape({6}));
    test::FillIota<float>(&offset_tensor, 1);
    Output offset_op =
        Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor));
    Output bias_add_op =
        BiasAdd(root.WithOpName("bias_add_op"), input_op, offset_op);
    Tensor fake_quant_min_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&fake_quant_min_tensor, {0.0f});
    Output fake_quant_min_op =
        Const(root.WithOpName("fake_quant_min_op"),
              Input::Initializer(fake_quant_min_tensor));
    Tensor fake_quant_max_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&fake_quant_max_tensor, {18.0f});
    Output fake_quant_max_op =
        Const(root.WithOpName("fake_quant_max_op"),
              Input::Initializer(fake_quant_max_tensor));
    Output fake_quant_op =
        FakeQuantWithMinMaxVars(root.WithOpName("fake_quant_op"), bias_add_op,
                                fake_quant_min_op, fake_quant_max_op);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    GraphDef converted_graph_def;
    TestTransformedVersusFloatGraph(ConvertFakeQuantsToRequantize,
                                    float_graph_def, {}, {}, {"fake_quant_op"},
                                    {}, 1.0, &converted_graph_def);
    for (const NodeDef& node : converted_graph_def.node()) {
      EXPECT_NE("FakeQuantWithMinMaxVars", node.op());
    }
  }
Const(root.WithOpName(\"quantize_min_op\"),\n Input::Initializer(quantize_min_tensor));\n Tensor quantize_max_tensor(DT_FLOAT, TensorShape({}));\n test::FillValues(&quantize_max_tensor, {255.0f});\n Output quantize_max_op = Const(root.WithOpName(\"quantize_max_op\"),\n Input::Initializer(quantize_max_tensor));\n QuantizeV2 quantize_op(root.WithOpName(\"quantize_op\"), dequantize_op,\n quantize_min_op, quantize_max_op, DT_QINT32,\n QuantizeV2::Attrs().Mode(\"MIN_FIRST\"));\n Tensor fake_requantize_min_tensor(DT_FLOAT, TensorShape({}));\n test::FillValues(&fake_requantize_min_tensor, {0.0f});\n Output fake_requantize_min_op =\n Const(root.WithOpName(\"fake_requantize_min_op\"),\n Input::Initializer(fake_requantize_min_tensor));\n Tensor fake_requantize_max_tensor(DT_FLOAT, TensorShape({}));\n test::FillValues(&fake_requantize_max_tensor, {255.0f});\n Output fake_requantize_max_op =\n Const(root.WithOpName(\"fake_requantize_max_op\"),\n Input::Initializer(fake_requantize_max_tensor));\n Requantize fake_requantize_op(\n root.WithOpName(\"fake_requantize_op\"), quantize_op.output,\n quantize_op.output_min, quantize_op.output_max, fake_requantize_min_op,\n fake_requantize_max_op, DT_QUINT8);\n Output fake_dequantize_op = Dequantize(\n root.WithOpName(\"fake_dequantize_op\"), fake_requantize_op.output,\n fake_requantize_op.output_min, fake_requantize_op.output_max,\n Dequantize::Attrs().Mode(\"MIN_FIRST\"));\n GraphDef float_graph_def;\n TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));\n GraphDef converted_graph_def;\n TestTransformedVersusFloatGraph(MergeAdjacentRequantizes, float_graph_def,\n {}, {}, {\"fake_dequantize_op\"}, {}, 1.0,\n &converted_graph_def);\n int requantize_count = 0;\n for (const NodeDef& node : converted_graph_def.node()) {\n if (node.op() == \"Requantize\") {\n ++requantize_count;\n }\n }\n EXPECT_EQ(1, requantize_count);\n }\n void TestConvertFakeQuantsEndToEnd() {\n auto root = tensorflow::Scope::NewRootScope();\n using namespace ::tensorflow::ops; \n Tensor input_tensor(DT_FLOAT, TensorShape({1, 1, 2, 6}));\n test::FillIota(&input_tensor, 1);\n Output input_op =\n Const(root.WithOpName(\"input_op\"), Input::Initializer(input_tensor));\n Tensor offset_tensor(DT_FLOAT, TensorShape({6}));\n test::FillIota(&offset_tensor, 1);\n Output offset_op =\n Const(root.WithOpName(\"offset_op\"), Input::Initializer(offset_tensor));\n Output bias_add_op =\n BiasAdd(root.WithOpName(\"bias_add_op\"), input_op, offset_op);\n Tensor fake_quant_min_tensor(DT_FLOAT, TensorShape({}));\n test::FillValues(&fake_quant_min_tensor, {0.0f});\n Output fake_quant_min_op = Const(root.WithOpName(\"fake_quant_min_op\"),\n Input::Initializer(fake_quant_min_tensor));\n Tensor fake_quant_max_tensor(DT_FLOAT, TensorShape({}));\n test::FillValues(&fake_quant_max_tensor, {18.0f});\n Output fake_quant_max_op = Const(root.WithOpName(\"fake_quant_max_op\"),\n Input::Initializer(fake_quant_max_tensor));\n Output fake_quant_op =\n FakeQuantWithMinMaxVars(root.WithOpName(\"fake_quant_op\"), bias_add_op,\n fake_quant_min_op, fake_quant_max_op);\n GraphDef float_graph_def;\n TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));\n GraphDef converted_graph_def;\n TestTransformedVersusFloatGraph(QuantizeNodes, float_graph_def, {}, {},\n {\"fake_quant_op\"}, {}, 1.0,\n &converted_graph_def);\n int requantize_count = 0;\n for (const NodeDef& node : converted_graph_def.node()) {\n EXPECT_NE(\"FakeQuantWithMinMaxVars\", node.op());\n if (node.op() == \"Requantize\") {\n ++requantize_count;\n }\n }\n EXPECT_EQ(1, 
  void TestHoistFakeQuants() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    Tensor input_tensor(DT_FLOAT, TensorShape({1, 1, 2, 6}));
    test::FillIota<float>(&input_tensor, 1);
    Output input_op =
        Const(root.WithOpName("input_op"), Input::Initializer(input_tensor));
    Tensor offset_tensor(DT_FLOAT, TensorShape({6}));
    test::FillIota<float>(&offset_tensor, 1);
    Output offset_op =
        Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor));
    Output bias_add_op =
        BiasAdd(root.WithOpName("bias_add_op"), input_op, offset_op);
    Output relu_op = Relu(root.WithOpName("relu_op"), bias_add_op);
    Output max_pool_op = MaxPool(root.WithOpName("max_pool_op"), relu_op,
                                 {1, 2, 2, 1}, {1, 1, 1, 1}, "SAME");
    Tensor fake_quant_min_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&fake_quant_min_tensor, {0.0f});
    Output fake_quant_min_op =
        Const(root.WithOpName("fake_quant_min_op"),
              Input::Initializer(fake_quant_min_tensor));
    Tensor fake_quant_max_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&fake_quant_max_tensor, {18.0f});
    Output fake_quant_max_op =
        Const(root.WithOpName("fake_quant_max_op"),
              Input::Initializer(fake_quant_max_tensor));
    Output fake_quant_op =
        FakeQuantWithMinMaxVars(root.WithOpName("fake_quant_op"), max_pool_op,
                                fake_quant_min_op, fake_quant_max_op);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    GraphDef converted_graph_def;
    TestTransformedVersusFloatGraph(HoistFakeQuants, float_graph_def, {}, {},
                                    {"fake_quant_op"}, {}, 1.0,
                                    &converted_graph_def);
    std::map<string, const NodeDef*> node_map;
    MapNamesToNodes(converted_graph_def, &node_map);
    EXPECT_EQ("MaxPool", node_map.at("fake_quant_op")->op());
    EXPECT_EQ("FakeQuantWithMinMaxVars",
              node_map.at(node_map.at("relu_op")->input(0))->op());
  }
  void TestMergeDuplicateQuantizes() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    Tensor quantized_tensor(DT_QUINT8, TensorShape({}));
    test::FillValues<quint8>(&quantized_tensor, {0});
    Output quantized_op = Const(root.WithOpName("quantized_op"),
                                Input::Initializer(quantized_tensor));
    Tensor quantized_min_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&quantized_min_tensor, {2.0f});
    Output quantized_min_op = Const(root.WithOpName("quantized_min_op"),
                                    Input::Initializer(quantized_min_tensor));
    Tensor quantized_max_tensor(DT_FLOAT, TensorShape({}));
    test::FillValues<float>(&quantized_max_tensor, {2.0f});
    Output quantized_max_op = Const(root.WithOpName("quantized_max_op"),
                                    Input::Initializer(quantized_min_tensor));
    Output dequantize_op =
        Dequantize(root.WithOpName("dequantize_op"), quantized_op,
                   quantized_min_op, quantized_max_op);
    Tensor quantize_reshape_dims1_tensor(DT_INT32, TensorShape({1}));
    test::FillValues<int32>(&quantize_reshape_dims1_tensor, {-1});
    Output quantize_reshape_dims1 =
        Const(root.WithOpName("dequantize_reshape_dims1"),
              Input::Initializer(quantize_reshape_dims1_tensor));
    Tensor quantize_reduction_dims1_tensor(DT_INT32, TensorShape({}));
    test::FillValues<int32>(&quantize_reduction_dims1_tensor, {0});
    Output quantize_reduction_dims1 =
        Const(root.WithOpName("quantize_reduction_dims1"),
              Input::Initializer(quantize_reduction_dims1_tensor));
    Output quantize_reshape1 = Reshape(root.WithOpName("quantize_reshape1"),
                                       dequantize_op, quantize_reshape_dims1);
    Output quantize_min1 =
        Min(root.WithOpName("quantize_min1"), quantize_reshape1,
            quantize_reduction_dims1, Min::Attrs().KeepDims(false));
    Output quantize_max1 =
        Max(root.WithOpName("quantize_max1"), quantize_reshape1,
            quantize_reduction_dims1, Max::Attrs().KeepDims(false));
    QuantizeV2 quantize_op1(root.WithOpName("quantize_op1"), dequantize_op,
                            quantize_min1, quantize_max1, DT_QUINT8,
                            QuantizeV2::Attrs().Mode("MIN_FIRST"));
    Tensor quantize_reshape_dims2_tensor(DT_INT32, TensorShape({1}));
    test::FillValues<int32>(&quantize_reshape_dims2_tensor, {-1});
    Output quantize_reshape_dims2 =
        Const(root.WithOpName("dequantize_reshape_dims2"),
              Input::Initializer(quantize_reshape_dims2_tensor));
    Tensor quantize_reduction_dims2_tensor(DT_INT32, TensorShape({}));
    test::FillValues<int32>(&quantize_reduction_dims2_tensor, {0});
    Output quantize_reduction_dims2 =
        Const(root.WithOpName("quantize_reduction_dims2"),
              Input::Initializer(quantize_reduction_dims2_tensor));
    Output quantize_reshape2 = Reshape(root.WithOpName("quantize_reshape2"),
                                       dequantize_op, quantize_reshape_dims2);
    Output quantize_min2 =
        Min(root.WithOpName("quantize_min2"), quantize_reshape2,
            quantize_reduction_dims2, Min::Attrs().KeepDims(false));
    Output quantize_max2 =
        Max(root.WithOpName("quantize_max2"), quantize_reshape2,
            quantize_reduction_dims2, Max::Attrs().KeepDims(false));
    QuantizeV2 quantize_op2(root.WithOpName("quantize_op2"), dequantize_op,
                            quantize_min1, quantize_max1, DT_QUINT8,
                            QuantizeV2::Attrs().Mode("MIN_FIRST"));
    Output final_dequantize1 =
        Dequantize(root.WithOpName("final_dequantize1"), quantize_op1.output,
                   quantize_op1.output_min, quantize_op1.output_max);
    Output final_dequantize2 =
        Dequantize(root.WithOpName("final_dequantize2"), quantize_op2.output,
                   quantize_op2.output_min, quantize_op2.output_max);
    Output add_op =
        Add(root.WithOpName("add_op"), final_dequantize1, final_dequantize2);
    GraphDef float_graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
    GraphDef merged_graph_def;
    TestTransformedVersusFloatGraph(MergeDuplicateNodes, float_graph_def, {},
                                    {}, {"add_op"}, {}, 1.0,
                                    &merged_graph_def);
    std::map<string, int> op_map;
    for (const NodeDef& node : merged_graph_def.node()) {
      ++op_map[node.op()];
    }
    EXPECT_EQ(1, op_map["QuantizeV2"]);
  }
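  // The duplicate-merging tests that follow rely on MergeDuplicateNodes
  // hashing each node with its name blanked out, so identical Consts under
  // different names collide. A standalone toy version of that idea (ToyNode
  // and its fields are invented for the sketch; compile it on its own):
  //
  //   #include <functional>
  //   #include <iostream>
  //   #include <map>
  //   #include <string>
  //   #include <vector>
  //
  //   struct ToyNode {
  //     std::string name;
  //     std::string op;
  //     std::string value;
  //   };
  //
  //   int main() {
  //     const std::vector<ToyNode> nodes = {{"a_op", "Const", "1,2,3"},
  //                                         {"b_op", "Const", "1,2,3"},
  //                                         {"c_op", "Const", "4,5,6"}};
  //     std::map<std::size_t, std::string> first_with_hash;
  //     for (const ToyNode& node : nodes) {
  //       // Hash everything except the name: equal content, any name.
  //       const std::size_t hash =
  //           std::hash<std::string>{}(node.op + "|" + node.value);
  //       const auto result = first_with_hash.emplace(hash, node.name);
  //       if (!result.second) {
  //         std::cout << node.name << " duplicates "
  //                   << result.first->second << "\n";
  //       }
  //     }
  //     return 0;
  //   }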
EXPECT_EQ(remaining_const, node_map[\"add_op\"]->input(0));\n EXPECT_EQ(remaining_const, node_map[\"add_op\"]->input(1));\n EXPECT_EQ(1, node_map.count(\"c_op\"));\n EXPECT_EQ(\"add_op\", node_map[\"mul_op\"]->input(0));\n EXPECT_EQ(\"c_op\", node_map[\"mul_op\"]->input(1));\n }\n void TestMergeDuplicatesNested() {\n auto root = tensorflow::Scope::NewRootScope();\n using namespace ::tensorflow::ops; \n const int width = 10;\n Tensor a_tensor(DT_FLOAT, TensorShape({width}));\n test::FillIota<float>(&a_tensor, 1.0f);\n Output a_op = Const(root.WithOpName(\"a_op\"), Input::Initializer(a_tensor));\n Output a_relu_op = Relu(root.WithOpName(\"a_relu_op\"), a_op);\n Tensor b_tensor(DT_FLOAT, TensorShape({width}));\n test::FillIota<float>(&b_tensor, 1.0f);\n Output b_op = Const(root.WithOpName(\"b_op\"), Input::Initializer(b_tensor));\n Output b_relu_op = Relu(root.WithOpName(\"b_relu_op\"), b_op);\n Output add_op = Add(root.WithOpName(\"add_op\"), a_relu_op, b_relu_op);\n Tensor c_tensor(DT_FLOAT, TensorShape({width}));\n test::FillIota<float>(&c_tensor, 2.0f);\n Output c_op = Const(root.WithOpName(\"c_op\"), Input::Initializer(c_tensor));\n Output mul_op = Mul(root.WithOpName(\"mul_op\"), add_op, c_op);\n GraphDef float_graph_def;\n TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));\n GraphDef merged_graph_def;\n TestTransformedVersusFloatGraph(MergeDuplicateNodes, float_graph_def, {},\n {}, {\"mul_op\"}, {}, 1.0, &merged_graph_def);\n std::map<string, const NodeDef*> node_map;\n MapNamesToNodes(merged_graph_def, &node_map);\n EXPECT_EQ(1, (node_map.count(\"a_op\") + node_map.count(\"b_op\")));\n EXPECT_EQ(1, (node_map.count(\"a_relu_op\") + node_map.count(\"b_relu_op\")));\n string remaining_relu;\n if (node_map.count(\"a_relu_op\")) {\n remaining_relu = \"a_relu_op\";\n } else {\n remaining_relu = \"b_relu_op\";\n }\n EXPECT_EQ(remaining_relu, node_map[\"add_op\"]->input(0));\n EXPECT_EQ(remaining_relu, node_map[\"add_op\"]->input(1));\n EXPECT_EQ(1, node_map.count(\"c_op\"));\n EXPECT_EQ(\"add_op\", node_map[\"mul_op\"]->input(0));\n EXPECT_EQ(\"c_op\", node_map[\"mul_op\"]->input(1));\n }\n void TestMergeDuplicatesInOut() {\n auto root = tensorflow::Scope::NewRootScope();\n using namespace ::tensorflow::ops; \n const int width = 10;\n Tensor a_tensor(DT_FLOAT, TensorShape({width}));\n test::FillIota<float>(&a_tensor, 1.0f);\n Output a_op = Const(root.WithOpName(\"a_op\"), Input::Initializer(a_tensor));\n Output a_relu_op = Relu(root.WithOpName(\"a_relu_op\"), a_op);\n Tensor b_tensor(DT_FLOAT, TensorShape({width}));\n test::FillIota<float>(&b_tensor, 1.0f);\n Output b_op = Const(root.WithOpName(\"b_op\"), Input::Initializer(b_tensor));\n Output b_relu_op = Relu(root.WithOpName(\"b_relu_op\"), b_op);\n Output add_op = Add(root.WithOpName(\"add_op\"), a_relu_op, b_relu_op);\n Tensor c_tensor(DT_FLOAT, TensorShape({width}));\n test::FillIota<float>(&c_tensor, 2.0f);\n Output c_op = Const(root.WithOpName(\"c_op\"), Input::Initializer(c_tensor));\n Output mul_op1 = Mul(root.WithOpName(\"mul_op1\"), add_op, c_op);\n Output mul_op2 = Mul(root.WithOpName(\"mul_op2\"), add_op, c_op);\n Output mul_op3 = Mul(root.WithOpName(\"mul_op3\"), add_op, c_op);\n Output final_mul_op =\n Mul(root.WithOpName(\"final_mul_op\"), mul_op2, mul_op3);\n GraphDef float_graph_def;\n TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));\n GraphDef merged_graph_def;\n TestTransformedVersusFloatGraph(MergeDuplicateNodes, float_graph_def,\n {{\"a_op\", a_tensor}}, {{\"a_op\", a_tensor}},\n {\"mul_op1\", \"final_mul_op\"}, {}, 1.0,\n &merged_graph_def);\n std::map<string, const NodeDef*> node_map;\n 
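MapNamesToNodes, called just below, builds the name-to-NodeDef index that these EXPECT_EQ checks read from. A hedged stand-in with the same shape (the BuildNodeIndex name is ours, not the real graph_transforms API):

#include <map>
#include <string>

#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"

// Indexes every node by name so a test can follow input edges by string,
// e.g. node_map["add_op"]->input(0). The stored pointers stay valid as
// long as graph_def outlives the map.
std::map<std::string, const tensorflow::NodeDef*> BuildNodeIndex(
    const tensorflow::GraphDef& graph_def) {
  std::map<std::string, const tensorflow::NodeDef*> node_map;
  for (const tensorflow::NodeDef& node : graph_def.node()) {
    node_map[node.name()] = &node;
  }
  return node_map;
}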
MapNamesToNodes(merged_graph_def, &node_map);\n EXPECT_EQ(1, node_map.count(\"a_op\"));\n EXPECT_EQ(1, node_map.count(\"b_op\"));\n EXPECT_EQ(1, node_map.count(\"a_relu_op\"));\n EXPECT_EQ(1, node_map.count(\"b_relu_op\"));\n EXPECT_EQ(1, node_map.count(\"mul_op1\"));\n EXPECT_EQ(1, node_map.count(\"final_mul_op\"));\n EXPECT_EQ(1, (node_map.count(\"mul_op2\") + node_map.count(\"mul_op3\")));\n string remaining_mul;\n if (node_map.count(\"mul_op2\")) {\n remaining_mul = \"mul_op2\";\n } else {\n remaining_mul = \"mul_op3\";\n }\n EXPECT_EQ(remaining_mul, node_map[\"final_mul_op\"]->input(0));\n EXPECT_EQ(remaining_mul, node_map[\"final_mul_op\"]->input(1));\n EXPECT_EQ(1, node_map.count(\"c_op\"));\n EXPECT_EQ(\"add_op\", node_map[\"mul_op1\"]->input(0));\n EXPECT_EQ(\"c_op\", node_map[\"mul_op1\"]->input(1));\n }\n void TestExcludeNonFloat() {\n auto root = tensorflow::Scope::NewRootScope();\n using namespace ::tensorflow::ops; \n Tensor int_constant_tensor(DT_INT32, TensorShape({4, 5}));\n test::FillIota(&int_constant_tensor, 1);\n Output int_constant = Const(root.WithOpName(\"int_constant\"),\n Input::Initializer(int_constant_tensor));\n Tensor float_constant_tensor(DT_FLOAT, TensorShape({4, 5}));\n test::FillIota(&float_constant_tensor, 2.0f);\n Output float_constant = Const(root.WithOpName(\"float_constant\"),\n Input::Initializer(float_constant_tensor));\n Output excluded_reshape_op =\n Reshape(root.WithOpName(\"excluded_reshape_op\"), int_constant, {10, 2});\n Output included_reshape_op = Reshape(root.WithOpName(\"included_reshape_op\"),\n float_constant, {10, 2});\n Output excluded_relu_op =\n Relu(root.WithOpName(\"excluded_relu_op\"), excluded_reshape_op);\n Output excluded_float_caster = Cast(\n root.WithOpName(\"excluded_float_caster\"), excluded_relu_op, DT_FLOAT);\n Output included_relu_op =\n Relu(root.WithOpName(\"included_relu_op\"), included_reshape_op);\n GraphDef float_graph_def;\n TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));\n GraphDef quantized_graph_def;\n TestTransformedVersusFloatGraph(\n QuantizeNodes, float_graph_def, {}, {},\n {\"excluded_float_caster\", \"included_relu_op\"}, {}, 1.0,\n &quantized_graph_def);\n std::map node_map;\n MapNamesToNodes(quantized_graph_def, &node_map);\n ASSERT_EQ(1, node_map.count(\"excluded_reshape_op\"));\n EXPECT_EQ(\"Reshape\", node_map.at(\"excluded_reshape_op\")->op());\n ASSERT_EQ(1, node_map.count(\"included_reshape_op\"));\n EXPECT_EQ(\"Dequantize\", node_map.at(\"included_reshape_op\")->op());\n }\n};\nTEST_F(QuantizeNodesTest, TestIgnoreOps) {\n TestIgnoreOps({});\n TestIgnoreOps({\"MatMul\"});\n TestIgnoreOps({\"MatMul\", \"Mul\"});\n}\nTEST_F(QuantizeNodesTest, TestQuantizeMatMulTiny) { TestQuantizeMatMulTiny(); }\nTEST_F(QuantizeNodesTest, TestQuantizeMatMulSmall) {\n TestQuantizeMatMulSmall();\n}\nTEST_F(QuantizeNodesTest, TestQuantizeMul) { TestQuantizeMul(); }\nTEST_F(QuantizeNodesTest, TestQuantizeAdd) { TestQuantizeAdd(); }\nTEST_F(QuantizeNodesTest, TestOddPaddingProblem) {\n TestQuantizeConv2D(1, 4, 4, 1, 3, 1, 2, \"SAME\",\n {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},\n {1, 2, 3, 4, 5, 6, 7, 8, 9});\n}\nTEST_F(QuantizeNodesTest, TestQuantizeConv2D) {\n TestQuantizeConv2D(1, 4, 3, 1, 3, 1, 1, \"SAME\",\n {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},\n {1, 4, 7, 2, 5, 8, 3, 6, 9});\n}\nTEST_F(QuantizeNodesTest, TestQuantizeBiasAdd) { TestQuantizeBiasAdd(); }\nTEST_F(QuantizeNodesTest, TestQuantizeConcat) { TestQuantizeConcat(); }\nTEST_F(QuantizeNodesTest, TestQuantizeRelu) { TestQuantizeRelu(); 
}\nTEST_F(QuantizeNodesTest, TestQuantizeRelu6) { TestQuantizeRelu6(); }\nTEST_F(QuantizeNodesTest, TestQuantizeMaxPool) { TestQuantizeMaxPool(); }\nTEST_F(QuantizeNodesTest, TestQuantizeAvgPool) { TestQuantizeAvgPool(); }\nTEST_F(QuantizeNodesTest, TestQuantizeReshape) { TestQuantizeReshape(); }\nTEST_F(QuantizeNodesTest, TestQuantizeResizeBilinear) {\n TestQuantizeResizeBilinear();\n}\nTEST_F(QuantizeNodesTest, TestRemoveRedundantQuantization) {\n TestRemoveRedundantQuantization();\n}\nTEST_F(QuantizeNodesTest, TestRemoveRedundantQuantizationWithBiasAdd) {\n TestRemoveRedundantQuantizationWithBiasAdd();\n}\nTEST_F(QuantizeNodesTest, TestRemoveRedundantQuantizationWithMultipleOutputs) {\n TestRemoveRedundantQuantizationWithMultipleOutputs();\n}\nTEST_F(QuantizeNodesTest, TestQuantizePlaceholders) {\n TestQuantizePlaceholders();\n}\nTEST_F(QuantizeNodesTest, TestInputRange) { TestInputRange(); }\nTEST_F(QuantizeNodesTest, TestFallbackRange) { TestFallbackRange(); }\nTEST_F(QuantizeNodesTest, TestConvertFakeQuantsToRequantize) {\n TestConvertFakeQuantsToRequantize();\n}\nTEST_F(QuantizeNodesTest, TestMergeAdjacentRequantizes) {\n TestMergeAdjacentRequantizes();\n}\nTEST_F(QuantizeNodesTest, TestConvertFakeQuantsEndToEnd) {\n TestConvertFakeQuantsEndToEnd();\n}\nTEST_F(QuantizeNodesTest, TestHoistFakeQuants) { TestHoistFakeQuants(); }\nTEST_F(QuantizeNodesTest, TestMergeDuplicateQuantizes) {\n TestMergeDuplicateQuantizes();\n}\nTEST_F(QuantizeNodesTest, TestMergeDuplicateConsts) {\n TestMergeDuplicateConsts();\n}\nTEST_F(QuantizeNodesTest, TestMergeDuplicatesNested) {\n TestMergeDuplicatesNested();\n}\nTEST_F(QuantizeNodesTest, TestMergeDuplicateInOut) {\n TestMergeDuplicatesInOut();\n}\nTEST_F(QuantizeNodesTest, TestExcludeNonFloat) { TestExcludeNonFloat(); }\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/quantize_nodes.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/quantize_nodes_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":284,"cells":{"ID":{"kind":"string","value":"2d10528f-a1a6-4888-a5d3-6ca40943cf7b"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/quiche"},"File Name":{"kind":"string","value":"quiche_lower_case_string"},"File Path in Repository":{"kind":"string","value":"quiche/common/platform/api/quiche_lower_case_string.h"},"File Path for Unit Test":{"kind":"string","value":"quiche/common/platform/api/quiche_lower_case_string_test.cc"},"Code":{"kind":"string","value":"#ifndef QUICHE_COMMON_PLATFORM_API_QUICHE_LOWER_CASE_STRING_H_\n#define QUICHE_COMMON_PLATFORM_API_QUICHE_LOWER_CASE_STRING_H_\n#include \"quiche_platform_impl/quiche_lower_case_string_impl.h\"\nnamespace quiche {\nusing QuicheLowerCaseString = QuicheLowerCaseStringImpl;\n} \n#endif "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"quiche/common/platform/api/quiche_lower_case_string.h\"\n#include \"absl/strings/string_view.h\"\n#include \"quiche/common/platform/api/quiche_test.h\"\nnamespace quiche::test {\nnamespace {\nTEST(QuicheLowerCaseString, Basic) {\n QuicheLowerCaseString empty(\"\");\n EXPECT_EQ(\"\", empty.get());\n QuicheLowerCaseString from_lower_case(\"foo\");\n EXPECT_EQ(\"foo\", from_lower_case.get());\n QuicheLowerCaseString 
from_mixed_case(\"BaR\");\n EXPECT_EQ(\"bar\", from_mixed_case.get());\n const absl::string_view kData = \"FooBar\";\n QuicheLowerCaseString from_string_view(kData);\n EXPECT_EQ(\"foobar\", from_string_view.get());\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/platform/api/quiche_lower_case_string.h"},"Test Code Url":{"kind":"string","value":"https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/platform/api/quiche_lower_case_string_test.cc"},"Commit Hash":{"kind":"string","value":"6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6"}}},{"rowIdx":285,"cells":{"ID":{"kind":"string","value":"476923f3-b0e5-4116-ab25-59dc056d648d"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"grpc_dispatcher_impl"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/data/service/grpc_dispatcher_impl.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/data/service/grpc_dispatcher_impl_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/data/service/grpc_dispatcher_impl.h\"\n#include \"grpcpp/server_context.h\"\n#include \"tensorflow/core/data/service/export.pb.h\"\n#include \"tensorflow/core/distributed_runtime/rpc/grpc_util.h\"\n#include \"tensorflow/core/protobuf/service_config.pb.h\"\nnamespace tensorflow {\nnamespace data {\nusing ::grpc::ServerBuilder;\nusing ::grpc::ServerContext;\nGrpcDispatcherImpl::GrpcDispatcherImpl(\n const experimental::DispatcherConfig& config, ServerBuilder& server_builder)\n : impl_(config) {\n server_builder.RegisterService(this);\n VLOG(1) << \"Registered data service dispatcher\";\n}\nStatus GrpcDispatcherImpl::Start() { return impl_.Start(); }\nvoid GrpcDispatcherImpl::Stop() { impl_.Stop(); }\nsize_t GrpcDispatcherImpl::NumActiveIterations() {\n return impl_.NumActiveIterations();\n}\nDispatcherStateExport GrpcDispatcherImpl::ExportState() const {\n return impl_.ExportState();\n}\n#define HANDLER(method) \\\n grpc::Status GrpcDispatcherImpl::method(ServerContext* context, \\\n const method##Request* request, \\\n method##Response* response) { \\\n return ToGrpcStatus(impl_.method(request, response)); \\\n }\nHANDLER(WorkerHeartbeat);\nHANDLER(WorkerUpdate);\nHANDLER(GetDatasetDef);\nHANDLER(GetSplit);\nHANDLER(GetVersion);\nHANDLER(GetOrRegisterDataset);\nHANDLER(ReleaseIterationClient);\nHANDLER(MaybeRemoveTask);\nHANDLER(GetOrCreateJob);\nHANDLER(GetOrCreateIteration);\nHANDLER(ClientHeartbeat);\nHANDLER(GetWorkers);\nHANDLER(GetDataServiceMetadata);\nHANDLER(GetDataServiceConfig);\nHANDLER(Snapshot);\nHANDLER(GetSnapshotSplit);\nHANDLER(GetSnapshotStreams);\nHANDLER(DisableCompressionAtRuntime);\n#undef HANDLER\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/core/data/service/grpc_dispatcher_impl.h\"\n#include \n#include \n#include \n#include \n#include \"grpcpp/channel.h\"\n#include \"grpcpp/client_context.h\"\n#include \"grpcpp/create_channel.h\"\n#include \"grpcpp/security/credentials.h\"\n#include \"grpcpp/support/channel_arguments.h\"\n#include \"absl/strings/str_cat.h\"\n#include \"tensorflow/core/data/service/common.h\"\n#include \"tensorflow/core/data/service/common.pb.h\"\n#include \"tensorflow/core/data/service/credentials_factory.h\"\n#include \"tensorflow/core/data/service/dispatcher.pb.h\"\n#include 
\"tensorflow/core/data/service/server_lib.h\"\n#include \"tensorflow/core/data/service/test_util.h\"\n#include \"tensorflow/core/distributed_runtime/rpc/grpc_util.h\"\n#include \"tensorflow/core/framework/graph.pb.h\"\n#include \"tensorflow/core/lib/core/status_test_util.h\"\n#include \"tensorflow/core/platform/errors.h\"\n#include \"tensorflow/core/platform/status.h\"\n#include \"tensorflow/core/platform/statusor.h\"\n#include \"tensorflow/core/platform/types.h\"\n#include \"tensorflow/core/protobuf/data_service.pb.h\"\n#include \"tensorflow/core/protobuf/service_config.pb.h\"\nnamespace tensorflow {\nnamespace data {\nnamespace {\nusing ::grpc::Channel;\nusing ::grpc::ChannelArguments;\nusing ::grpc::ChannelCredentials;\nusing ::grpc::ClientContext;\nconstexpr const char kHostAddress[] = \"localhost\";\nconstexpr const char kProtocol[] = \"grpc\";\nclass GrpcDispatcherImplTest : public ::testing::Test {\n protected:\n void SetUp() override {\n TF_ASSERT_OK(SetUpDispatcherServer());\n TF_ASSERT_OK(SetUpDispatcherClientStub());\n }\n Status SetUpDispatcherServer() {\n experimental::DispatcherConfig config;\n config.set_protocol(kProtocol);\n TF_RETURN_IF_ERROR(NewDispatchServer(config, dispatcher_server_));\n return dispatcher_server_->Start();\n }\n Status SetUpDispatcherClientStub() {\n std::shared_ptr credentials;\n TF_RETURN_IF_ERROR(\n CredentialsFactory::CreateClientCredentials(kProtocol, &credentials));\n ChannelArguments args;\n args.SetMaxReceiveMessageSize(std::numeric_limits::max());\n args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, true);\n std::shared_ptr channel =\n ::grpc::CreateCustomChannel(GetDispatcherAddress(), credentials, args);\n dispatcher_client_stub_ = DispatcherService::NewStub(channel);\n return absl::OkStatus();\n }\n std::string GetDispatcherAddress() const {\n return absl::StrCat(kHostAddress, \":\", dispatcher_server_->BoundPort());\n }\n std::unique_ptr dispatcher_server_;\n std::unique_ptr dispatcher_client_stub_;\n};\nTEST_F(GrpcDispatcherImplTest, GrpcTest) {\n ClientContext ctx;\n GetVersionRequest req;\n GetVersionResponse resp;\n TF_ASSERT_OK(\n FromGrpcStatus(dispatcher_client_stub_->GetVersion(&ctx, req, &resp)));\n EXPECT_EQ(resp.version(), kDataServiceVersion);\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/grpc_dispatcher_impl.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/grpc_dispatcher_impl_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":286,"cells":{"ID":{"kind":"string","value":"145e02f9-e296-4fae-91d2-7cfe3e2ddd1e"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"mkl_conv_ops"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/kernels/mkl/mkl_conv_ops.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/kernels/mkl/mkl_conv_ops_test.cc"},"Code":{"kind":"string","value":"#ifdef INTEL_MKL\n#include \"tensorflow/core/kernels/mkl/mkl_conv_ops.h\"\n#include \n#include \n#include \n#include \n#include \"absl/strings/str_join.h\"\n#include \"tensorflow/core/kernels/mkl/mkl_kernel_util.h\"\n#include \"tensorflow/core/kernels/mkl/mkl_quantized_conv_ops.h\"\n#include \"tensorflow/core/kernels/no_op.h\"\n#if 
defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)\n#include \"tensorflow/core/platform/mutex.h\"\n#endif\nusing dnnl::convolution_forward;\nusing dnnl::prop_kind;\nusing dnnl::stream;\nusing ConvFwdPd = dnnl::convolution_forward::primitive_desc;\nusing ReorderPd = dnnl::reorder::primitive_desc;\nnamespace tensorflow {\n#ifndef ENABLE_ONEDNN_V3\n#define APPEND_DEPTHWISE(wei_dt, bias_dt, dst_dt, kernel, stride, padding, \\\n scales_mask, scales) \\\n append_dw(wei_dt, bias_dt, dst_dt, kernel, stride, padding, scales_mask, \\\n scales)\n#define APPEND_ELTWISE(scale, alg, alpha, beta) \\\n append_eltwise(scale, alg, alpha, beta)\n#define GET_DATA_TYPE data_type()\n#define SET_FUSE_ACTIVATION_FOR_RELU6 \\\n set_fuse_activation(true, dnnl::algorithm::eltwise_bounded_relu, 6.0)\n#define SET_MKL_LAYOUT(md) SetMklLayout(&md)\n#define OUTPUT_SCALE_DCHECK (post_op_param.name == \"output_scale\")\n#define TSCALED_BIAS Tbias\n#define SCALE scales\n#define SUMMAND_SCALE_U8(summand_range, output_range) \\\n summand_range / output_range\n#define SUMMAND_SCALE_S8(summand_range, output_range) \\\n 255.0f * summand_range / (output_range * 127.0f)\n#else\n#define APPEND_DEPTHWISE(wei_dt, bias_dt, dst_dt, kernel, stride, padding, \\\n scales_mask, scales) \\\n append_dw(wei_dt, bias_dt, dst_dt, kernel, stride, padding)\n#define APPEND_ELTWISE(scale, alg, alpha, beta) \\\n append_eltwise(alg, alpha, beta); \\\n (void)scale\n#define GET_DATA_TYPE get_data_type()\n#define SET_FUSE_ACTIVATION_FOR_RELU6 \\\n set_fuse_activation(true, dnnl::algorithm::eltwise_clip, 0.0, 6.0)\n#define SET_MKL_LAYOUT(md) SetMklLayout(md)\n#define OUTPUT_SCALE_DCHECK \\\n (post_op_param.name == \"src_scale\") || \\\n (post_op_param.name == \"wei_scale\") || \\\n (post_op_param.name == \"dst_scale\")\n#define TSCALED_BIAS float\n#define SCALE wei_scale\n#define SUMMAND_SCALE_U8(summand_range, output_range) summand_range / 255.0f\n#define SUMMAND_SCALE_S8(summand_range, output_range) summand_range / 127.0f\n#endif \n#if !defined(ENABLE_ONEDNN_OPENMP) && !defined(ENABLE_ONEDNN_V3)\n#define FWD_STREAM , *fwd_stream\n#else\n#define FWD_STREAM\n#endif \nnamespace quantized_fusions {\nstring none[] = {\"\"};\nstring bias[] = {\"BiasAdd\"};\nstring relu[] = {\"Relu\"};\nstring requantize[] = {\"Requantize\"};\nstring bias_relu[] = {\"BiasAdd\", \"Relu\"};\nstring bias_requantize[] = {\"BiasAdd\", \"Requantize\"};\nstring relu_requantize[] = {\"Relu\", \"Requantize\"};\nstring bias_relu_requantize[] = {\"BiasAdd\", \"Relu\", \"Requantize\"};\nstring bias_sum_relu[] = {\"BiasAdd\", \"Sum\", \"Relu\"};\nstring bias_sum_relu_requantize[] = {\"BiasAdd\", \"Sum\", \"Relu\", \"Requantize\"};\n} \nstruct MklConvFwdParams {\n memory::dims src_dims;\n memory::dims filter_dims;\n memory::dims bias_dims;\n memory::dims dst_dims;\n memory::dims strides;\n memory::dims dilations;\n memory::dims padding_left;\n memory::dims padding_right;\n memory::dims fuse_bn_dims;\n MklTensorFormat tf_fmt;\n bool native_format;\n bool is_depthwise;\n bool is_filter_const = false;\n string dtypes = string(\"\");\n struct PostOpParam {\n string name;\n dnnl::algorithm alg;\n std::vector param;\n std::string partial_key;\n DataType dtype = DT_INVALID;\n };\n std::vector post_op_params;\n MklConvFwdParams(memory::dims src_dims, memory::dims filter_dims,\n memory::dims bias_dims, memory::dims dst_dims,\n memory::dims strides, memory::dims dilations,\n memory::dims padding_left, memory::dims padding_right,\n memory::dims fuse_bn_dims, MklTensorFormat tf_fmt,\n bool 
native_format, bool is_depthwise, bool is_filter_const)\n : src_dims(src_dims),\n filter_dims(filter_dims),\n bias_dims(bias_dims),\n dst_dims(dst_dims),\n strides(strides),\n dilations(dilations),\n padding_left(padding_left),\n padding_right(padding_right),\n fuse_bn_dims(fuse_bn_dims),\n tf_fmt(tf_fmt),\n native_format(native_format),\n is_depthwise(is_depthwise),\n is_filter_const(is_filter_const) {}\n};\ntemplate \nclass MklConvFwdPrimitive : public MklPrimitive {\n public:\n explicit MklConvFwdPrimitive(const MklConvFwdParams& convFwdDims)\n : MklPrimitive(engine(engine::kind::cpu, 0)) {\n if (context_.conv_fwd == nullptr) {\n Setup(convFwdDims);\n }\n }\n ~MklConvFwdPrimitive() {}\n dnnl::memory::desc GetScratchPadDesc() {\n return context_.fwd_pd->scratchpad_desc();\n }\n void Execute(const Tinput* src_data, const Tfilter* filter_data,\n const void* bias_data, const Toutput* dst_data,\n const MklConvFwdParams& convFwdDims,\n std::shared_ptr fwd_stream, void* sp_data = nullptr) {\n Execute(src_data, filter_data, bias_data, dst_data, nullptr, nullptr,\n nullptr, nullptr, convFwdDims, fwd_stream, sp_data);\n }\n void Execute(const Tinput* src_data, const Tfilter* filter_data,\n const void* bias_data, const Toutput* dst_data,\n const Tinput* bn_scale_data, const Tinput* bn_mean_data,\n const Tinput* bn_offset_data, const Tinput* bn_rsqrt_data,\n const MklConvFwdParams& convFwdDims,\n std::shared_ptr fwd_stream, void* sp_data) {\n#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)\n mutex_lock lock(primitive_execution_mu_);\n#endif\n context_.src_mem->set_data_handle(\n static_cast(const_cast(src_data)) FWD_STREAM);\n context_.filter_mem->set_data_handle(\n static_cast(const_cast(filter_data)) FWD_STREAM);\n if (bias_data != nullptr) {\n context_.bias_mem->set_data_handle(const_cast(bias_data)\n FWD_STREAM);\n }\n auto const& post_op_params = convFwdDims.post_op_params;\n if (!post_op_params.empty()) {\n for (auto const& post_op_param : post_op_params) {\n if (post_op_param.name == \"src_scale\") {\n context_.src_scale_mem->set_data_handle(static_cast(\n const_cast(post_op_param.param.data())) FWD_STREAM);\n } else if (post_op_param.name == \"wei_scale\") {\n context_.wei_scale_mem->set_data_handle(static_cast(\n const_cast(post_op_param.param.data())) FWD_STREAM);\n } else if (post_op_param.name == \"dst_scale\") {\n context_.dst_scale_mem->set_data_handle(static_cast(\n const_cast(post_op_param.param.data())) FWD_STREAM);\n }\n }\n }\n if (bn_scale_data != nullptr) {\n context_.bn_scale_mem->set_data_handle(\n static_cast(const_cast(bn_scale_data)) FWD_STREAM);\n context_.bn_mean_mem->set_data_handle(\n static_cast(const_cast(bn_mean_data)) FWD_STREAM);\n context_.bn_rsqrt_mem->set_data_handle(\n static_cast(const_cast(bn_rsqrt_data)) FWD_STREAM);\n context_.bn_offset_mem->set_data_handle(\n static_cast(const_cast(bn_offset_data)) FWD_STREAM);\n }\n context_.dst_mem->set_data_handle(\n static_cast(const_cast(dst_data)) FWD_STREAM);\n if (sp_data) {\n context_.sp_mem->set_data_handle(static_cast(sp_data) FWD_STREAM);\n }\n DCHECK_EQ(context_.fwd_primitives.size(),\n context_.fwd_primitives_args.size());\n for (size_t i = 0; i < context_.fwd_primitives.size(); ++i) {\n context_.fwd_primitives.at(i).execute(*fwd_stream,\n context_.fwd_primitives_args.at(i));\n }\n context_.src_mem->set_data_handle(DummyData);\n context_.filter_mem->set_data_handle(DummyData);\n if (bias_data != nullptr) {\n context_.bias_mem->set_data_handle(DummyData);\n }\n if (bn_scale_data != nullptr) 
{\n context_.bn_scale_mem->set_data_handle(DummyData);\n context_.bn_mean_mem->set_data_handle(DummyData);\n context_.bn_rsqrt_mem->set_data_handle(DummyData);\n context_.bn_offset_mem->set_data_handle(DummyData);\n }\n context_.dst_mem->set_data_handle(DummyData);\n if (sp_data) {\n context_.sp_mem->set_data_handle(DummyData);\n }\n }\n void Execute(const Tinput* src_data, const Tfilter* filter_data,\n const Toutput* dst_data, const MklConvFwdParams& convFwdDims,\n std::shared_ptr fwd_stream, void* sp_data) {\n Execute(src_data, filter_data, nullptr, dst_data, nullptr, nullptr, nullptr,\n nullptr, convFwdDims, fwd_stream, sp_data);\n }\n std::shared_ptr GetPrimitiveDesc() const {\n return context_.fwd_pd;\n }\n private:\n struct ConvFwdContext {\n std::shared_ptr src_mem;\n std::shared_ptr filter_mem;\n std::shared_ptr bias_mem;\n std::shared_ptr dst_mem;\n std::shared_ptr sp_mem;\n std::shared_ptr bn_scale_mem;\n std::shared_ptr bn_mean_mem;\n std::shared_ptr bn_rsqrt_mem;\n std::shared_ptr bn_offset_mem;\n std::shared_ptr src_scale_mem;\n std::shared_ptr wei_scale_mem;\n std::shared_ptr dst_scale_mem;\n#ifndef ENABLE_ONEDNN_V3\n std::shared_ptr fwd_desc;\n#endif \n std::shared_ptr fwd_pd;\n std::shared_ptr src_md;\n std::shared_ptr filter_md;\n std::shared_ptr bias_md;\n std::shared_ptr dst_md;\n std::shared_ptr bn_scale_md;\n std::shared_ptr bn_mean_md;\n std::shared_ptr bn_rsqrt_md;\n std::shared_ptr bn_offset_md;\n std::shared_ptr src_scale_md;\n std::shared_ptr wei_scale_md;\n std::shared_ptr dst_scale_md;\n std::shared_ptr conv_fwd;\n std::vector fwd_primitives;\n std::vector> fwd_primitives_args;\n ConvFwdContext()\n : src_mem(nullptr),\n filter_mem(nullptr),\n bias_mem(nullptr),\n dst_mem(nullptr),\n sp_mem(nullptr),\n bn_scale_mem(nullptr),\n bn_mean_mem(nullptr),\n bn_rsqrt_mem(nullptr),\n bn_offset_mem(nullptr),\n src_scale_mem(nullptr),\n wei_scale_mem(nullptr),\n dst_scale_mem(nullptr),\n#ifndef ENABLE_ONEDNN_V3\n fwd_desc(nullptr),\n#endif \n fwd_pd(nullptr),\n src_md(nullptr),\n filter_md(nullptr),\n bias_md(nullptr),\n dst_md(nullptr),\n bn_scale_md(nullptr),\n bn_mean_md(nullptr),\n bn_rsqrt_md(nullptr),\n bn_offset_md(nullptr),\n src_scale_md(nullptr),\n wei_scale_md(nullptr),\n dst_scale_md(nullptr),\n conv_fwd(nullptr) {\n }\n };\n void Setup(const MklConvFwdParams& convFwdDims) {\n memory::format_tag user_data_fmt;\n if (convFwdDims.native_format) {\n user_data_fmt = MklTensorFormatToMklDnnDataFormat(convFwdDims.tf_fmt);\n } else {\n user_data_fmt = memory::format_tag::any;\n }\n context_.src_md.reset(new memory::desc(\n {convFwdDims.src_dims}, MklDnnType(), user_data_fmt));\n if (convFwdDims.filter_dims.size() == 4 && !convFwdDims.is_filter_const &&\n std::is_same::value &&\n convFwdDims.src_dims[MklDnnDims::Dim_N] == 1) {\n context_.filter_md.reset(new memory::desc({convFwdDims.filter_dims},\n MklDnnType(),\n memory::format_tag::hwio));\n } else {\n context_.filter_md.reset(new memory::desc({convFwdDims.filter_dims},\n MklDnnType(),\n memory::format_tag::any));\n }\n context_.dst_md.reset(new memory::desc(\n {convFwdDims.dst_dims}, MklDnnType(), user_data_fmt));\n if (!convFwdDims.bias_dims.empty()) {\n if (std::is_same::value) {\n context_.bias_md.reset(new memory::desc({convFwdDims.bias_dims},\n MklDnnType(),\n memory::format_tag::any));\n } else {\n context_.bias_md.reset(new memory::desc({convFwdDims.bias_dims},\n MklDnnType(),\n memory::format_tag::any));\n }\n#ifndef ENABLE_ONEDNN_V3\n context_.fwd_desc.reset(new convolution_forward::desc(\n 
prop_kind::forward, dnnl::algorithm::convolution_direct,\n *context_.src_md, *context_.filter_md, *context_.bias_md,\n *context_.dst_md, convFwdDims.strides, convFwdDims.dilations,\n convFwdDims.padding_left, convFwdDims.padding_right));\n } else {\n context_.fwd_desc.reset(new convolution_forward::desc(\n prop_kind::forward, dnnl::algorithm::convolution_direct,\n *context_.src_md, *context_.filter_md, *context_.dst_md,\n convFwdDims.strides, convFwdDims.dilations, convFwdDims.padding_left,\n convFwdDims.padding_right));\n#endif \n }\n if (!convFwdDims.fuse_bn_dims.empty()) {\n const memory::format_tag fused_bn_arg_fmt =\n convFwdDims.native_format\n ? user_data_fmt\n : MklTensorFormatToMklDnnDataFormat(convFwdDims.tf_fmt);\n context_.bn_scale_md.reset(new memory::desc(\n {convFwdDims.fuse_bn_dims}, MklDnnType(), fused_bn_arg_fmt));\n context_.bn_mean_md.reset(new memory::desc(\n {convFwdDims.fuse_bn_dims}, MklDnnType(), fused_bn_arg_fmt));\n context_.bn_rsqrt_md.reset(new memory::desc(\n {convFwdDims.fuse_bn_dims}, MklDnnType(), fused_bn_arg_fmt));\n context_.bn_offset_md.reset(new memory::desc(\n {convFwdDims.fuse_bn_dims}, MklDnnType(), fused_bn_arg_fmt));\n }\n auto const& post_op_params = convFwdDims.post_op_params;\n dnnl::primitive_attr post_ops_attr;\n dnnl::post_ops post_ops;\n post_ops_attr.set_scratchpad_mode(dnnl::scratchpad_mode::user);\n std::unordered_map is_scale_set;\n if (!post_op_params.empty()) {\n for (auto const& post_op_param : post_op_params) {\n if (post_op_param.name == \"activation\") {\n DCHECK_EQ(post_op_param.param.size(), 3);\n float op_scale = post_op_param.param[0];\n float op_alpha = post_op_param.param[1];\n float op_beta = post_op_param.param[2];\n post_ops.APPEND_ELTWISE(op_scale, post_op_param.alg, op_alpha,\n op_beta);\n } else if (post_op_param.name == \"sum\") {\n DCHECK_EQ(post_op_param.param.size(), 1);\n float op_scale = post_op_param.param[0];\n#ifndef ENABLE_ONEDNN_V3\n post_ops.append_sum(op_scale);\n#else\n if (post_op_param.dtype != DT_INVALID) {\n if (post_op_param.dtype == DT_FLOAT) {\n post_ops.append_sum(op_scale, 0,\n MklDnnType());\n } else {\n TF_CHECK_OK(absl::FailedPreconditionError(\n \"Summand data type is expected to be float\"));\n }\n } else {\n post_ops.append_sum(op_scale);\n }\n#endif \n#ifndef ENABLE_ONEDNN_V3\n } else if (post_op_param.name == \"output_scale\") {\n if (post_op_param.param.size() == 1) {\n post_ops_attr.set_output_scales(0, post_op_param.param);\n } else {\n post_ops_attr.set_output_scales(2, post_op_param.param);\n }\n#else\n } else if (post_op_param.name == \"src_scale\") {\n is_scale_set.insert({\"src\", true});\n post_ops_attr.set_scales_mask(DNNL_ARG_SRC, 0);\n context_.src_scale_md.reset(new memory::desc({1}, MklDnnType(),\n memory::format_tag::x));\n context_.src_scale_mem.reset(\n new memory(*context_.src_scale_md, cpu_engine_, DummyData));\n } else if (post_op_param.name == \"wei_scale\") {\n is_scale_set.insert({\"wei\", true});\n const int scale_size = post_op_param.param.size();\n const int mask = scale_size == 1 ? 0\n : convFwdDims.is_depthwise ? 
3\n : 1;\n post_ops_attr.set_scales_mask(DNNL_ARG_WEIGHTS, mask);\n context_.wei_scale_md.reset(new memory::desc(\n {scale_size}, MklDnnType(), memory::format_tag::x));\n context_.wei_scale_mem.reset(\n new memory(*context_.wei_scale_md, cpu_engine_, DummyData));\n } else if (post_op_param.name == \"dst_scale\") {\n is_scale_set.insert({\"dst\", true});\n post_ops_attr.set_scales_mask(DNNL_ARG_DST, 0);\n context_.dst_scale_md.reset(new memory::desc({1}, MklDnnType(),\n memory::format_tag::x));\n context_.dst_scale_mem.reset(\n new memory(*context_.dst_scale_md, cpu_engine_, DummyData));\n#endif \n } else if (post_op_param.name == \"fuse_bn\") {\n post_ops.append_binary(dnnl::algorithm::binary_sub,\n *context_.bn_mean_md);\n post_ops.append_binary(dnnl::algorithm::binary_mul,\n *context_.bn_rsqrt_md);\n post_ops.append_binary(dnnl::algorithm::binary_mul,\n *context_.bn_scale_md);\n post_ops.append_binary(dnnl::algorithm::binary_add,\n *context_.bn_offset_md);\n } else {\n DCHECK((post_op_param.name == \"activation\") ||\n (post_op_param.name == \"sum\") || OUTPUT_SCALE_DCHECK ||\n (post_op_param.name == \"fuse_bn\"));\n }\n }\n post_ops_attr.set_post_ops(post_ops);\n }\n#ifndef ENABLE_ONEDNN_V3\n context_.fwd_pd.reset(\n new ConvFwdPd(*context_.fwd_desc, post_ops_attr, cpu_engine_));\n#else\n if (!convFwdDims.bias_dims.empty()) {\n context_.fwd_pd.reset(new ConvFwdPd(\n cpu_engine_, prop_kind::forward, dnnl::algorithm::convolution_direct,\n *context_.src_md, *context_.filter_md, *context_.bias_md,\n *context_.dst_md, convFwdDims.strides, convFwdDims.dilations,\n convFwdDims.padding_left, convFwdDims.padding_right, post_ops_attr));\n } else {\n context_.fwd_pd.reset(new ConvFwdPd(\n cpu_engine_, prop_kind::forward, dnnl::algorithm::convolution_direct,\n *context_.src_md, *context_.filter_md, *context_.dst_md,\n convFwdDims.strides, convFwdDims.dilations, convFwdDims.padding_left,\n convFwdDims.padding_right, post_ops_attr));\n }\n#endif \n context_.src_mem.reset(\n new memory(context_.fwd_pd.get()->src_desc(), cpu_engine_, DummyData));\n context_.filter_mem.reset(new memory(context_.fwd_pd.get()->weights_desc(),\n cpu_engine_, DummyData));\n context_.dst_mem.reset(\n new memory(context_.fwd_pd.get()->dst_desc(), cpu_engine_, DummyData));\n context_.conv_fwd.reset(new convolution_forward(*context_.fwd_pd));\n auto scratchpad_md = context_.fwd_pd->scratchpad_desc();\n context_.sp_mem.reset(\n new dnnl::memory(scratchpad_md, cpu_engine_, DummyData));\n std::unordered_map net_args;\n if (!convFwdDims.bias_dims.empty()) {\n context_.bias_mem.reset(new memory(context_.fwd_pd.get()->bias_desc(),\n cpu_engine_, DummyData));\n net_args = {{DNNL_ARG_SRC, *context_.src_mem},\n {DNNL_ARG_WEIGHTS, *context_.filter_mem},\n {DNNL_ARG_BIAS, *context_.bias_mem},\n {DNNL_ARG_SCRATCHPAD, *context_.sp_mem},\n {DNNL_ARG_DST, *context_.dst_mem}};\n#ifdef ENABLE_ONEDNN_V3\n if (is_scale_set[\"src\"] && is_scale_set[\"wei\"] && is_scale_set[\"dst\"]) {\n net_args.insert(\n {{DNNL_ARG_ATTR_SCALES | DNNL_ARG_SRC, *context_.src_scale_mem},\n {DNNL_ARG_ATTR_SCALES | DNNL_ARG_WEIGHTS, *context_.wei_scale_mem},\n { DNNL_ARG_ATTR_SCALES | DNNL_ARG_DST,\n *context_.dst_scale_mem }});\n }\n#endif \n } else if (!convFwdDims.fuse_bn_dims.empty()) {\n context_.bn_scale_mem.reset(\n new memory(*context_.bn_scale_md, cpu_engine_, DummyData));\n context_.bn_mean_mem.reset(\n new memory(*context_.bn_mean_md, cpu_engine_, DummyData));\n context_.bn_offset_mem.reset(\n new memory(*context_.bn_offset_md, cpu_engine_, DummyData));\n 
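The four binary post-ops wired up in this branch (binary_sub on the mean, binary_mul on the rsqrt term, binary_mul on the scale, binary_add on the offset) fold inference-time batch normalization into the convolution's epilogue. The per-element arithmetic they implement, as a standalone sketch (function name is ours):

#include <cmath>

// y = scale * (conv_out - mean) / sqrt(variance + epsilon) + offset,
// applied per output channel. The kernel precomputes the
// 1/sqrt(variance + epsilon) factor (bn_rsqrt above), so the fused
// epilogue reduces to sub, mul, mul, add in that order.
float FusedBatchNormEpilogue(float conv_out, float mean, float variance,
                             float scale, float offset, float epsilon) {
  const float rsqrt_var = 1.0f / std::sqrt(variance + epsilon);
  return ((conv_out - mean) * rsqrt_var) * scale + offset;
}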
context_.bn_rsqrt_mem.reset(\n new memory(*context_.bn_rsqrt_md, cpu_engine_, DummyData));\n net_args = {{DNNL_ARG_SRC, *context_.src_mem},\n {DNNL_ARG_WEIGHTS, *context_.filter_mem},\n {DNNL_ARG_DST, *context_.dst_mem},\n {DNNL_ARG_SCRATCHPAD, *context_.sp_mem},\n {DNNL_ARG_ATTR_MULTIPLE_POST_OP(0) | DNNL_ARG_SRC_1,\n *context_.bn_mean_mem},\n {DNNL_ARG_ATTR_MULTIPLE_POST_OP(1) | DNNL_ARG_SRC_1,\n *context_.bn_rsqrt_mem},\n {DNNL_ARG_ATTR_MULTIPLE_POST_OP(2) | DNNL_ARG_SRC_1,\n *context_.bn_scale_mem},\n {DNNL_ARG_ATTR_MULTIPLE_POST_OP(3) | DNNL_ARG_SRC_1,\n *context_.bn_offset_mem}};\n } else {\n net_args = {{DNNL_ARG_SRC, *context_.src_mem},\n {DNNL_ARG_WEIGHTS, *context_.filter_mem},\n {DNNL_ARG_SCRATCHPAD, *context_.sp_mem},\n {DNNL_ARG_DST, *context_.dst_mem}};\n#ifdef ENABLE_ONEDNN_V3\n if (is_scale_set[\"src\"] && is_scale_set[\"wei\"] && is_scale_set[\"dst\"]) {\n net_args.insert(\n {{DNNL_ARG_ATTR_SCALES | DNNL_ARG_SRC, *context_.src_scale_mem},\n {DNNL_ARG_ATTR_SCALES | DNNL_ARG_WEIGHTS, *context_.wei_scale_mem},\n { DNNL_ARG_ATTR_SCALES | DNNL_ARG_DST,\n *context_.dst_scale_mem }});\n }\n#endif \n }\n context_.fwd_primitives_args.push_back(net_args);\n context_.fwd_primitives.push_back(*context_.conv_fwd);\n }\n struct ConvFwdContext context_;\n#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)\n mutex primitive_execution_mu_;\n#endif\n};\ntemplate \nclass MklConvFwdPrimitiveFactory : public MklPrimitiveFactory {\n public:\n static MklConvFwdPrimitive* Get(\n const MklConvFwdParams& convFwdDims, bool do_not_cache) {\n MklConvFwdPrimitive* conv_fwd = nullptr;\n if (do_not_cache) {\n conv_fwd =\n new MklConvFwdPrimitive(convFwdDims);\n } else {\n conv_fwd =\n dynamic_cast*>(\n MklConvFwdPrimitiveFactory::GetInstance()\n .GetConvFwd(convFwdDims));\n if (conv_fwd == nullptr) {\n conv_fwd = new MklConvFwdPrimitive(\n convFwdDims);\n MklConvFwdPrimitiveFactory::GetInstance()\n .SetConvFwd(convFwdDims, conv_fwd);\n }\n }\n return conv_fwd;\n }\n private:\n MklConvFwdPrimitiveFactory() {}\n ~MklConvFwdPrimitiveFactory() {}\n static const int kDilationH = 0, kDilationW = 1;\n static MklConvFwdPrimitiveFactory& GetInstance() {\n static MklConvFwdPrimitiveFactory instance_;\n return instance_;\n }\n static string CreateKey(const MklConvFwdParams& convFwdDims) {\n string prefix = \"conv_fwd_\";\n FactoryKeyCreator key_creator;\n key_creator.AddAsKey(prefix);\n key_creator.AddAsKey(convFwdDims.src_dims);\n key_creator.AddAsKey(convFwdDims.filter_dims);\n key_creator.AddAsKey(convFwdDims.bias_dims);\n key_creator.AddAsKey(convFwdDims.dst_dims);\n key_creator.AddAsKey(convFwdDims.strides);\n key_creator.AddAsKey(convFwdDims.dilations);\n key_creator.AddAsKey(convFwdDims.padding_left);\n key_creator.AddAsKey(convFwdDims.padding_right);\n key_creator.AddAsKey(convFwdDims.dtypes);\n if (convFwdDims.native_format) {\n key_creator.AddAsKey(convFwdDims.tf_fmt);\n }\n for (auto const& post_op_param : convFwdDims.post_op_params) {\n key_creator.AddAsKey(post_op_param.name);\n if (post_op_param.name == \"activation\") {\n key_creator.AddAsKey(post_op_param.alg);\n DCHECK_EQ(post_op_param.param.size(), 3);\n for (auto& param : post_op_param.param) {\n key_creator.AddAsKey(param);\n }\n } else if (post_op_param.name == \"sum\") {\n DCHECK_EQ(post_op_param.param.size(), 1);\n for (auto& param : post_op_param.param) {\n key_creator.AddAsKey(param);\n }\n#ifndef ENABLE_ONEDNN_V3\n } else if (post_op_param.name == \"output_scale\") {\n#else\n } else if (post_op_param.name == \"src_scale\" 
||\n post_op_param.name == \"wei_scale\" ||\n post_op_param.name == \"dst_scale\") {\n#endif \n key_creator.AddAsKey(post_op_param.partial_key);\n } else if (post_op_param.name == \"fuse_bn\") {\n key_creator.AddAsKey(post_op_param.name);\n key_creator.AddAsKey(convFwdDims.fuse_bn_dims);\n } else {\n return string(\"not_a_key\");\n }\n }\n return key_creator.GetKey();\n }\n MklPrimitive* GetConvFwd(const MklConvFwdParams& convFwdDims) {\n string key = CreateKey(convFwdDims);\n return this->GetOp(key);\n }\n void SetConvFwd(const MklConvFwdParams& convFwdDims, MklPrimitive* op) {\n string key = CreateKey(convFwdDims);\n this->SetOp(key, op);\n }\n};\ntemplate \nclass MklConvOp : public OpKernel {\n public:\n ~MklConvOp() {}\n explicit MklConvOp(OpKernelConstruction* context) : OpKernel(context) {\n OP_REQUIRES_OK(context, context->GetAttr(\"dilations\", &dilations_));\n OP_REQUIRES(\n context,\n !(context->HasAttr(\"padding_list\") &&\n context->HasAttr(\"explicit_paddings\")),\n absl::InvalidArgumentError(\"Can only have 1 `padding` list at most\"));\n if (context->HasAttr(\"padding_list\")) {\n OP_REQUIRES_OK(context, context->GetAttr(\"padding_list\", &padding_list_));\n }\n if (context->HasAttr(\"explicit_paddings\")) {\n OP_REQUIRES_OK(context,\n context->GetAttr(\"explicit_paddings\", &padding_list_));\n }\n OP_REQUIRES_OK(context, context->GetAttr(\"strides\", &strides_));\n OP_REQUIRES_OK(context, context->GetAttr(\"data_format\", &data_format_str_));\n OP_REQUIRES(context, FormatFromString(data_format_str_, &data_format_),\n absl::InvalidArgumentError(\"Invalid data format\"));\n OP_REQUIRES(context, (strides_.size() == 4 || strides_.size() == 5),\n absl::InvalidArgumentError(\"Sliding window strides field must \"\n \"specify 4 or 5 dimensions\"));\n const int64 stride_n = GetTensorDim(strides_, data_format_, 'N');\n const int64 stride_c = GetTensorDim(strides_, data_format_, 'C');\n OP_REQUIRES(\n context, stride_n == 1 && stride_c == 1,\n absl::UnimplementedError(\"Current implementation does not yet support \"\n \"strides in the batch and depth dimensions.\"));\n OP_REQUIRES_OK(context, context->GetAttr(\"padding\", &padding_));\n is_filter_const_ = false;\n if (AreWeightsFrozen()) {\n is_filter_const_ = true;\n } else if (context->HasAttr(\"is_filter_const\")) {\n OP_REQUIRES_OK(context,\n context->GetAttr(\"is_filter_const\", &is_filter_const_));\n }\n if (strides_.size() == 4) {\n OP_REQUIRES(\n context, dilations_.size() == 4,\n absl::InvalidArgumentError(\"Sliding window dilations field must \"\n \"specify 4 dimensions\"));\n const int64 dilation_n = GetTensorDim(dilations_, data_format_, 'N');\n const int64 dilation_c = GetTensorDim(dilations_, data_format_, 'C');\n const int64 dilation_h = GetTensorDim(dilations_, data_format_, 'H');\n const int64 dilation_w = GetTensorDim(dilations_, data_format_, 'W');\n OP_REQUIRES(context, dilation_n == 1 && dilation_c == 1,\n absl::InvalidArgumentError(\n \"Current implementation does not yet support \"\n \"dilations in the batch and depth dimensions.\"));\n OP_REQUIRES(\n context, dilation_h > 0 && dilation_w > 0,\n absl::InvalidArgumentError(\"Dilated rates should be larger than 0.\"));\n } else if (strides_.size() == 5) {\n OP_REQUIRES(context, dilations_.size() == 5,\n absl::InvalidArgumentError(\"Dilation rates field must \"\n \"specify 5 dimensions\"));\n OP_REQUIRES(context,\n (GetTensorDim(dilations_, data_format_, 'N') == 1 &&\n GetTensorDim(dilations_, data_format_, 'C') == 1),\n absl::InvalidArgumentError(\n \"Current 
implementation does not yet support \"\n \"dilations rates in the batch and depth dimensions.\"));\n OP_REQUIRES(\n context,\n (GetTensorDim(dilations_, data_format_, '0') > 0 &&\n GetTensorDim(dilations_, data_format_, '1') > 0 &&\n GetTensorDim(dilations_, data_format_, '2') > 0),\n absl::InvalidArgumentError(\"Dilated rates should be larger than 0.\"));\n }\n }\n void Compute(OpKernelContext* context) override {\n try {\n const Tensor& src_tensor = MklGetInput(context, kInputIndex_Src);\n const Tensor& filter_tensor = MklGetInput(context, kInputIndex_Filter);\n OP_REQUIRES(\n context, filter_tensor.NumElements() > 0,\n absl::InvalidArgumentError(\"filter must not have zero elements \"\n \"(i.e. all dimensions must be non-zero)\"));\n if (std::is_same::value) {\n (void)SetFPMathMode();\n }\n MklDnnShape src_mkl_shape, filter_mkl_shape;\n GetMklShape(context, kInputIndex_Src, &src_mkl_shape, native_format);\n GetMklShape(context, kInputIndex_Filter, &filter_mkl_shape,\n native_format);\n OP_REQUIRES(context, !filter_mkl_shape.IsMklTensor(),\n absl::InvalidArgumentError(\"Filter should not be in \"\n \"Mkl Layout\"));\n MklDnnData src(&cpu_engine_);\n MklDnnData filter(&cpu_engine_);\n memory::dims src_dims, filter_dims, padding_left, padding_right,\n dilations, strides;\n memory::dims dst_dims_tf_order, dst_dims_mkl_order;\n bool pad_attr_enabled = false;\n for (auto const& padding_val : padding_list_) {\n if (padding_val) {\n pad_attr_enabled = true;\n break;\n }\n }\n if (fuse_pad_ || pad_attr_enabled) {\n PadWithConvFusion(context, padding_left, padding_right,\n pad_attr_enabled, data_format_str_);\n }\n MklDnnConvUtil conv_utl(context, strides_, padding_, data_format_,\n dilations_);\n auto src_tf_shape = GetTfShape(context, kInputIndex_Src, native_format);\n auto filter_tf_shape =\n GetTfShape(context, kInputIndex_Filter, native_format);\n bool is_grouped_convolution = false;\n conv_utl.GetConvFwdSizesInMklOrder(\n src_tf_shape, filter_tf_shape, &src_dims, &filter_dims, &strides,\n &dilations, &dst_dims_tf_order, &dst_dims_mkl_order, &padding_left,\n &padding_right, &is_grouped_convolution,\n (fuse_pad_ || pad_attr_enabled), is_depthwise);\n if (!context->status().ok()) return;\n TensorShape dst_tf_shape = MklDnnDimsToTFShape(dst_dims_tf_order);\n Tensor* dst_tensor = nullptr;\n bool emit_filter_output = (typeid(Tinput) == typeid(Tfilter) &&\n typeid(Tinput) == typeid(Toutput) &&\n (typeid(Tinput) == typeid(float) ||\n typeid(Tinput) == typeid(bfloat16))) &&\n !native_format;\n if (dst_tf_shape.num_elements() == 0 || dst_dims_tf_order[0] == 0) {\n MklDnnShape dst_mkl_shape;\n dst_mkl_shape.SetMklTensor(false);\n AllocateOutputSetMklShape(context, kOutputIndex_Dst, &dst_tensor,\n src_tf_shape, dst_mkl_shape, native_format);\n filter_mkl_shape.SetMklTensor(false);\n Tensor* output_filter_tensor = nullptr;\n if (emit_filter_output) {\n filter_mkl_shape.SetMklTensor(false);\n AllocateOutputSetMklShape(context, kOutputIndex_Filter,\n &output_filter_tensor, filter_tf_shape,\n filter_mkl_shape);\n }\n return;\n }\n bool is_conv2d = (strides_.size() == 4);\n bool is_conv3d = (strides_.size() == 5);\n if (!is_conv2d && !is_conv3d) {\n OP_REQUIRES(context, !pad_enabled,\n absl::InvalidArgumentError(\n \"Pad + Conv fusion only works for 2D/3D\"));\n OP_REQUIRES(\n context, !fuse_pad_,\n absl::InvalidArgumentError(\"Pad+Conv fusion only works for 2D/3D\"));\n }\n if (is_depthwise) {\n OP_REQUIRES(context, is_conv2d,\n absl::InvalidArgumentError(\n \"Only 2D convolution is supported for 
depthwise.\"));\n }\n auto tf_fmt = is_conv2d ? TFDataFormatToMklDnnDataFormat(data_format_)\n : TFDataFormatToMklDnn3DDataFormat(data_format_);\n auto mkl_fmt_tag = MklTensorFormatToMklDnnDataFormat(tf_fmt);\n OP_REQUIRES(context, mkl_fmt_tag != memory::format_tag::undef,\n absl::InvalidArgumentError(\"Invalid data format\"));\n auto src_md =\n src_mkl_shape.IsMklTensor()\n ? src_mkl_shape.GetMklLayout()\n : memory::desc(src_dims, MklDnnType(), mkl_fmt_tag);\n src.SetUsrMem(src_md, &src_tensor);\n auto filter_format = is_conv2d ? ((is_depthwise || is_grouped_convolution)\n ? memory::format_tag::hwigo\n : memory::format_tag::hwio)\n : memory::format_tag::dhwio;\n DCHECK(!filter_mkl_shape.IsMklTensor());\n auto filter_md =\n filter_mkl_shape.IsMklTensor()\n ? filter_mkl_shape.GetMklLayout()\n : memory::desc(filter_dims, MklDnnType(), filter_format);\n filter.SetUsrMem(filter_md, &filter_tensor);\n for (int i = 0; i < dilations.size(); ++i) --dilations[i];\n bool do_not_cache =\n MklPrimitiveFactory::IsPrimitiveMemOptEnabled() &&\n (src_dims[MklDnnDims::Dim_N] > kSmallBatchSize) &&\n (MklPrimitiveFactory::IsLegacyPlatform() ||\n IsConv1x1StrideNot1(filter_dims, strides));\n MklConvFwdPrimitive* conv_fwd =\n nullptr;\n memory::dims bias_dims = {};\n if (fuse_biasadd_) {\n conv_utl.GetBiasSizeInMklOrder(kInputIndex_Bias, &bias_dims);\n }\n memory::dims fuse_bn_dims = {};\n TensorShape fuse_bn_shape;\n if (fuse_bn_) {\n fuse_bn_shape = MklGetInput(context, kInputIndex_BN_Mean).shape();\n OP_REQUIRES(context, fuse_bn_shape.dims() == 1,\n absl::InvalidArgumentError(\n absl::StrCat(\"FusedBatchNorm must be 1D, not: \",\n fuse_bn_shape.DebugString())));\n fuse_bn_dims = {1, fuse_bn_shape.dim_size(0), 1, 1};\n }\n MklConvFwdParams convFwdDims(\n src_dims, filter_dims, fuse_biasadd_ ? 
bias_dims : NONE_DIMS,\n dst_dims_mkl_order, strides, dilations, padding_left, padding_right,\n fuse_bn_dims, tf_fmt, native_format, is_depthwise, is_filter_const_);\n this->ExtendConvFwdParams(context, convFwdDims);\n Eigen::ThreadPoolInterface* eigen_interface =\n EigenThreadPoolFromTfContext(context);\n tsl::OneDnnThreadPool eigen_tp(eigen_interface,\n ThreadPoolUseCallerThread());\n conv_fwd =\n MklConvFwdPrimitiveFactory::Get(\n convFwdDims, do_not_cache);\n MklDnnShape output_mkl_shape;\n std::shared_ptr conv_fwd_pd = conv_fwd->GetPrimitiveDesc();\n AllocateOutputTensor(context, *conv_fwd_pd, dst_dims_mkl_order, tf_fmt,\n &output_mkl_shape, &dst_tensor);\n Tensor* filter_out_tensor = nullptr;\n if (emit_filter_output) {\n AllocateFilterOutputTensor(context, *conv_fwd_pd,\n TFShapeToMklDnnDims(filter_tf_shape),\n &filter_out_tensor);\n }\n Ttemp_output* dst_data =\n reinterpret_cast(dst_tensor->flat().data());\n Tinput* src_data = nullptr;\n if (src_md != conv_fwd_pd->src_desc()) {\n src.SetUsrMem(src_md, &src_tensor);\n src.CheckReorderToOpMem(conv_fwd_pd->src_desc(), cpu_engine_, context);\n src_data = static_cast(src.GetOpMem().get_data_handle());\n } else {\n src_data = static_cast(\n const_cast(src_tensor.flat().data()));\n }\n Tfilter* filter_data = nullptr;\n if (filter_md != conv_fwd_pd->weights_desc()) {\n bool is_filter_cached = false;\n if (is_filter_const_) {\n if (IsFilterCacheEmpty(context)) {\n CacheFilter(context, conv_fwd_pd, filter_data, filter_tensor,\n filter, filter_md, filter_mkl_shape);\n }\n filter_data = GetCachedFilter(context, conv_fwd_pd->weights_desc());\n is_filter_cached = (filter_data != nullptr);\n }\n if (!is_filter_cached) {\n filter.SetUsrMem(filter_md, &filter_tensor);\n if (filter_out_tensor == nullptr) {\n filter.CheckReorderToOpMem(conv_fwd_pd->weights_desc(), cpu_engine_,\n context);\n } else {\n filter.CheckReorderToOpMem(\n conv_fwd_pd->weights_desc(),\n filter.GetTensorBuffer(filter_out_tensor), cpu_engine_,\n context);\n }\n filter_data =\n static_cast(filter.GetOpMem().get_data_handle());\n }\n } else {\n filter_data = static_cast(\n const_cast(filter_tensor.flat().data()));\n }\n UserScratchPad scratch_pad;\n scratch_pad.AllocateSPTensor(conv_fwd, context);\n std::shared_ptr fwd_cpu_stream;\n fwd_cpu_stream.reset(CreateStream(&eigen_tp, conv_fwd->GetEngine()));\n if (fuse_biasadd_) {\n const Tensor& bias_tensor = MklGetInput(context, kInputIndex_Bias);\n void* bias_data =\n this->GetBiasHandle(context, conv_fwd_pd, bias_tensor);\n conv_fwd->Execute(src_data, filter_data, bias_data, dst_data,\n convFwdDims, fwd_cpu_stream, scratch_pad.Get());\n } else if (fuse_bn_) {\n const Tensor& bn_scale_tensor =\n MklGetInput(context, kInputIndex_BN_Scale);\n Tinput* bn_scale_data = static_cast(\n const_cast(bn_scale_tensor.flat().data()));\n const Tensor& bn_mean_tensor =\n MklGetInput(context, kInputIndex_BN_Mean);\n Tinput* bn_mean_data = static_cast(\n const_cast(bn_mean_tensor.flat().data()));\n const Tensor& bn_offset_tensor =\n MklGetInput(context, kInputIndex_BN_Offset);\n Tinput* bn_offset_data = static_cast(\n const_cast(bn_offset_tensor.flat().data()));\n Tensor bn_rsqrt_tensor;\n OP_REQUIRES_OK(context,\n context->allocate_temp(DataTypeToEnum::v(),\n fuse_bn_shape, &bn_rsqrt_tensor));\n Tinput* bn_rsqrt_data = static_cast(\n const_cast(bn_rsqrt_tensor.flat().data()));\n this->ComputeBNScale(context, epsilon_, kInputIndex_BN_Variance,\n bn_rsqrt_data);\n conv_fwd->Execute(src_data, filter_data, nullptr, dst_data,\n bn_scale_data, 
bn_mean_data, bn_offset_data,\n bn_rsqrt_data, convFwdDims, fwd_cpu_stream,\n scratch_pad.Get());\n } else {\n conv_fwd->Execute(src_data, filter_data, dst_data, convFwdDims,\n fwd_cpu_stream, scratch_pad.Get());\n }\n if (do_not_cache) delete conv_fwd;\n } catch (dnnl::error& e) {\n string error_msg = tensorflow::strings::StrCat(\n \"Status: \", e.status, \", message: \", string(e.message), \", in file \",\n __FILE__, \":\", __LINE__);\n OP_REQUIRES_OK(context,\n absl::AbortedError(absl::StrCat(\n \"Operation received an exception:\", error_msg)));\n }\n }\n void PadWithConvFusion(OpKernelContext* context, memory::dims& padding_left,\n memory::dims& padding_right, bool pad_attr_enabled,\n string data_format_str_) {\n Tpadding* paddings = nullptr;\n if (pad_attr_enabled) {\n paddings = padding_list_.data();\n } else {\n const Tensor& paddings_tf = MklGetInput(context, input_index_pad_);\n OP_REQUIRES(context, paddings_tf.dims() == 2,\n absl::InvalidArgumentError(\n absl::StrCat(\"paddings must be 2-dimensional: \",\n paddings_tf.shape().DebugString())));\n paddings = static_cast(\n const_cast(paddings_tf.flat().data()));\n }\n int64 pad_top = 0, pad_left = 0, pad_front = 0;\n int64 pad_bottom = 0, pad_right = 0, pad_back = 0;\n if (data_format_str_ == \"NHWC\") {\n pad_top = paddings[2];\n pad_bottom = paddings[3];\n pad_left = paddings[4];\n pad_right = paddings[5];\n } else if (data_format_str_ == \"NCHW\") {\n pad_top = paddings[4];\n pad_bottom = paddings[5];\n pad_left = paddings[6];\n pad_right = paddings[7];\n } else if (data_format_str_ == \"NDHWC\") {\n pad_front = paddings[2];\n pad_back = paddings[3];\n pad_top = paddings[4];\n pad_bottom = paddings[5];\n pad_left = paddings[6];\n pad_right = paddings[7];\n } else if (data_format_str_ == \"NCDHW\") {\n pad_front = paddings[4];\n pad_back = paddings[5];\n pad_top = paddings[6];\n pad_bottom = paddings[7];\n pad_left = paddings[8];\n pad_right = paddings[9];\n }\n if (data_format_str_ == \"NHWC\" || data_format_str_ == \"NCHW\") {\n padding_left = {static_cast(pad_top), static_cast(pad_left)};\n padding_right = {static_cast(pad_bottom),\n static_cast(pad_right)};\n } else if (data_format_str_ == \"NDHWC\" || data_format_str_ == \"NCDHW\") {\n padding_left = {static_cast(pad_front), static_cast(pad_top),\n static_cast(pad_left)};\n padding_right = {static_cast(pad_back), static_cast(pad_bottom),\n static_cast(pad_right)};\n }\n }\n protected:\n void set_input_add_idx(int input_add_idx) {\n input_index_add_ = input_add_idx;\n }\n int get_input_add_idx() { return input_index_add_; }\n void set_fuse_biasadd(bool fuse_biasadd) { fuse_biasadd_ = fuse_biasadd; }\n bool get_fuse_biasadd() { return fuse_biasadd_; }\n void set_fuse_activation(bool fuse_activation, dnnl::algorithm activation_alg,\n float alpha_or_upbound = 0.0, float beta = 0.0) {\n fuse_activation_ = fuse_activation;\n activation_alg_ = activation_alg;\n alpha_or_upbound_ = alpha_or_upbound;\n beta_ = beta;\n }\n void set_fuse_pad(bool fuse_pad) {\n fuse_pad_ = fuse_pad;\n if (fuse_bn_) {\n input_index_pad_ = 6;\n } else if (fuse_add_ && fuse_biasadd_) {\n input_index_pad_ = 4;\n } else {\n input_index_pad_ = 3;\n }\n }\n void set_fuse_add(bool fuse_add) { fuse_add_ = fuse_add; }\n bool get_fuse_add() { return fuse_add_; };\n void set_fuse_bn(bool fuse_bn, float epsilon) {\n fuse_bn_ = fuse_bn;\n epsilon_ = epsilon;\n }\n virtual void ComputeBNScale(OpKernelContext* context, float epsilon,\n int bn_variance_index, Tinput* scale_buf_ptr) {\n OP_REQUIRES(context, false,\n 
absl::UnimplementedError(\n \"Compute BN scale not expected in base class\"));\n return;\n }\n virtual void ExtendConvFwdParams(OpKernelContext* context,\n MklConvFwdParams& params) {\n params.dtypes.append(typeid(Tinput).name());\n params.dtypes.append(typeid(Tfilter).name());\n params.dtypes.append(typeid(Tbias).name());\n params.dtypes.append(typeid(Toutput).name());\n bool is_quantized_input = std::is_same::value ||\n std::is_same::value;\n if (!is_quantized_input) {\n if (fuse_add_) {\n params.post_op_params.push_back(\n {\"sum\", dnnl::algorithm::undef, {1.0}, \"\"});\n }\n if (fuse_bn_) {\n params.post_op_params.push_back(\n {\"fuse_bn\", dnnl::algorithm::undef, {1.0}, \"\"});\n }\n if (fuse_activation_) {\n params.post_op_params.push_back({\"activation\",\n activation_alg_,\n {1.0, alpha_or_upbound_, beta_},\n \"\"});\n }\n }\n }\n virtual void* GetBiasHandle(OpKernelContext* context,\n std::shared_ptr& conv2d_fwd_pd,\n const Tensor& bias_tensor) {\n if (fuse_biasadd_) {\n return static_cast(\n const_cast(bias_tensor.flat().data()));\n }\n return nullptr;\n }\n virtual void AllocateOutputTensor(OpKernelContext* context,\n const ConvFwdPd& conv_prim_desc,\n const memory::dims& output_dims_mkl_order,\n MklTensorFormat output_tf_format,\n MklDnnShape* output_mkl_shape,\n Tensor** output_tensor) {\n DCHECK(output_tensor);\n#ifndef ENABLE_ONEDNN_V3\n auto dst_md = conv_prim_desc.dst_desc();\n if (!std::is_same::value) {\n#ifndef ENABLE_ONEDNN_V3\n dst_md.data.data_type =\n static_cast(MklDnnType());\n#else\n dst_md =\n memory::desc(output_dims_mkl_order, MklDnnType(),\n MklTensorFormatToMklDnnDataFormat(output_tf_format));\n#endif \n }\n#else\n auto dst_md =\n std::is_same::value\n ? conv_prim_desc.dst_desc()\n : memory::desc(conv_prim_desc.dst_desc().get_dims(),\n MklDnnType(),\n MklTensorFormatToMklDnnDataFormat(output_tf_format));\n#endif \n output_mkl_shape->SetMklTensor(true);\n output_mkl_shape->SET_MKL_LAYOUT(dst_md);\n output_mkl_shape->SetElemType(MklDnnType());\n output_mkl_shape->SetTfLayout(output_dims_mkl_order.size(),\n output_dims_mkl_order, output_tf_format);\n TensorShape output_tf_shape;\n output_tf_shape.AddDim((dst_md.get_size() / sizeof(Toutput)));\n if (native_format) {\n output_tf_shape = output_mkl_shape->GetTfShape();\n }\n bool is_quantized_input = std::is_same::value ||\n std::is_same::value;\n if (fuse_add_ && !is_quantized_input) {\n const Tensor& add_tensor = MklGetInput(context, input_index_add_);\n MklDnnShape add_mkl_shape;\n GetMklShape(context, input_index_add_, &add_mkl_shape, native_format);\n if (native_format && context->forward_input_to_output_with_shape(\n input_index_add_, kOutputIndex_Dst,\n output_tf_shape, output_tensor)) {\n return;\n }\n if (!native_format && add_mkl_shape == *output_mkl_shape &&\n ForwardMklTensorInToOutWithMklShape(context, input_index_add_,\n kOutputIndex_Dst, output_tensor,\n add_mkl_shape, false)) {\n return;\n } else {\n AllocateOutputSetMklShape(context, kOutputIndex_Dst, output_tensor,\n output_tf_shape, *output_mkl_shape,\n native_format);\n auto output_format_tag = MklTensorFormatToMklDnnDataFormat(\n output_mkl_shape->GetTfDataFormat());\n OP_REQUIRES(context, output_format_tag != memory::format_tag::undef,\n absl::InvalidArgumentError(\n \"MklConvOp: AddN fusion: Invalid data format\"));\n auto add_md =\n add_mkl_shape.IsMklTensor()\n ? 
add_mkl_shape.GetMklLayout()\n : memory::desc(output_dims_mkl_order, MklDnnType(),\n output_format_tag);\n void* add_buf = static_cast(\n const_cast(add_tensor.flat().data()));\n void* dst_buf =\n static_cast((*output_tensor)->flat().data());\n if (native_format) {\n add_md = dst_md =\n memory::desc({add_tensor.NumElements()}, MklDnnType(),\n dnnl::memory::format_tag::x);\n }\n fuse_add_src_.reset(new memory(add_md, this->cpu_engine_, add_buf));\n fuse_add_dst_.reset(new memory(dst_md, this->cpu_engine_, dst_buf));\n auto reorder_desc =\n ReorderPd(this->cpu_engine_, add_md, this->cpu_engine_, dst_md);\n CreateAndExecuteReorder(reorder_desc, *fuse_add_src_, *fuse_add_dst_,\n this->cpu_engine_, context);\n }\n } else {\n AllocateOutputSetMklShape(context, kOutputIndex_Dst, output_tensor,\n output_tf_shape, *output_mkl_shape,\n native_format);\n }\n }\n engine cpu_engine_ = engine(engine::kind::cpu, 0);\n private:\n std::shared_ptr fuse_add_src_;\n std::shared_ptr fuse_add_dst_;\n std::vector strides_;\n std::vector dilations_;\n std::vector padding_list_;\n bool is_filter_const_;\n mutex mu_;\n Padding padding_;\n string data_format_str_;\n TensorFormat data_format_;\n Tensor cached_filter_data_ TF_GUARDED_BY(mu_);\n#ifndef ENABLE_ONEDNN_V3\n Tensor cached_filter_md_ TF_GUARDED_BY(mu_);\n#else\n FilterMemoryDesc cached_filter_md_ TF_GUARDED_BY(mu_);\n#endif \n bool fuse_biasadd_ = bias_enabled;\n bool fuse_activation_ = false;\n bool fuse_pad_ = pad_enabled;\n bool fuse_add_ = false;\n bool fuse_bn_ = false;\n float epsilon_ = 0.0001;\n float alpha_or_upbound_ = 0.0;\n float beta_ = 0.0;\n dnnl::algorithm activation_alg_ = dnnl::algorithm::undef;\n int input_index_pad_ = 2;\n int input_index_add_ = 3;\n const int kInputIndex_Src = 0, kInputIndex_Filter = 1, kInputIndex_Bias = 2;\n const int kOutputIndex_Dst = 0, kOutputIndex_Filter = 1;\n const int kDilationH = 0, kDilationW = 1;\n const int kInputIndex_BN_Scale = 2, kInputIndex_BN_Offset = 3;\n const int kInputIndex_BN_Mean = 4, kInputIndex_BN_Variance = 5;\n MklTensorFormat GetFilterTfDataFormat(const MklDnnShape* filter_mkl_shape,\n const ConvFwdPd& conv_prim_desc) const {\n DCHECK(filter_mkl_shape);\n return filter_mkl_shape->GetTfDataFormat();\n }\n void AllocateTensor(OpKernelContext* context, const ConvFwdPd& conv_prim_desc,\n Tensor** filter_tensor,\n const MklDnnShape* filter_mkl_shape)\n TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {\n DCHECK(filter_tensor);\n TensorShape filter_tf_shape;\n filter_tf_shape.AddDim(\n (conv_prim_desc.weights_desc().get_size() / sizeof(Tfilter)));\n OP_REQUIRES_OK(\n context, context->allocate_temp(DataTypeToEnum::value,\n filter_tf_shape, &cached_filter_data_));\n *filter_tensor = &cached_filter_data_;\n memory::desc weights_desc = conv_prim_desc.weights_desc();\n#ifndef ENABLE_ONEDNN_V3\n TensorShape cached_filter_md_shape;\n cached_filter_md_shape.AddDim(sizeof(weights_desc) / sizeof(uint8));\n OP_REQUIRES_OK(context,\n context->allocate_temp(DT_UINT8, cached_filter_md_shape,\n &cached_filter_md_));\n *reinterpret_cast(cached_filter_md_.flat().data()) =\n weights_desc;\n#else\n cached_filter_md_ = FilterMemoryDesc(\n weights_desc.get_ndims(), weights_desc.get_inner_nblks(),\n weights_desc.get_data_type(), weights_desc.get_dims(),\n weights_desc.get_inner_blks(), weights_desc.get_inner_idxs(),\n weights_desc.get_strides());\n#endif \n }\n void AllocateTensor(OpKernelContext* context, const ConvFwdPd& conv_prim_desc,\n Tensor** filter_tensor) {\n AllocateTensor(context, conv_prim_desc, filter_tensor, 
nullptr);\n }\n void AllocateFilterOutputTensor(OpKernelContext* context,\n const ConvFwdPd& conv_prim_desc,\n const memory::dims& filter_dims_tf_order,\n Tensor** filter_tensor) {\n DCHECK(filter_tensor);\n auto filter_md = conv_prim_desc.weights_desc();\n MklDnnShape filter_mkl_shape;\n filter_mkl_shape.SetMklTensor(true);\n filter_mkl_shape.SET_MKL_LAYOUT(filter_md);\n filter_mkl_shape.SetElemType(MklDnnType());\n filter_mkl_shape.SetTfLayout(filter_dims_tf_order.size(),\n filter_dims_tf_order,\n MklTensorFormat::FORMAT_BLOCKED);\n TensorShape filter_tf_shape;\n filter_tf_shape.AddDim((filter_md.get_size() / sizeof(Tfilter)));\n AllocateOutputSetMklShape(context, kOutputIndex_Filter, filter_tensor,\n filter_tf_shape, filter_mkl_shape);\n }\n inline bool IsFilterCacheEmpty(OpKernelContext* context)\n TF_LOCKS_EXCLUDED(mu_) {\n tf_shared_lock lock(mu_);\n const Tensor& cached_filter_data_tensor = cached_filter_data_;\n return (cached_filter_data_tensor.NumElements() == 0);\n }\n void CacheFilter(OpKernelContext* context,\n const std::shared_ptr& conv_fwd_pd,\n Tfilter* filter_data, const Tensor& filter_tensor,\n MklDnnData& filter, const memory::desc& filter_md,\n const MklDnnShape& filter_mkl_shape) TF_LOCKS_EXCLUDED(mu_) {\n mutex_lock lock(mu_);\n const Tensor& cached_filter_data_tensor = cached_filter_data_;\n if (cached_filter_data_tensor.NumElements() > 0) {\n return;\n }\n#ifdef ENABLE_ONEDNN_V3\n if (filter_md.get_format_kind() != memory::format_kind::blocked) {\n return;\n }\n#endif \n filter.SetUsrMem(filter_md, &filter_tensor);\n filter.CheckReorderToOpMem(conv_fwd_pd.get()->weights_desc(),\n this->cpu_engine_, context);\n filter_data = static_cast(filter.GetOpMem().get_data_handle());\n Tensor* filter_tensor_ptr = nullptr;\n AllocateTensor(context, *conv_fwd_pd, &filter_tensor_ptr,\n &filter_mkl_shape);\n void* cached_filter_data = filter.GetTensorBuffer(filter_tensor_ptr);\n size_t cached_filter_data_size = filter.GetOpMem().get_desc().get_size();\n memcpy(cached_filter_data, filter_data, cached_filter_data_size);\n }\n#ifndef ENABLE_ONEDNN_V3\n bool AreMemoryDescriptorsEqual(const memory::desc& filter_md,\n const Tensor& cached_filter_md) {\n auto filter_md_data = filter_md.data;\n const char* filter_data = reinterpret_cast(&filter_md_data);\n auto cached_filter_md_data = cached_filter_md.scalar()();\n const char* cached_filter_data =\n reinterpret_cast(&cached_filter_md_data);\n for (size_t i = 0; i < sizeof(filter_md_data); ++i) {\n if (*filter_data++ != *cached_filter_data++) {\n return false;\n }\n }\n return true;\n }\n#endif \n Tfilter* GetCachedFilter(OpKernelContext* context,\n const memory::desc& filter_md)\n TF_LOCKS_EXCLUDED(mu_) {\n tf_shared_lock lock(mu_);\n const Tensor& cached_filter_data = cached_filter_data_;\n#ifndef ENABLE_ONEDNN_V3\n const Tensor& cached_filter_md = cached_filter_md_;\n if (filter_md == *static_cast(cached_filter_md.data())) {\n return static_cast(\n const_cast(cached_filter_data.flat().data()));\n }\n return nullptr;\n#else\n if (cached_filter_md_ ==\n FilterMemoryDesc(filter_md.get_ndims(), filter_md.get_inner_nblks(),\n filter_md.get_data_type(), filter_md.get_dims(),\n filter_md.get_inner_blks(), filter_md.get_inner_idxs(),\n filter_md.get_strides())) {\n return static_cast(\n const_cast(cached_filter_data.flat().data()));\n }\n return nullptr;\n#endif \n }\n};\ntemplate \nclass MklFusedConvOp\n : public MklConvOp {\n public:\n explicit MklFusedConvOp(OpKernelConstruction* context)\n : MklConvOp(context) {\n std::vector fused_ops;\n 
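    // [Editor's note -- illustration, not part of the original file] The
    // block below decodes the "fused_ops" attribute by exact sequence
    // matching: {"BiasAdd", "Relu"} is a distinct case from {"Relu"} alone,
    // and the order of the names matters. A minimal standalone sketch of
    // the same pattern (hypothetical names, not TensorFlow API):
    //
    //   #include <string>
    //   #include <vector>
    //
    //   struct FusionConfig {
    //     bool bias = false;
    //     bool relu = false;
    //   };
    //
    //   // Returns false for an unsupported sequence, mirroring the
    //   // OP_REQUIRES(..., UnimplementedError(...)) fallthrough below.
    //   inline bool DecodeFusedOps(const std::vector<std::string>& ops,
    //                              FusionConfig* cfg) {
    //     if (ops == std::vector<std::string>{"BiasAdd"}) {
    //       cfg->bias = true;
    //     } else if (ops == std::vector<std::string>{"BiasAdd", "Relu"}) {
    //       cfg->bias = true;
    //       cfg->relu = true;
    //     } else {
    //       return false;  // unsupported fusion sequence
    //     }
    //     return true;
    //   }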
OP_REQUIRES_OK(context, context->GetAttr(\"fused_ops\", &fused_ops));\n int num_args;\n OP_REQUIRES_OK(context, context->GetAttr(\"num_args\", &num_args));\n OP_REQUIRES(context, !fused_ops.empty(),\n absl::InvalidArgumentError(\n \"Fused Conv2D must have at least one fused op.\"));\n if (fused_ops == std::vector{\"BiasAdd\"}) {\n this->set_fuse_biasadd(true);\n OP_REQUIRES(context, num_args == 1,\n absl::InvalidArgumentError(\n \"Fused Conv2D must have one extra argument: bias.\"));\n } else if (fused_ops == std::vector{\"Relu\"}) {\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);\n } else if (fused_ops == std::vector{\"Relu6\"}) {\n this->SET_FUSE_ACTIVATION_FOR_RELU6;\n } else if (fused_ops == std::vector{\"Elu\"}) {\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);\n } else if (fused_ops == std::vector{\"LeakyRelu\"}) {\n float leakyrelu_alpha;\n OP_REQUIRES_OK(context,\n context->GetAttr(\"leakyrelu_alpha\", &leakyrelu_alpha));\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu,\n leakyrelu_alpha);\n } else if (fused_ops == std::vector{\"FusedBatchNorm\"}) {\n float epsilon;\n OP_REQUIRES_OK(context, context->GetAttr(\"epsilon\", &epsilon));\n OP_REQUIRES(\n context, num_args == 4,\n absl::InvalidArgumentError(\n \"Fused Conv2D with batchnorm must have 4 extra argument\"));\n this->set_fuse_bn(true, epsilon);\n } else if (fused_ops == std::vector{\"BiasAdd\", \"Relu\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);\n OP_REQUIRES(context, num_args == 1,\n absl::InvalidArgumentError(\n \"Fused Conv2D must have one extra argument: bias.\"));\n } else if (fused_ops == std::vector{\"BiasAdd\", \"Relu6\"}) {\n this->set_fuse_biasadd(true);\n this->SET_FUSE_ACTIVATION_FOR_RELU6;\n OP_REQUIRES(context, num_args == 1,\n absl::InvalidArgumentError(\n \"Fused Conv2D must have one extra argument: bias.\"));\n } else if (fused_ops == std::vector{\"BiasAdd\", \"Elu\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);\n OP_REQUIRES(context, num_args == 1,\n absl::InvalidArgumentError(\n \"Fused Conv2D must have one extra argument: bias.\"));\n } else if (fused_ops == std::vector{\"BiasAdd\", \"LeakyRelu\"}) {\n this->set_fuse_biasadd(true);\n float leakyrelu_alpha;\n OP_REQUIRES_OK(context,\n context->GetAttr(\"leakyrelu_alpha\", &leakyrelu_alpha));\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu,\n leakyrelu_alpha);\n OP_REQUIRES(context, num_args == 1,\n absl::InvalidArgumentError(\n \"Fused Conv2D must have one extra argument: bias.\"));\n } else if (fused_ops == std::vector{\"BiasAdd\", \"_FusedHardSwish\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_hardswish,\n 1.0 / 6.0, 0.5);\n } else if (fused_ops == std::vector{\"BiasAdd\", \"Add\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_add(true);\n OP_REQUIRES(\n context, num_args == 2,\n absl::InvalidArgumentError(\n \"Fused Conv2D must have two extra arguments: bias and add.\"));\n } else if (fused_ops == std::vector{\"FusedBatchNorm\", \"Relu\"}) {\n float epsilon;\n OP_REQUIRES_OK(context, context->GetAttr(\"epsilon\", &epsilon));\n OP_REQUIRES(\n context, num_args == 4,\n absl::InvalidArgumentError(\n \"Fused Conv2D with batchnorm must have 4 extra argument\"));\n this->set_fuse_bn(true, epsilon);\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);\n } else if (fused_ops == 
std::vector{\"FusedBatchNorm\", \"Relu6\"}) {\n float epsilon;\n OP_REQUIRES_OK(context, context->GetAttr(\"epsilon\", &epsilon));\n OP_REQUIRES(\n context, num_args == 4,\n absl::InvalidArgumentError(\n \"Fused Conv2D with batchnorm must have 4 extra argument\"));\n this->set_fuse_bn(true, epsilon);\n this->SET_FUSE_ACTIVATION_FOR_RELU6;\n } else if (fused_ops == std::vector{\"FusedBatchNorm\", \"Elu\"}) {\n float epsilon;\n OP_REQUIRES_OK(context, context->GetAttr(\"epsilon\", &epsilon));\n OP_REQUIRES(\n context, num_args == 4,\n absl::InvalidArgumentError(\n \"Fused Conv2D with batchnorm must have 4 extra argument\"));\n this->set_fuse_bn(true, epsilon);\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);\n } else if (fused_ops ==\n std::vector{\"FusedBatchNorm\", \"LeakyRelu\"}) {\n float epsilon, leakyrelu_alpha;\n OP_REQUIRES_OK(context, context->GetAttr(\"epsilon\", &epsilon));\n OP_REQUIRES_OK(context,\n context->GetAttr(\"leakyrelu_alpha\", &leakyrelu_alpha));\n OP_REQUIRES(\n context, num_args == 4,\n absl::InvalidArgumentError(\n \"Fused Conv2D with batchnorm must have 4 extra argument\"));\n this->set_fuse_bn(true, epsilon);\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu,\n leakyrelu_alpha);\n } else if (fused_ops ==\n std::vector{\"FusedBatchNorm\", \"_MklSwish\"}) {\n float epsilon;\n OP_REQUIRES_OK(context, context->GetAttr(\"epsilon\", &epsilon));\n OP_REQUIRES(\n context, num_args == 4,\n absl::InvalidArgumentError(\n \"Fused Conv2D with batchnorm must have 4 extra argument\"));\n this->set_fuse_bn(true, epsilon);\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_swish, 1.0);\n } else if (fused_ops == std::vector{\"BiasAdd\", \"Add\", \"Relu\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_add(true);\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);\n OP_REQUIRES(\n context, num_args == 2,\n absl::InvalidArgumentError(\n \"Fused Conv2D must have two extra arguments: bias and add.\"));\n } else if (fused_ops == std::vector{\"BiasAdd\", \"Add\", \"Relu6\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_add(true);\n this->SET_FUSE_ACTIVATION_FOR_RELU6;\n OP_REQUIRES(\n context, num_args == 2,\n absl::InvalidArgumentError(\n \"Fused Conv2D must have two extra arguments: bias and add.\"));\n } else if (fused_ops == std::vector{\"BiasAdd\", \"Add\", \"Elu\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_add(true);\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);\n OP_REQUIRES(\n context, num_args == 2,\n absl::InvalidArgumentError(\n \"Fused Conv2D must have two extra arguments: bias and add.\"));\n } else if (fused_ops ==\n std::vector{\"BiasAdd\", \"Add\", \"LeakyRelu\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_add(true);\n float leakyrelu_alpha;\n OP_REQUIRES_OK(context,\n context->GetAttr(\"leakyrelu_alpha\", &leakyrelu_alpha));\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu,\n leakyrelu_alpha);\n OP_REQUIRES(\n context, num_args == 2,\n absl::InvalidArgumentError(\n \"Fused Conv2D must have two extra arguments: bias and add.\"));\n } else if (fused_ops == std::vector{\"BiasAdd\", \"Mish\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_mish, 1.0);\n OP_REQUIRES(context, num_args == 1,\n absl::InvalidArgumentError(\n \"_FusedConv2D must have one extra argument: bias.\"));\n } else if (fused_ops == std::vector{\"BiasAdd\", \"_MklSwish\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_activation(true, 
dnnl::algorithm::eltwise_swish, 1.0);\n OP_REQUIRES(context, num_args == 1,\n absl::InvalidArgumentError(\n \"Fused Conv2D must have one extra argument: bias.\"));\n } else {\n OP_REQUIRES(context, false,\n absl::UnimplementedError(\n absl::StrCat(\"Fusion is not implemented: [\",\n absl::StrJoin(fused_ops, \",\"), \"]\")));\n }\n if (pad_enabled) {\n this->set_fuse_pad(true);\n }\n }\n void ComputeBNScale(OpKernelContext* context, float epsilon,\n int bn_variance_index, Tinput* scale_buf_ptr) override {\n const Tensor& bn_var_tensor = MklGetInput(context, bn_variance_index);\n Eigen::Tensor bn_rsqrt =\n (bn_var_tensor.flat() + static_cast(epsilon)).rsqrt();\n Tinput* bn_rsqrt_data = bn_rsqrt.data();\n int64_t num_elem = bn_var_tensor.shape().dim_size(0);\n for (int64_t i = 0; i < num_elem; i++) {\n scale_buf_ptr[i] = bn_rsqrt_data[i];\n }\n return;\n }\n virtual ~MklFusedConvOp() {}\n};\ntemplate \nclass MklFusedDepthwiseConvOp\n : public MklConvOp {\n public:\n explicit MklFusedDepthwiseConvOp(OpKernelConstruction* context)\n : MklConvOp(\n context) {\n std::vector fused_ops;\n OP_REQUIRES_OK(context, context->GetAttr(\"fused_ops\", &fused_ops));\n int num_args;\n OP_REQUIRES_OK(context, context->GetAttr(\"num_args\", &num_args));\n OP_REQUIRES(context, !fused_ops.empty(),\n absl::InvalidArgumentError(\n \"Fused DepthwiseConv2D must have at least one fused op.\"));\n if (fused_ops == std::vector{\"BiasAdd\"}) {\n this->set_fuse_biasadd(true);\n } else if (fused_ops == std::vector{\"BiasAdd\", \"Relu\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);\n } else if (fused_ops == std::vector{\"BiasAdd\", \"Relu6\"}) {\n this->set_fuse_biasadd(true);\n this->SET_FUSE_ACTIVATION_FOR_RELU6;\n } else if (fused_ops == std::vector{\"BiasAdd\", \"Elu\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);\n } else if (fused_ops == std::vector{\"BiasAdd\", \"_FusedHardSwish\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_hardswish,\n 1.0 / 6.0, 0.5);\n } else {\n OP_REQUIRES(context, false,\n absl::InvalidArgumentError(\n absl::StrCat(\"Fusion is not implemented: [\",\n absl::StrJoin(fused_ops, \",\"), \"]\")));\n }\n OP_REQUIRES(\n context, num_args == 1,\n absl::InvalidArgumentError(\n \"Fused DepthwiseConv2D must have one extra argument: bias.\"));\n if (pad_enabled) {\n this->set_fuse_pad(true);\n }\n }\n virtual ~MklFusedDepthwiseConvOp() {}\n};\nenum class oneDNNFusedOps { kBias = 1, kSum = 2, kRelu = 4, kRequantize = 8 };\ntemplate \nclass MklQuantizedConvOp\n : public MklConvOp<\n Device, Tinput, qint8, Tbias, Toutput, Ttemp_output,\n int32, false, false,\n is_depthwise, true> {\n public:\n virtual ~MklQuantizedConvOp() {\n if (this->input_bias_ != nullptr) {\n delete this->input_bias_;\n input_bias_ = nullptr;\n }\n if (this->scaled_bias_ != nullptr) {\n delete this->scaled_bias_;\n scaled_bias_ = nullptr;\n }\n }\n explicit MklQuantizedConvOp(OpKernelConstruction* context)\n : MklConvOp(context) {\n std::vector> supported_fusions = {\n {\"BiasAdd\"},\n {\"Relu\"},\n {\"Requantize\"},\n {\"BiasAdd\", \"Relu\"},\n {\"BiasAdd\", \"Requantize\"},\n {\"Relu\", \"Requantize\"},\n {\"BiasAdd\", \"Relu\", \"Requantize\"},\n {\"BiasAdd\", \"Sum\", \"Relu\"},\n {\"BiasAdd\", \"Sum\", \"Relu\", \"Requantize\"}};\n std::vector fused_ops_attr;\n if (context->HasAttr(\"fused_ops\")) {\n OP_REQUIRES_OK(context, context->GetAttr(\"fused_ops\", 
&fused_ops_attr));\n }\n OP_REQUIRES(context, !(fused_ops_attr.size() > 0 && num_fused_ops > 0),\n absl::InvalidArgumentError(\n \"QuantizedConv fused ops should be only available through \"\n \"either new API or old API, got both.\"));\n if (fused_ops_attr.size() > 0) {\n fused_ops_ = fused_ops_attr;\n } else if (num_fused_ops > 0) {\n for (int i = 0; i < num_fused_ops; ++i) {\n fused_ops_.push_back(legacy_fused_ops[i]);\n }\n }\n if (fused_ops_.size() > 0) {\n bool is_fusion_supported =\n std::find(supported_fusions.begin(), supported_fusions.end(),\n fused_ops_) != supported_fusions.end();\n OP_REQUIRES(context, is_fusion_supported,\n absl::InvalidArgumentError(\n absl::StrCat(\"Unsupported QuantizedConv fusion: [\",\n absl::StrJoin(fused_ops_, \",\"), \"]\")));\n }\n for (const auto& op : fused_ops_) {\n fused_op_flags_ ^= static_cast(StrToEnum(op));\n }\n DataType bias_dt, summand_dt, out_dt;\n if (IsFused(oneDNNFusedOps::kBias)) {\n this->set_fuse_biasadd(true);\n OP_REQUIRES_OK(context,\n context->GetAttr(\"is_bias_const\", &is_bias_const_));\n if (context->HasAttr(\"Tbias\")) {\n OP_REQUIRES_OK(context, context->GetAttr(\"Tbias\", &bias_dt));\n }\n }\n if (IsFused(oneDNNFusedOps::kSum)) {\n this->set_fuse_add(true);\n }\n const bool fuse_requantize = IsFused(oneDNNFusedOps::kRequantize);\n OP_REQUIRES_OK(context, context->GetAttr(\"out_type\", &out_dt));\n if (fuse_requantize) {\n OP_REQUIRES(\n context, out_dt == DT_QINT8 || out_dt == DT_QUINT8,\n absl::InvalidArgumentError(\"QuantizedConv: unsupported output \"\n \"type when Requantize is fused.\"));\n }\n if (context->HasAttr(\"Tsummand\")) {\n OP_REQUIRES_OK(context, context->GetAttr(\"Tsummand\", &summand_dt));\n if (!this->get_fuse_add()) {\n OP_REQUIRES(\n context, summand_dt == out_dt,\n absl::InvalidArgumentError(\n \"QuantizedConv: incorrect summand data type. When Sum is not \"\n \"fused, Tsummand attribute must have same value as out_type.\"));\n }\n }\n#ifndef ENABLE_ONEDNN_V3\n int idx = fuse_requantize ? 1 : 0;\n#else\n post_op_to_idx_[\"src_scale\"] = 0;\n post_op_to_idx_[\"wei_scale\"] = 1;\n post_op_to_idx_[\"dst_scale\"] = 2;\n int idx = 3;\n#endif \n for (int i = 0; i < fused_ops_.size(); ++i) {\n if (fused_ops_[i] == \"Requantize\") {\n#ifndef ENABLE_ONEDNN_V3\n post_op_to_idx_[\"output_scale\"] = 0;\n#endif \n } else if (fused_ops_[i] == \"Sum\") {\n post_op_to_idx_[\"sum\"] = idx++;\n } else if (fused_ops_[i] == \"Relu\") {\n post_op_to_idx_[\"activation\"] = idx++;\n }\n }\n bool is_filter_const;\n OP_REQUIRES_OK(context,\n context->GetAttr(\"is_filter_const\", &is_filter_const));\n OP_REQUIRES(\n context, is_filter_const,\n absl::InvalidArgumentError(\"QuantizedConv: filter must be a constant\"));\n if (num_fused_ops == -1) {\n int non_minmax_arg_idx_base = 2;\n int minmax_arg_idx_base = 6;\n int bias_idx_offset = this->get_fuse_biasadd() ? 1 : 0;\n int summand_idx_offset = this->get_fuse_add() ? 1 : 0;\n int bias_min_max_idx_offset =\n this->get_fuse_biasadd() &&\n !(bias_dt == DT_FLOAT || bias_dt == DT_QINT32)\n ? 
2\n : 0;\n min_input_idx_ =\n non_minmax_arg_idx_base + bias_idx_offset + summand_idx_offset;\n max_input_idx_ = min_input_idx_ + 1;\n min_filter_idx_ = min_input_idx_ + 2;\n max_filter_idx_ = min_input_idx_ + 3;\n if (this->get_fuse_biasadd()) {\n min_bias_idx_ =\n minmax_arg_idx_base + bias_idx_offset + summand_idx_offset;\n max_bias_idx_ = min_bias_idx_ + 1;\n }\n if (this->get_fuse_add()) {\n this->set_input_add_idx(non_minmax_arg_idx_base + bias_idx_offset);\n if (summand_dt == DT_QINT8 || summand_dt == DT_QUINT8) {\n min_summand_idx_ = minmax_arg_idx_base + bias_idx_offset +\n summand_idx_offset + bias_min_max_idx_offset;\n max_summand_idx_ = min_summand_idx_ + 1;\n }\n }\n if (fuse_requantize) {\n min_freezed_output_idx_ = context->num_inputs() - 2;\n max_freezed_output_idx_ = min_freezed_output_idx_ + 1;\n }\n } else {\n int bias_idx_offset = this->get_fuse_biasadd() ? 1 : 0;\n min_input_idx_ = 2 + bias_idx_offset;\n max_input_idx_ = 3 + bias_idx_offset;\n min_filter_idx_ = 4 + bias_idx_offset;\n max_filter_idx_ = 5 + bias_idx_offset;\n if (fuse_requantize) {\n min_freezed_output_idx_ = 6 + bias_idx_offset;\n max_freezed_output_idx_ = 7 + bias_idx_offset;\n }\n if (this->get_fuse_add()) {\n int input_add_idx = std::is_same::value\n ? context->num_inputs() - 1 - 2\n : context->num_inputs() - 1;\n this->set_input_add_idx(input_add_idx);\n if (summand_dt == DT_QINT8 || summand_dt == DT_QUINT8) {\n min_summand_idx_ = 9 + bias_idx_offset;\n max_summand_idx_ = 10 + bias_idx_offset;\n }\n }\n }\n }\n void Compute(OpKernelContext* context) override {\n MklConvOp::Compute(context);\n const float min_input =\n context->input(min_input_idx_).template scalar()();\n const float max_input =\n context->input(max_input_idx_).template scalar()();\n Tensor* output_min = nullptr;\n Tensor* output_max = nullptr;\n if (std::is_same::value ||\n std::is_same::value) {\n OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));\n OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max));\n output_min->flat()(0) =\n context->input(min_freezed_output_idx_).template scalar()();\n output_max->flat()(0) =\n context->input(max_freezed_output_idx_).template scalar()();\n } else {\n const Tensor& min_filter = context->input(min_filter_idx_);\n const Tensor& max_filter = context->input(max_filter_idx_);\n if (min_filter.dims() == 0) {\n float min_output_value;\n float max_output_value;\n MklQuantizationRangeForMultiplication(\n min_input, max_input, min_filter.scalar()(),\n max_filter.scalar()(), &min_output_value, &max_output_value);\n OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));\n OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max));\n output_min->flat()(0) = min_output_value;\n output_max->flat()(0) = max_output_value;\n } else {\n size_t depth = min_filter.NumElements();\n OP_REQUIRES_OK(context,\n context->allocate_output(\n 1, {static_cast(depth)}, &output_min));\n OP_REQUIRES_OK(context,\n context->allocate_output(\n 2, {static_cast(depth)}, &output_max));\n MklQuantizationRangeForMultiplication(\n min_input, max_input, min_filter, max_filter, &output_min,\n &output_max);\n }\n }\n }\n protected:\n void ExtendConvFwdParams(OpKernelContext* context,\n MklConvFwdParams& params) override {\n MklConvOp::ExtendConvFwdParams(context, params);\n params.post_op_params.resize(post_op_to_idx_.size());\n const float min_input =\n context->input(min_input_idx_).template scalar()();\n const float max_input =\n context->input(max_input_idx_).template 
scalar()();\n const Tensor& min_filter_vector = context->input(min_filter_idx_);\n const Tensor& max_filter_vector = context->input(max_filter_idx_);\n OP_REQUIRES(\n context,\n ((min_filter_vector.NumElements() > 0) &&\n (max_filter_vector.NumElements() > 0) &&\n (min_filter_vector.shape() == max_filter_vector.shape())),\n absl::InvalidArgumentError(\"`min_ and max_filter` must have same\"\n \"shape and contain at least one element.\"));\n size_t depth = min_filter_vector.NumElements();\n const float* min_filter = min_filter_vector.flat().data();\n const float* max_filter = max_filter_vector.flat().data();\n std::vector SCALE(depth);\n float float_input_range =\n std::max(std::abs(min_input), std::abs(max_input));\n#ifdef ENABLE_ONEDNN_V3\n float int_input_limit =\n std::is_same::value ? 255.0f : 127.0f;\n const float src_scale = float_input_range / int_input_limit;\n#endif \n if (std::is_same::value ||\n std::is_same::value) {\n const float min_freezed_output =\n context->input(min_freezed_output_idx_).template scalar()();\n const float max_freezed_output =\n context->input(max_freezed_output_idx_).template scalar()();\n float int_output_limit =\n std::is_same::value ? 255.0f : 127.0f;\n float float_output_range =\n std::max(std::abs(min_freezed_output), std::abs(max_freezed_output));\n#ifndef ENABLE_ONEDNN_V3\n const float int_const_scale_limit =\n (std::is_same::value) ? 255.0 * 127.0 : 127.0 * 127.0;\n#endif \n for (size_t i = 0; i < depth; ++i) {\n float float_filter_range =\n std::max(std::abs(min_filter[i]), std::abs(max_filter[i]));\n#ifndef ENABLE_ONEDNN_V3\n scales[i] = int_output_limit * float_input_range * float_filter_range /\n (int_const_scale_limit * float_output_range);\n#else\n wei_scale[i] = float_filter_range / 127.0;\n#endif \n }\n#ifndef ENABLE_ONEDNN_V3\n FactoryKeyCreator param_key;\n param_key.AddAsKey(min_input);\n param_key.AddAsKey(max_input);\n param_key.AddAsKey(min_freezed_output);\n param_key.AddAsKey(max_freezed_output);\n param_key.AddAsKey(min_filter);\n param_key.AddAsKey(max_filter);\n params.post_op_params[post_op_to_idx_[\"output_scale\"]] = {\n \"output_scale\", dnnl::algorithm::undef, scales, param_key.GetKey()};\n#else\n const float dst_scale = float_output_range / int_output_limit;\n FactoryKeyCreator dst_param_key;\n dst_param_key.AddAsKey(min_freezed_output);\n dst_param_key.AddAsKey(max_freezed_output);\n params.post_op_params[post_op_to_idx_[\"dst_scale\"]] = {\n \"dst_scale\",\n dnnl::algorithm::undef,\n {dst_scale},\n dst_param_key.GetKey()};\n#endif \n } else {\n#ifdef ENABLE_ONEDNN_V3\n if (!std::is_same::value)\n TF_CHECK_OK(absl::FailedPreconditionError(\n \"Output datatype is expected to be qint32.\"));\n float min_min_filter = min_filter[0];\n float max_max_filter = max_filter[0];\n for (size_t i = 0; i < depth; ++i) {\n float float_filter_range =\n std::max(std::abs(min_filter[i]), std::abs(max_filter[i]));\n wei_scale[i] = float_filter_range / 127.0;\n if (min_filter[i] < min_min_filter) min_min_filter = min_filter[i];\n if (max_filter[i] > max_max_filter) max_max_filter = max_filter[i];\n }\n const float single_wei_scale =\n std::max(std::abs(min_min_filter), std::abs(max_max_filter)) / 127.0;\n const float dst_scale = single_wei_scale * src_scale;\n FactoryKeyCreator dst_param_key;\n dst_param_key.AddAsKey(dst_scale);\n params.post_op_params[post_op_to_idx_[\"dst_scale\"]] = {\n \"dst_scale\",\n dnnl::algorithm::undef,\n {dst_scale},\n dst_param_key.GetKey()};\n#endif \n }\n#ifdef ENABLE_ONEDNN_V3\n FactoryKeyCreator 
src_param_key;\n src_param_key.AddAsKey(min_input);\n src_param_key.AddAsKey(max_input);\n FactoryKeyCreator wei_param_key;\n wei_param_key.AddAsKey(min_filter);\n wei_param_key.AddAsKey(max_filter);\n params.post_op_params[post_op_to_idx_[\"src_scale\"]] = {\n \"src_scale\",\n dnnl::algorithm::undef,\n {src_scale},\n src_param_key.GetKey()};\n params.post_op_params[post_op_to_idx_[\"wei_scale\"]] = {\n \"wei_scale\", dnnl::algorithm::undef, wei_scale, wei_param_key.GetKey()};\n#endif \n if (this->get_fuse_add()) {\n DataType summand_dt = this->input_type(this->get_input_add_idx());\n if (std::is_same::value) {\n bool summand_condition =\n (summand_dt == DT_QINT8) || (summand_dt == DT_QUINT8);\n DCHECK((summand_condition));\n const Tensor& min_freezed_output_tensor =\n context->input(min_freezed_output_idx_);\n const Tensor& max_freezed_output_tensor =\n context->input(max_freezed_output_idx_);\n OP_REQUIRES(\n context,\n TensorShapeUtils::IsScalar(min_freezed_output_tensor.shape()),\n absl::InvalidArgumentError(\n absl::StrCat(\"`min_freezed_output` must be rank 0 but is rank \",\n min_freezed_output_tensor.dims())));\n OP_REQUIRES(\n context,\n TensorShapeUtils::IsScalar(max_freezed_output_tensor.shape()),\n absl::InvalidArgumentError(\n absl::StrCat(\"`max_freezed_output` must be rank 0 but is rank \",\n max_freezed_output_tensor.dims())));\n const Tensor& min_freezed_summand_tensor =\n context->input(min_summand_idx_);\n const Tensor& max_freezed_summand_tensor =\n context->input(max_summand_idx_);\n OP_REQUIRES(\n context,\n TensorShapeUtils::IsScalar(min_freezed_summand_tensor.shape()),\n absl::InvalidArgumentError(absl::StrCat(\n \"`min_freezed_summand` must be rank 0 but is rank \",\n min_freezed_summand_tensor.dims())));\n OP_REQUIRES(\n context,\n TensorShapeUtils::IsScalar(max_freezed_summand_tensor.shape()),\n absl::InvalidArgumentError(absl::StrCat(\n \"`max_freezed_summand` must be rank 0 but is rank \",\n max_freezed_summand_tensor.dims())));\n#ifndef ENABLE_ONEDNN_V3\n const float min_freezed_output =\n min_freezed_output_tensor.template scalar()();\n const float max_freezed_output =\n max_freezed_output_tensor.template scalar()();\n float output_range = std::max(std::abs(min_freezed_output),\n std::abs(max_freezed_output));\n#endif \n const float min_freezed_summand =\n min_freezed_summand_tensor.template scalar()();\n const float max_freezed_summand =\n max_freezed_summand_tensor.template scalar()();\n float summand_range = std::max(std::abs(min_freezed_summand),\n std::abs(max_freezed_summand));\n if (summand_dt == DT_QUINT8) {\n params.post_op_params[post_op_to_idx_[\"sum\"]] = {\n \"sum\",\n dnnl::algorithm::undef,\n {SUMMAND_SCALE_U8(summand_range, output_range)},\n \"\"};\n } else {\n params.post_op_params[post_op_to_idx_[\"sum\"]] = {\n \"sum\",\n dnnl::algorithm::undef,\n {SUMMAND_SCALE_S8(summand_range, output_range)},\n \"\"};\n }\n } else {\n params.post_op_params[post_op_to_idx_[\"sum\"]] = {\"sum\",\n dnnl::algorithm::undef,\n {1.0},\n \"\",\n#ifdef ENABLE_ONEDNN_V3\n summand_dt\n#endif \n };\n }\n }\n if (IsFused(oneDNNFusedOps::kRelu)) {\n params.post_op_params[post_op_to_idx_[\"activation\"]] = {\n \"activation\", dnnl::algorithm::eltwise_relu, {1.0, 0.0, 0.0}, \"\"};\n }\n }\n void AllocateOutputTensor(OpKernelContext* context,\n const ConvFwdPd& conv_prim_desc,\n const memory::dims& output_dims_mkl_order,\n MklTensorFormat output_tf_format,\n MklDnnShape* output_mkl_shape,\n Tensor** output_tensor) override {\n if (!this->get_fuse_add()) {\n MklConvOp<\n 
Device, Tinput, qint8, Tbias, Toutput, Ttemp_output,\n int32,\n false, false, is_depthwise,\n true>::AllocateOutputTensor(context, conv_prim_desc,\n output_dims_mkl_order,\n output_tf_format,\n output_mkl_shape,\n output_tensor);\n } else {\n if (std::is_same::value) {\n int summand_idx = this->get_input_add_idx();\n DataType summand_dt = this->input_type(summand_idx);\n bool summand_condition =\n (summand_dt == DT_QINT8) || (summand_dt == DT_QUINT8);\n DCHECK((summand_condition));\n Tensor& summand = const_cast(context->input(summand_idx));\n if (summand_dt == DT_QINT8) {\n OP_REQUIRES_OK(context, summand.BitcastFrom(summand, DT_QUINT8,\n summand.shape()));\n }\n OP_REQUIRES(context,\n context->forward_input_to_output_with_shape(\n summand_idx, 0, summand.shape(), output_tensor),\n absl::InvalidArgumentError(\n \"Summand cannot be forwarded in the current fusion.\"));\n return;\n }\n#ifndef ENABLE_ONEDNN_V3\n MklConvOp<\n Device, Tinput, qint8, Tbias, Toutput, Ttemp_output,\n int32,\n false, false, is_depthwise,\n true>::AllocateOutputTensor(context, conv_prim_desc,\n output_dims_mkl_order,\n output_tf_format,\n output_mkl_shape,\n output_tensor);\n const Tensor& summand = context->input(this->get_input_add_idx());\n if (summand.dtype() != DT_FLOAT)\n TF_CHECK_OK(absl::FailedPreconditionError(\n \"Current fusion requires summand to be float\"));\n const float min_input =\n context->input(min_input_idx_).template scalar()();\n const float max_input =\n context->input(max_input_idx_).template scalar()();\n const Tensor& min_filter_vector = context->input(min_filter_idx_);\n const Tensor& max_filter_vector = context->input(max_filter_idx_);\n const float* min_filter = min_filter_vector.flat().data();\n const float* max_filter = max_filter_vector.flat().data();\n const float int_const_scale_limit =\n (std::is_same::value) ? 
255.0 * 127.0 : 127.0 * 127.0;\n size_t depth = min_filter_vector.NumElements();\n std::vector scales(depth);\n for (size_t i = 0; i < depth; ++i) {\n scales[i] =\n int_const_scale_limit /\n (std::max(std::abs(max_input), std::abs(min_input)) *\n std::max(std::abs(max_filter[i]), std::abs(min_filter[i])));\n }\n dnnl::primitive_attr reorder_attr;\n#ifndef ENABLE_ONEDNN_V3\n if (depth == 1) {\n reorder_attr.set_output_scales(0, scales);\n } else {\n reorder_attr.set_output_scales(2, scales);\n }\n#else\n DCHECK_EQ(depth, 1);\n reorder_attr.set_scales_mask(DNNL_ARG_SRC, 0);\n reorder_attr.set_scales_mask(DNNL_ARG_WEIGHTS, 0);\n reorder_attr.set_scales_mask(DNNL_ARG_DST, 0);\n#endif \n auto summand_md = memory::desc(output_dims_mkl_order, MklDnnType(),\n memory::format_tag::nhwc);\n void* summand_buf =\n static_cast(const_cast(summand.flat().data()));\n void* dst_buf =\n static_cast((*output_tensor)->flat().data());\n summand_.reset(new memory(summand_md, this->cpu_engine_, summand_buf));\n dst_.reset(\n new memory(conv_prim_desc.dst_desc(), this->cpu_engine_, dst_buf));\n auto reorder_desc =\n ReorderPd(this->cpu_engine_, summand_md, this->cpu_engine_,\n conv_prim_desc.dst_desc(), reorder_attr);\n CreateAndExecuteReorder(reorder_desc, *summand_, *dst_, this->cpu_engine_,\n context);\n#else\n int summand_idx = this->get_input_add_idx();\n DataType summand_dt = this->input_type(summand_idx);\n if (summand_dt != DT_FLOAT)\n TF_CHECK_OK(absl::FailedPreconditionError(\n \"Summand datatype is expected to be float.\"));\n Tensor& summand_float = const_cast(context->input(summand_idx));\n OP_REQUIRES_OK(context,\n summand_float.BitcastFrom(summand_float, DT_QINT32,\n summand_float.shape()));\n OP_REQUIRES(context,\n context->forward_input_to_output_with_shape(\n summand_idx, 0, summand_float.shape(), output_tensor),\n absl::InvalidArgumentError(\n \"Summand cannot be forwarded in the current fusion.\"));\n#endif \n }\n }\n void* GetBiasHandle(OpKernelContext* context,\n std::shared_ptr& conv_fwd_pd,\n const Tensor& bias_tensor) override {\n if (!this->get_fuse_biasadd()) {\n return nullptr;\n }\n#ifndef ENABLE_ONEDNN_V3\n if (std::is_same::value) {\n return static_cast(\n const_cast(bias_tensor.flat().data()));\n }\n const float min_input =\n context->input(min_input_idx_).template scalar()();\n const float max_input =\n context->input(max_input_idx_).template scalar()();\n const Tensor& min_filter_vector = context->input(min_filter_idx_);\n const Tensor& max_filter_vector = context->input(max_filter_idx_);\n const float* min_filter = min_filter_vector.flat().data();\n const float* max_filter = max_filter_vector.flat().data();\n const float int_const_scale_limit =\n (std::is_same::value) ? 
255.0 * 127.0 : 127.0 * 127.0;\n size_t depth = min_filter_vector.NumElements();\n bool scales_are_valid = (depth == scales_.size());\n scales_.resize(depth);\n for (size_t i = 0; i < depth; ++i) {\n float tmp_scale =\n int_const_scale_limit /\n (std::max(std::abs(max_input), std::abs(min_input)) *\n std::max(std::abs(max_filter[i]), std::abs(min_filter[i])));\n if (scales_are_valid && std::abs(tmp_scale - scales_[i]) > 1e-6) {\n scales_are_valid = false;\n }\n scales_[i] = tmp_scale;\n }\n if (!is_bias_const_ || IsBiasCacheEmpty(context) || !scales_are_valid) {\n dnnl::primitive_attr bias_attr;\n#ifndef ENABLE_ONEDNN_V3\n if (depth == 1) {\n bias_attr.set_output_scales(0, scales_);\n } else {\n bias_attr.set_output_scales(1, scales_);\n }\n#else\n DCHECK_EQ(depth, 1);\n bias_attr.set_scales_mask(DNNL_ARG_SRC, 0);\n bias_attr.set_scales_mask(DNNL_ARG_WEIGHTS, 0);\n bias_attr.set_scales_mask(DNNL_ARG_DST, 0);\n#endif \n auto bias_md = memory::desc({static_cast(bias_tensor.NumElements())},\n MklDnnType(), memory::format_tag::x);\n void* bias_buf = static_cast(\n const_cast(bias_tensor.flat().data()));\n if (!input_bias_) {\n input_bias_ = new memory(bias_md, this->cpu_engine_, bias_buf);\n } else {\n input_bias_->set_data_handle(bias_buf);\n }\n if (!scaled_bias_buf_)\n AllocTmpBuffer(context, &scaled_bias_tensor_,\n conv_fwd_pd->bias_desc(), &scaled_bias_buf_);\n if (!scaled_bias_) {\n scaled_bias_ = new memory(bias_md, this->cpu_engine_, scaled_bias_buf_);\n } else {\n scaled_bias_->set_data_handle(scaled_bias_buf_);\n }\n auto reorder_desc =\n ReorderPd(this->cpu_engine_, input_bias_->get_desc(),\n this->cpu_engine_, scaled_bias_->get_desc(), bias_attr);\n CreateAndExecuteReorder(reorder_desc, *input_bias_, *scaled_bias_,\n this->cpu_engine_, context);\n Tbias* bias_data =\n reinterpret_cast(scaled_bias_->get_data_handle());\n if (is_bias_const_)\n CacheBias(context, conv_fwd_pd, bias_data, scaled_bias_);\n return bias_data;\n }\n return GetCachedBias(context);\n#else\n if (std::is_same::value) {\n return static_cast(\n const_cast(bias_tensor.flat().data()));\n }\n const float min_input =\n context->input(min_input_idx_).template scalar()();\n const float max_input =\n context->input(max_input_idx_).template scalar()();\n const Tensor& min_filter_vector = context->input(min_filter_idx_);\n const Tensor& max_filter_vector = context->input(max_filter_idx_);\n if ((min_filter_vector.NumElements() == 0) ||\n (max_filter_vector.NumElements() == 0) ||\n (min_filter_vector.shape() != max_filter_vector.shape())) {\n TF_CHECK_OK(absl::FailedPreconditionError(\n \"`min_filter and max_filter` must have same\"\n \"shape and contain at least one element.\"));\n }\n const float* min_filter = min_filter_vector.flat().data();\n const float* max_filter = max_filter_vector.flat().data();\n const float int_const_scale_limit =\n (std::is_same::value) ? 
255.0 * 127.0 : 127.0 * 127.0;\n size_t depth = min_filter_vector.NumElements();\n bool scales_are_valid = (depth == scales_.size());\n scales_.resize(depth);\n for (size_t i = 0; i < depth; ++i) {\n float tmp_scale =\n int_const_scale_limit /\n (std::max(std::abs(max_input), std::abs(min_input)) *\n std::max(std::abs(max_filter[i]), std::abs(min_filter[i])));\n if (scales_are_valid && std::abs(tmp_scale - scales_[i]) > 1e-6) {\n scales_are_valid = false;\n }\n scales_[i] = tmp_scale;\n }\n if (!is_bias_const_ || IsBiasCacheEmpty(context) || !scales_are_valid) {\n dnnl::primitive_attr reorder_attr;\n if (depth == 1) {\n reorder_attr.set_scales_mask(DNNL_ARG_DST, 0);\n } else {\n reorder_attr.set_scales_mask(DNNL_ARG_DST, 1);\n }\n auto bias_md = memory::desc({static_cast(bias_tensor.NumElements())},\n MklDnnType(), memory::format_tag::x);\n void* bias_buf = static_cast(\n const_cast(bias_tensor.flat().data()));\n if (!input_bias_) {\n input_bias_ = new memory(bias_md, this->cpu_engine_, bias_buf);\n } else {\n input_bias_->set_data_handle(bias_buf);\n }\n if (!scaled_bias_buf_) {\n AllocTmpBuffer(context, &scaled_bias_tensor_,\n conv_fwd_pd->bias_desc(), &scaled_bias_buf_);\n }\n if (!scaled_bias_) {\n scaled_bias_ = new memory(conv_fwd_pd->bias_desc(), this->cpu_engine_,\n scaled_bias_buf_);\n } else {\n scaled_bias_->set_data_handle(scaled_bias_buf_);\n }\n std::unique_ptr scale_mem(\n new memory({{static_cast(depth)},\n MklDnnType(),\n memory::format_tag::x},\n this->cpu_engine_, scales_.data()));\n auto reorder_desc =\n ReorderPd(this->cpu_engine_, input_bias_->get_desc(),\n this->cpu_engine_, scaled_bias_->get_desc(), reorder_attr);\n CreateAndExecuteReorder(reorder_desc, *input_bias_, *scaled_bias_,\n this->cpu_engine_, context, scale_mem.get());\n float* bias_data =\n reinterpret_cast(scaled_bias_->get_data_handle());\n if (is_bias_const_)\n CacheBias(context, conv_fwd_pd, bias_data, scaled_bias_);\n return bias_data;\n }\n return GetCachedBias(context);\n#endif \n }\n bool is_bias_const_;\n Tensor cached_bias_data_ TF_GUARDED_BY(bias_cache_mu_);\n memory* input_bias_ = nullptr;\n memory* scaled_bias_ = nullptr;\n Tensor scaled_bias_tensor_;\n void* scaled_bias_buf_ = nullptr;\n private:\n std::vector scales_;\n mutex bias_cache_mu_;\n std::vector fused_ops_;\n std::map post_op_to_idx_;\n int64_t fused_op_flags_ = 0;\n std::unordered_map str_to_enum_{\n {\"BiasAdd\", oneDNNFusedOps::kBias},\n {\"Sum\", oneDNNFusedOps::kSum},\n {\"Relu\", oneDNNFusedOps::kRelu},\n {\"Requantize\", oneDNNFusedOps::kRequantize}};\n std::shared_ptr summand_;\n std::shared_ptr dst_;\n int min_input_idx_ = -1;\n int max_input_idx_ = -1;\n int min_filter_idx_ = -1;\n int max_filter_idx_ = -1;\n int min_bias_idx_ = -1;\n int max_bias_idx_ = -1;\n int min_summand_idx_ = -1;\n int max_summand_idx_ = -1;\n int min_freezed_output_idx_ = -1;\n int max_freezed_output_idx_ = -1;\n inline bool IsFused(oneDNNFusedOps op) {\n return fused_op_flags_ & (static_cast(op));\n }\n inline oneDNNFusedOps StrToEnum(const string op) {\n CHECK_EQ(str_to_enum_.find(op) != str_to_enum_.end(), true) \n << \"Error: Unknown post op: \" << op;\n return str_to_enum_[op];\n }\n void AllocateTensor(OpKernelContext* context, const ConvFwdPd& conv_prim_desc,\n Tensor** bias_tensor) {\n DCHECK(bias_tensor);\n TensorShape bias_tf_shape;\n bias_tf_shape.AddDim(\n (conv_prim_desc.bias_desc().get_size() / sizeof(TSCALED_BIAS)));\n OP_REQUIRES_OK(context,\n context->allocate_temp(DataTypeToEnum::value,\n bias_tf_shape, &cached_bias_data_));\n 
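    // [Editor's note -- illustration, not part of the original file] The
    // tensor allocated here backs the bias cache; IsBiasCacheEmpty,
    // CacheBias, and GetCachedBias below follow a read-mostly locking
    // pattern: readers take a shared lock, the writer takes an exclusive
    // lock and re-checks before filling. A portable C++17 sketch of the
    // idiom, with std::shared_mutex standing in for TF's
    // mutex/tf_shared_lock/mutex_lock:
    //
    //   #include <mutex>
    //   #include <shared_mutex>
    //   #include <vector>
    //
    //   class BiasCache {
    //    public:
    //     bool Empty() const {
    //       std::shared_lock<std::shared_mutex> lock(mu_);  // shared read
    //       return data_.empty();
    //     }
    //     void Fill(const std::vector<float>& scaled_bias) {
    //       std::unique_lock<std::shared_mutex> lock(mu_);  // exclusive write
    //       if (!data_.empty()) return;  // lost the race; keep first fill
    //       data_ = scaled_bias;
    //     }
    //    private:
    //     mutable std::shared_mutex mu_;
    //     std::vector<float> data_;
    //   };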
*bias_tensor = &cached_bias_data_;\n }\n inline bool IsBiasCacheEmpty(OpKernelContext* context)\n TF_LOCKS_EXCLUDED(bias_cache_mu_) {\n tf_shared_lock lock(bias_cache_mu_);\n return (cached_bias_data_.NumElements() == 0);\n }\n void CacheBias(OpKernelContext* context,\n const std::shared_ptr& conv_fwd_pd,\n TSCALED_BIAS* bias_data, const memory* scaled_bias)\n TF_LOCKS_EXCLUDED(bias_cache_mu_) {\n mutex_lock lock(bias_cache_mu_);\n if (cached_bias_data_.NumElements() > 0) {\n return;\n }\n Tensor* bias_tensor_ptr = nullptr;\n AllocateTensor(context, *conv_fwd_pd, &bias_tensor_ptr);\n void* cached_bias_data = const_cast(\n static_cast(bias_tensor_ptr->flat().data()));\n size_t cached_bias_data_size = scaled_bias->get_desc().get_size();\n memcpy(cached_bias_data, bias_data, cached_bias_data_size);\n }\n TSCALED_BIAS* GetCachedBias(OpKernelContext* context)\n TF_LOCKS_EXCLUDED(bias_cache_mu_) {\n tf_shared_lock lock(bias_cache_mu_);\n const Tensor& cached_bias_data = cached_bias_data_;\n return static_cast(const_cast(\n cached_bias_data.flat().data()));\n }\n};\ntemplate \nclass MklFusedConv3DOp\n : public MklConvOp {\n public:\n explicit MklFusedConv3DOp(OpKernelConstruction* context)\n : MklConvOp(context) {\n std::vector fused_ops;\n OP_REQUIRES_OK(context, context->GetAttr(\"fused_ops\", &fused_ops));\n int num_args;\n OP_REQUIRES_OK(context, context->GetAttr(\"num_args\", &num_args));\n std::vector padding_list;\n OP_REQUIRES_OK(context, context->GetAttr(\"padding_list\", &padding_list));\n if (padding_list.empty()) {\n OP_REQUIRES(\n context, !fused_ops.empty(),\n absl::InvalidArgumentError(\"Fused Conv3D must have at least one \"\n \"fused op when Pad is not fused.\"));\n if (std::find(fused_ops.begin(), fused_ops.end(), \"BiasAdd\") ==\n fused_ops.end()) {\n OP_REQUIRES(context, num_args == 1,\n absl::InvalidArgumentError(\n \"Fused Conv3D must have one extra argument: bias.\"));\n } else if (std::find(fused_ops.begin(), fused_ops.end(), \"BiasAdd\") ==\n fused_ops.end() &&\n std::find(fused_ops.begin(), fused_ops.end(), \"Add\") ==\n fused_ops.end()) {\n OP_REQUIRES(\n context, num_args == 2,\n absl::InvalidArgumentError(\n \"Fused Conv3D must have two extra arguments: bias and add.\"));\n }\n }\n if (fused_ops == std::vector{\"BiasAdd\"}) {\n this->set_fuse_biasadd(true);\n } else if (fused_ops == std::vector{\"BiasAdd\", \"LeakyRelu\"}) {\n this->set_fuse_biasadd(true);\n float leakyrelu_alpha;\n OP_REQUIRES_OK(context,\n context->GetAttr(\"leakyrelu_alpha\", &leakyrelu_alpha));\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu,\n leakyrelu_alpha);\n } else if (fused_ops == std::vector{\"BiasAdd\", \"Mish\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_mish);\n } else if (fused_ops == std::vector{\"BiasAdd\", \"Relu\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);\n } else if (fused_ops == std::vector{\"BiasAdd\", \"Relu6\"}) {\n this->set_fuse_biasadd(true);\n this->SET_FUSE_ACTIVATION_FOR_RELU6;\n } else if (fused_ops == std::vector{\"BiasAdd\", \"Elu\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);\n } else if (fused_ops == std::vector{\"BiasAdd\", \"Add\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_add(true);\n } else if (fused_ops == std::vector{\"BiasAdd\", \"Add\", \"Relu\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_add(true);\n this->set_fuse_activation(true, 
dnnl::algorithm::eltwise_relu);\n } else if (fused_ops == std::vector{\"BiasAdd\", \"Add\", \"Relu6\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_add(true);\n this->SET_FUSE_ACTIVATION_FOR_RELU6;\n } else if (fused_ops == std::vector{\"BiasAdd\", \"Add\", \"Elu\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_add(true);\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);\n } else if (fused_ops ==\n std::vector{\"BiasAdd\", \"Add\", \"LeakyRelu\"}) {\n this->set_fuse_biasadd(true);\n this->set_fuse_add(true);\n float leakyrelu_alpha;\n OP_REQUIRES_OK(context,\n context->GetAttr(\"leakyrelu_alpha\", &leakyrelu_alpha));\n this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu,\n leakyrelu_alpha);\n } else {\n if (padding_list.empty()) {\n OP_REQUIRES(context, false,\n absl::UnimplementedError(\n absl::StrCat(\"Fusion is not implemented: [\",\n absl::StrJoin(fused_ops, \",\"), \"]\")));\n }\n }\n }\n virtual ~MklFusedConv3DOp() {}\n};\n#define REGISTER_MKL_KERNEL(op, kernel, input_type, bias_type, output_type, \\\n summand_type, is_depthwise, legacy_fused_ops, \\\n num_fused_ops) \\\n REGISTER_KERNEL_BUILDER( \\\n Name(op) \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"Tinput\") \\\n .TypeConstraint(\"Tfilter\") BIAS_TYPE_CONSTRAINT(bias_type) \\\n SUMMAND_TYPE_CONSTRAINT(summand_type) \\\n .TypeConstraint(\"out_type\") LABEL, \\\n kernel TEMPLATE_ARGS(CPUDevice, input_type, bias_type, output_type, \\\n summand_type, is_depthwise, legacy_fused_ops, \\\n num_fused_ops));\n#define REGISTER_MKL_KERNEL_ALL_INPUT_TYPES( \\\n op, kernel, bias_type, output_type, summand_type, is_depthwise, \\\n legacy_fused_ops, num_fused_ops) \\\n REGISTER_MKL_KERNEL(op, kernel, qint8, bias_type, output_type, summand_type, \\\n is_depthwise, legacy_fused_ops, num_fused_ops); \\\n REGISTER_MKL_KERNEL(op, kernel, quint8, bias_type, output_type, \\\n summand_type, is_depthwise, legacy_fused_ops, \\\n num_fused_ops);\n#define REGISTER_MKL_KERNEL_ALL_BIAS_TYPES( \\\n op, kernel, input_type, output_type, summand_type, is_depthwise, \\\n legacy_fused_ops, num_fused_ops) \\\n REGISTER_MKL_KERNEL(op, kernel, input_type, qint32, output_type, \\\n summand_type, is_depthwise, legacy_fused_ops, \\\n num_fused_ops); \\\n REGISTER_MKL_KERNEL(op, kernel, input_type, float, output_type, \\\n summand_type, is_depthwise, legacy_fused_ops, \\\n num_fused_ops);\n#define REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES( \\\n op, kernel, output_type, summand_type, is_depthwise, legacy_fused_ops, \\\n num_fused_ops) \\\n REGISTER_MKL_KERNEL_ALL_INPUT_TYPES(op, kernel, qint32, output_type, \\\n summand_type, is_depthwise, \\\n legacy_fused_ops, num_fused_ops); \\\n REGISTER_MKL_KERNEL_ALL_INPUT_TYPES(op, kernel, float, output_type, \\\n summand_type, is_depthwise, \\\n legacy_fused_ops, num_fused_ops);\n#define LABEL\n#define TEMPLATE_ARGS(CPUDevice, input_type, bias_type, output_type, \\\n summand_type, has_bias, is_depthwise, is_native)\n#define BIAS_TYPE_CONSTRAINT(bias_type)\n#define SUMMAND_TYPE_CONSTRAINT(summand_type)\nREGISTER_MKL_KERNEL(\"QuantizedConv2D\", NoOp, quint8, float, qint32, qint32,\n false, false, false);\nREGISTER_MKL_KERNEL_ALL_INPUT_TYPES(\"QuantizedConv2DWithBias\", NoOp, float,\n qint32, qint32, false, false, false);\nREGISTER_MKL_KERNEL_ALL_INPUT_TYPES(\"QuantizedConv2DWithBiasAndRelu\", NoOp,\n float, qint32, qint32, false, false, false);\nREGISTER_MKL_KERNEL(\"QuantizedConv2DWithBiasSumAndRelu\", NoOp, quint8, float,\n qint32, qint32, false, false, 
false);\nREGISTER_MKL_KERNEL(\"QuantizedConv2DAndRequantize\", NoOp, quint8, float, qint8,\n qint8, false, false, false);\nREGISTER_MKL_KERNEL(\"QuantizedConv2DPerChannel\", NoOp, quint8, float, qint32,\n qint32, false, false, false);\nREGISTER_MKL_KERNEL(\"QuantizedConv2DAndRelu\", NoOp, quint8, float, qint32,\n qint32, false, false, false);\nREGISTER_MKL_KERNEL(\"QuantizedConv2DAndReluAndRequantize\", NoOp, quint8, float,\n quint8, quint8, false, false, false);\nREGISTER_MKL_KERNEL(\"QuantizedDepthwiseConv2D\", NoOp, quint8, float, qint32,\n qint32, false, false, false);\nREGISTER_MKL_KERNEL(\"QuantizedDepthwiseConv2DWithBias\", NoOp, quint8, float,\n qint32, qint32, false, false, false);\nREGISTER_MKL_KERNEL(\"QuantizedDepthwiseConv2DWithBiasAndRelu\", NoOp, quint8,\n float, qint32, qint32, false, false, false);\n#undef SUMMAND_TYPE_CONSTRAINT\n#undef BIAS_TYPE_CONSTRAINT\n#define BIAS_TYPE_CONSTRAINT(bias_type) .TypeConstraint(\"Tbias\")\n#define SUMMAND_TYPE_CONSTRAINT(summand_type)\nREGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(\n \"QuantizedConv2DWithBiasAndRequantize\", NoOp, qint8, qint8, false, false,\n false);\nREGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(\n \"QuantizedConv2DWithBiasAndReluAndRequantize\", NoOp, quint8, quint8, false,\n false, false);\nREGISTER_MKL_KERNEL_ALL_BIAS_TYPES(\n \"QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize\", NoOp, quint8,\n quint8, quint8, false, false, false);\n#undef SUMMAND_TYPE_CONSTRAINT\n#define SUMMAND_TYPE_CONSTRAINT(summand_type) \\\n .TypeConstraint(\"Tsummand\")\nREGISTER_MKL_KERNEL_ALL_BIAS_TYPES(\n \"QuantizedConv2DWithBiasSumAndReluAndRequantize\", NoOp, quint8, quint8,\n quint8, false, false, false);\nREGISTER_MKL_KERNEL_ALL_BIAS_TYPES(\n \"QuantizedConv2DWithBiasSignedSumAndReluAndRequantize\", NoOp, quint8,\n quint8, qint8, false, false, false);\n#undef SUMMAND_TYPE_CONSTRAINT\n#undef BIAS_TYPE_CONSTRAINT\n#undef TEMPLATE_ARGS\n#undef LABEL\n#define TEMPLATE_ARGS(CPUDevice, input_type, bias_type, output_type, \\\n summand_type, is_depthwise, legacy_fused_ops, \\\n num_fused_ops) \\\n\n#define BIAS_TYPE_CONSTRAINT(bias_type)\n#define SUMMAND_TYPE_CONSTRAINT(summand_type)\n#define LABEL .Label(mkl_op_registry::kMklQuantizedOpLabel)\nREGISTER_MKL_KERNEL_ALL_INPUT_TYPES(\"_MklQuantizedConv2D\", MklQuantizedConvOp,\n float, qint32, qint32, false,\n quantized_fusions::none, 0);\nREGISTER_MKL_KERNEL_ALL_INPUT_TYPES(\"_MklQuantizedConv2DPerChannel\",\n MklQuantizedConvOp, float, qint32, qint32,\n false, quantized_fusions::none, 0);\nREGISTER_MKL_KERNEL_ALL_INPUT_TYPES(\"_MklQuantizedConv2DWithBias\",\n MklQuantizedConvOp, float, qint32, qint32,\n false, quantized_fusions::bias, 1);\nREGISTER_MKL_KERNEL_ALL_INPUT_TYPES(\"_MklQuantizedConv2DWithBiasAndRelu\",\n MklQuantizedConvOp, float, qint32, qint32,\n false, quantized_fusions::bias_relu, 2);\nREGISTER_MKL_KERNEL(\"_MklQuantizedConv2DWithBiasSumAndRelu\", MklQuantizedConvOp,\n quint8, float, qint32, qint32, false,\n quantized_fusions::bias_sum_relu, 3);\nREGISTER_MKL_KERNEL(\"_MklQuantizedConv2DAndRequantize\", MklQuantizedConvOp,\n quint8, float, qint8, qint8, false,\n quantized_fusions::requantize, 1);\nREGISTER_MKL_KERNEL(\"_MklQuantizedConv2DAndRelu\", MklQuantizedConvOp, quint8,\n float, qint32, qint32, false, quantized_fusions::relu, 1);\nREGISTER_MKL_KERNEL(\"_MklQuantizedConv2DAndReluAndRequantize\",\n MklQuantizedConvOp, quint8, float, quint8, quint8, false,\n quantized_fusions::relu_requantize, 2);\nREGISTER_MKL_KERNEL(\"_MklQuantizedDepthwiseConv2D\", MklQuantizedConvOp, 
quint8,\n float, qint32, qint32, true, quantized_fusions::none, 0);\nREGISTER_MKL_KERNEL(\"_MklQuantizedDepthwiseConv2DWithBias\", MklQuantizedConvOp,\n quint8, float, qint32, qint32, true,\n quantized_fusions::bias, 1);\nREGISTER_MKL_KERNEL(\"_MklQuantizedDepthwiseConv2DWithBiasAndRelu\",\n MklQuantizedConvOp, quint8, float, qint32, qint32, true,\n quantized_fusions::bias_relu, 2);\n#undef SUMMAND_TYPE_CONSTRAINT\n#undef BIAS_TYPE_CONSTRAINT\n#define BIAS_TYPE_CONSTRAINT(bias_type) .TypeConstraint(\"Tbias\")\n#define SUMMAND_TYPE_CONSTRAINT(summand_type)\nREGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(\n \"_MklQuantizedConv2DWithBiasAndRequantize\", MklQuantizedConvOp, qint8,\n qint8, false, quantized_fusions::bias_requantize, 2);\nREGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(\n \"_MklQuantizedConv2DWithBiasAndReluAndRequantize\", MklQuantizedConvOp,\n quint8, quint8, false, quantized_fusions::bias_relu_requantize, 3);\nREGISTER_MKL_KERNEL_ALL_BIAS_TYPES(\n \"_MklQuantizedDepthwiseConv2DWithBiasAndReluAndRequantize\",\n MklQuantizedConvOp, quint8, quint8, quint8, true,\n quantized_fusions::bias_relu_requantize, 3);\n#undef LABEL\n#define LABEL\nREGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(\"_FusedQuantizedConv2D\",\n MklQuantizedConvOp, qint32, qint32,\n false, quantized_fusions::none, -1)\nREGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(\"_FusedQuantizedDepthwiseConv2D\",\n MklQuantizedConvOp, qint32, qint32,\n true, quantized_fusions::none, -1)\n#undef LABEL\n#define LABEL .Label(mkl_op_registry::kMklQuantizedOpLabel)\n#undef SUMMAND_TYPE_CONSTRAINT\n#define SUMMAND_TYPE_CONSTRAINT(summand_type) \\\n .TypeConstraint(\"Tsummand\")\nREGISTER_MKL_KERNEL_ALL_BIAS_TYPES(\n \"_MklQuantizedConv2DWithBiasSumAndReluAndRequantize\", MklQuantizedConvOp,\n quint8, quint8, quint8, false, quantized_fusions::bias_sum_relu_requantize,\n 4);\nREGISTER_MKL_KERNEL_ALL_BIAS_TYPES(\n \"_MklQuantizedConv2DWithBiasSignedSumAndReluAndRequantize\",\n MklQuantizedConvOp, quint8, quint8, qint8, false,\n quantized_fusions::bias_sum_relu_requantize, 4);\n#undef LABEL\n#define LABEL\nREGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(\"_FusedQuantizedConv2D\",\n MklQuantizedConvOp, qint8, qint8,\n false, quantized_fusions::none,\n -1);\nREGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(\"_FusedQuantizedConv2D\",\n MklQuantizedConvOp, quint8, qint8,\n false, quantized_fusions::none,\n -1);\nREGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(\"_FusedQuantizedConv2D\",\n MklQuantizedConvOp, quint8, quint8,\n false, quantized_fusions::none,\n -1);\nREGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(\"_FusedQuantizedConv2D\",\n MklQuantizedConvOp, qint8, quint8,\n false, quantized_fusions::none,\n -1);\nREGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(\"_FusedQuantizedDepthwiseConv2D\",\n MklQuantizedConvOp, qint8, qint8,\n true, quantized_fusions::none, -1);\nREGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(\"_FusedQuantizedDepthwiseConv2D\",\n MklQuantizedConvOp, quint8, qint8,\n true, quantized_fusions::none, -1);\nREGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(\"_FusedQuantizedDepthwiseConv2D\",\n MklQuantizedConvOp, quint8, quint8,\n true, quantized_fusions::none, -1);\nREGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(\"_FusedQuantizedDepthwiseConv2D\",\n MklQuantizedConvOp, qint8, quint8,\n true, quantized_fusions::none, -1);\n#undef LABEL\n#undef SUMMAND_TYPE_CONSTRAINT\n#undef BIAS_TYPE_CONSTRAINT\n#undef TEMPLATE_ARGS\n#define REGISTER_NO_OP_CPU_2D_DEPTHWISE(T) \\\n REGISTER_KERNEL_BUILDER(Name(\"_FusedDepthwiseConv2dNative\") \\\n .Device(DEVICE_CPU) 
\\\n .TypeConstraint(\"T\"), \\\n NoOp);\nTF_CALL_float(REGISTER_NO_OP_CPU_2D_DEPTHWISE);\nTF_CALL_bfloat16(REGISTER_NO_OP_CPU_2D_DEPTHWISE);\nTF_CALL_half(REGISTER_NO_OP_CPU_2D_DEPTHWISE);\n#define REGISTER_MKL_CPU_2D(T) \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklConv2D\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \\\n MklConvOp); \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklConv2DWithBias\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \\\n MklConvOp); \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"__MklDummyConv2DWithBias\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \\\n MklDummyOp); \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklPadWithConv2D\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .TypeConstraint(\"Tpaddings\") \\\n .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \\\n MklConvOp); \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklPadWithConv2D\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .TypeConstraint(\"Tpaddings\") \\\n .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \\\n MklConvOp); \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"__MklDummyPadWithConv2D\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .TypeConstraint(\"Tpaddings\") \\\n .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \\\n MklDummyOp); \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklNativeConv2D\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .Label(mkl_op_registry::kMklNameChangeOpLabel), \\\n MklConvOp); \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklNativeConv2DWithBias\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .Label(mkl_op_registry::kMklNameChangeOpLabel), \\\n MklConvOp); \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklNativePadWithConv2D\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .TypeConstraint(\"Tpaddings\") \\\n .Label(mkl_op_registry::kMklNameChangeOpLabel), \\\n MklConvOp); \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklNativePadWithConv2D\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .TypeConstraint(\"Tpaddings\") \\\n .Label(mkl_op_registry::kMklNameChangeOpLabel), \\\n MklConvOp);\nTF_CALL_float(REGISTER_MKL_CPU_2D);\nTF_CALL_bfloat16(REGISTER_MKL_CPU_2D);\nTF_CALL_half(REGISTER_MKL_CPU_2D);\n#define REGISTER_MKL_CPU_2D_DEPTHWISE(T) \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklDepthwiseConv2dNative\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \\\n MklConvOp); \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklFusedDepthwiseConv2dNative\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \\\n MklFusedDepthwiseConvOp); \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklNativeFusedDepthwiseConv2dNative\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .Label(mkl_op_registry::kMklNameChangeOpLabel), \\\n MklFusedDepthwiseConvOp); \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklNativeDepthwiseConv2dNative\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .Label(mkl_op_registry::kMklNameChangeOpLabel), \\\n MklConvOp);\nTF_CALL_float(REGISTER_MKL_CPU_2D_DEPTHWISE);\nTF_CALL_bfloat16(REGISTER_MKL_CPU_2D_DEPTHWISE);\nTF_CALL_half(REGISTER_MKL_CPU_2D_DEPTHWISE);\n#define REGISTER_MKL_CPU_2D_FUSED(T) \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklFusedConv2D\") 
\\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \\\n MklFusedConvOp); \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklPadWithFusedConv2D\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"Tpaddings\") \\\n .TypeConstraint(\"T\") \\\n .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \\\n MklFusedConvOp); \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklPadWithFusedConv2D\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .TypeConstraint(\"Tpaddings\") \\\n .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \\\n MklFusedConvOp); \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"__MklDummyPadWithFusedConv2D\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .TypeConstraint(\"Tpaddings\") \\\n .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \\\n MklDummyOp); \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklNativeFusedConv2D\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .Label(mkl_op_registry::kMklNameChangeOpLabel), \\\n MklFusedConvOp); \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklNativePadWithFusedConv2D\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"Tpaddings\") \\\n .TypeConstraint(\"T\") \\\n .Label(mkl_op_registry::kMklNameChangeOpLabel), \\\n MklFusedConvOp); \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklNativePadWithFusedConv2D\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .TypeConstraint(\"Tpaddings\") \\\n .Label(mkl_op_registry::kMklNameChangeOpLabel), \\\n MklFusedConvOp);\nTF_CALL_float(REGISTER_MKL_CPU_2D_FUSED);\nTF_CALL_bfloat16(REGISTER_MKL_CPU_2D_FUSED);\nTF_CALL_half(REGISTER_MKL_CPU_2D_FUSED);\n#define REGISTER_MKL_CPU_3D(T) \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklConv3D\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \\\n MklConvOp); \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklNativeConv3D\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .Label(mkl_op_registry::kMklNameChangeOpLabel), \\\n MklConvOp); \\\n REGISTER_KERNEL_BUILDER( \\\n Name(\"_MklNativeFusedConv3D\") \\\n .Device(DEVICE_CPU) \\\n .TypeConstraint(\"T\") \\\n .Label(mkl_op_registry::kMklNameChangeOpLabel), \\\n MklFusedConv3DOp);\nTF_CALL_float(REGISTER_MKL_CPU_3D);\nTF_CALL_bfloat16(REGISTER_MKL_CPU_3D);\nTF_CALL_half(REGISTER_MKL_CPU_3D);\n#undef APPEND_DEPTHWISE\n#undef APPEND_ELTWISE\n#undef GET_DATA_TYPE\n#undef SET_FUSE_ACTIVATION_FOR_RELU6\n#undef SET_MKL_LAYOUT\n#undef OUTPUT_SCALE_DCHECK\n#undef TSCALED_BIAS\n#undef SCALE\n#undef SUMMAND_SCALE_U8\n#undef SUMMAND_SCALE_S8\n} \n#endif "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/cc/ops/const_op.h\"\n#include \"tensorflow/cc/ops/nn_ops.h\"\n#include \"tensorflow/cc/ops/standard_ops.h\"\n#include \"tensorflow/core/common_runtime/kernel_benchmark_testlib.h\"\n#include \"tensorflow/core/framework/fake_input.h\"\n#include \"tensorflow/core/framework/node_def_builder.h\"\n#include \"tensorflow/core/framework/tensor.h\"\n#include \"tensorflow/core/framework/types.pb.h\"\n#include \"tensorflow/core/kernels/ops_testutil.h\"\n#include \"tensorflow/core/kernels/ops_util.h\"\n#include \"tensorflow/core/platform/test.h\"\n#include \"tensorflow/core/platform/test_benchmark.h\"\n#include \"tensorflow/core/public/session.h\"\n#include \"tensorflow/core/util/mkl_util.h\"\nnamespace tensorflow {\nstruct Conv2DDimensions {\n Conv2DDimensions(int n, int h, int w, int c, int fc, int fh, int fw)\n : input_batches(n),\n input_height(h),\n 
input_width(w),\n input_depth(c),\n filter_count(fc),\n filter_height(fh),\n filter_width(fw) {}\n int input_batches;\n int input_height;\n int input_width;\n int input_depth;\n int filter_count;\n int filter_height;\n int filter_width;\n};\nstatic Tensor GetRandomTensor(const TensorShape& shape) {\n Tensor tensor(DT_FLOAT, TensorShape(shape));\n tensor.flat() = tensor.flat().setRandom();\n return tensor;\n}\nstatic Tensor GetRandomInputTensor(const Conv2DDimensions& dims) {\n return GetRandomTensor({dims.input_batches, dims.input_height,\n dims.input_width, dims.input_depth});\n}\nstatic Tensor GetRandomFilterTensor(const Conv2DDimensions& dims) {\n return GetRandomTensor({dims.filter_height, dims.filter_width,\n dims.input_depth, dims.filter_count});\n}\nstatic Tensor GetRandomOutputTensor(const Conv2DDimensions& dims) {\n return GetRandomTensor({dims.input_batches, dims.input_height,\n dims.input_width, dims.filter_count});\n}\nstatic Tensor GetInputSizesTensor(const Conv2DDimensions& dims) {\n return test::AsTensor({dims.input_batches, dims.input_height,\n dims.input_width, dims.input_depth});\n}\nstatic Tensor GetFilterSizesTensor(const Conv2DDimensions& dims) {\n return test::AsTensor({dims.filter_height, dims.filter_width,\n dims.input_depth, dims.filter_count});\n}\nstatic Graph* DefaultConv2D(const Conv2DDimensions& dims) {\n auto* graph = new Graph(OpRegistry::Global());\n Tensor input_t = GetRandomInputTensor(dims);\n Tensor filter_t = GetRandomFilterTensor(dims);\n Node* input = test::graph::Constant(graph, input_t, \"input\");\n Node* filter = test::graph::Constant(graph, filter_t, \"filter\");\n Node* conv2d;\n TF_CHECK_OK(NodeBuilder(graph->NewName(\"conv_2d\"), \"Conv2D\")\n .Input(input)\n .Input(filter)\n .Attr(\"T\", DT_FLOAT)\n .Attr(\"strides\", {1, 1, 1, 1})\n .Attr(\"padding\", \"SAME\")\n .Finalize(graph, &conv2d));\n return graph;\n}\nstatic Graph* MklConv2D(const Conv2DDimensions& dims) {\n auto* graph = new Graph(OpRegistry::Global());\n Tensor input_t = GetRandomInputTensor(dims);\n Tensor filter_t = GetRandomFilterTensor(dims);\n Node* input = test::graph::Constant(graph, input_t, \"input\");\n Node* filter = test::graph::Constant(graph, filter_t, \"filter\");\n Node* not_mkl_shape =\n test::graph::Constant(graph, GetMklMetaTensor(), \"not_mkl\");\n Node* conv2d;\n TF_CHECK_OK(NodeBuilder(graph->NewName(\"mkl_conv_2d\"), \"_MklConv2D\")\n .Input(input)\n .Input(filter)\n .Input(not_mkl_shape)\n .Input(not_mkl_shape)\n .Attr(\"T\", DT_FLOAT)\n .Attr(\"strides\", {1, 1, 1, 1})\n .Attr(\"padding\", \"SAME\")\n .Attr(\"_kernel\", \"MklOp\")\n .Finalize(graph, &conv2d));\n return graph;\n}\nstatic Graph* DefaultConv2DBwdInput(const Conv2DDimensions& dims) {\n auto* graph = new Graph(OpRegistry::Global());\n Tensor input_sizes_t = GetInputSizesTensor(dims);\n Tensor filter_t = GetRandomFilterTensor(dims);\n Tensor out_backprop_t = GetRandomOutputTensor(dims); \n Node* input_sizes =\n test::graph::Constant(graph, input_sizes_t, \"input_sizes\");\n Node* filter = test::graph::Constant(graph, filter_t, \"filter\");\n Node* out_backprop =\n test::graph::Constant(graph, out_backprop_t, \"out_backprop\");\n Node* conv2d_bwd_input;\n TF_CHECK_OK(\n NodeBuilder(graph->NewName(\"conv_2d_bwd_input\"), \"Conv2DBackpropInput\")\n .Input(input_sizes)\n .Input(filter)\n .Input(out_backprop)\n .Attr(\"T\", DT_FLOAT)\n .Attr(\"strides\", {1, 1, 1, 1})\n .Attr(\"padding\", \"SAME\")\n .Finalize(graph, &conv2d_bwd_input));\n return graph;\n}\nstatic Graph* MklConv2DBwdInput(const 
Conv2DDimensions& dims) {\n auto* graph = new Graph(OpRegistry::Global());\n Tensor input_sizes_t = GetInputSizesTensor(dims);\n Tensor filter_t = GetRandomFilterTensor(dims);\n Tensor out_backprop_t = GetRandomOutputTensor(dims); \n Node* input_sizes =\n test::graph::Constant(graph, input_sizes_t, \"input_sizes\");\n Node* filter = test::graph::Constant(graph, filter_t, \"filter\");\n Node* out_backprop =\n test::graph::Constant(graph, out_backprop_t, \"out_backprop\");\n Node* not_mkl_shape =\n test::graph::Constant(graph, GetMklMetaTensor(), \"not_mkl\");\n Node* conv2d_bwd_input;\n TF_CHECK_OK(NodeBuilder(graph->NewName(\"conv_2d_bwd_input\"),\n \"_MklConv2DBackpropInput\")\n .Input(input_sizes)\n .Input(filter)\n .Input(out_backprop)\n .Input(not_mkl_shape)\n .Input(not_mkl_shape)\n .Input(not_mkl_shape)\n .Attr(\"T\", DT_FLOAT)\n .Attr(\"strides\", {1, 1, 1, 1})\n .Attr(\"padding\", \"SAME\")\n .Attr(\"_kernel\", \"MklOp\")\n .Finalize(graph, &conv2d_bwd_input));\n return graph;\n}\nstatic Graph* DefaultConv2DBwdFilter(const Conv2DDimensions& dims) {\n auto* graph = new Graph(OpRegistry::Global());\n Tensor input_t = GetRandomInputTensor(dims);\n Tensor filter_sizes_t = GetFilterSizesTensor(dims);\n Tensor filter_t = GetRandomFilterTensor(dims);\n Tensor out_backprop_t = GetRandomOutputTensor(dims); \n Node* input = test::graph::Constant(graph, input_t, \"input\");\n Node* filter_sizes =\n test::graph::Constant(graph, filter_sizes_t, \"filter_sizes\");\n Node* out_backprop =\n test::graph::Constant(graph, out_backprop_t, \"out_backprop\");\n Node* conv2d_bwd_filter;\n TF_CHECK_OK(\n NodeBuilder(graph->NewName(\"conv_2d_bwd_filter\"), \"Conv2DBackpropFilter\")\n .Input(input)\n .Input(filter_sizes)\n .Input(out_backprop)\n .Attr(\"T\", DT_FLOAT)\n .Attr(\"strides\", {1, 1, 1, 1})\n .Attr(\"padding\", \"SAME\")\n .Finalize(graph, &conv2d_bwd_filter));\n return graph;\n}\nstatic Graph* MklConv2DBwdFilter(const Conv2DDimensions& dims) {\n Graph* graph = new Graph(OpRegistry::Global());\n Tensor input_t = GetRandomInputTensor(dims);\n Tensor filter_sizes_t = GetFilterSizesTensor(dims);\n Tensor filter_t = GetRandomFilterTensor(dims);\n Tensor out_backprop_t = GetRandomOutputTensor(dims); \n Node* input = test::graph::Constant(graph, input_t, \"input\");\n Node* filter_sizes =\n test::graph::Constant(graph, filter_sizes_t, \"filter_sizes\");\n Node* out_backprop =\n test::graph::Constant(graph, out_backprop_t, \"out_backprop\");\n Node* not_mkl_shape =\n test::graph::Constant(graph, GetMklMetaTensor(), \"not_mkl\");\n Node* conv2d_bwd_filter;\n TF_CHECK_OK(NodeBuilder(graph->NewName(\"conv_2d_bwd_filter\"),\n \"_MklConv2DBackpropFilter\")\n .Input(input)\n .Input(filter_sizes)\n .Input(out_backprop)\n .Input(not_mkl_shape)\n .Input(not_mkl_shape)\n .Input(not_mkl_shape)\n .Attr(\"T\", DT_FLOAT)\n .Attr(\"strides\", {1, 1, 1, 1})\n .Attr(\"padding\", \"SAME\")\n .Attr(\"_kernel\", \"MklOp\")\n .Finalize(graph, &conv2d_bwd_filter));\n return graph;\n}\n#define BM_CONCAT(a, b) a##b\n#define BM_NAME(p, type, N, H, W, C, FC, FH, FW) \\\n BM_CONCAT(BM_##p##_##type##_in_##N##_##H##_##W##_##C, _f_##FC##_##FH##_##FW)\n#define BM_Conv2DT(kind, N, H, W, C, FC, FH, FW, type, LABEL) \\\n static void BM_NAME(Conv2D_##kind, type, N, H, W, C, FC, FH, \\\n FW)(::testing::benchmark::State & state) { \\\n state.SetLabel(LABEL); \\\n \\\n int64 num_computed_elements = (N) * (H) * (W) * (FC); \\\n int64 flops_per_iter = num_computed_elements * ((C) * (FH) * (FW)); \\\n \\\n Conv2DDimensions dims(N, H, W, C, 
FC, FW, FH); \\\n test::Benchmark(#type, BM_CONCAT(kind, Conv2D)(dims), \\\n false) \\\n .Run(state); \\\n state.SetItemsProcessed(state.iterations() * flops_per_iter); \\\n } \\\n BENCHMARK(BM_NAME(Conv2D_##kind, type, N, H, W, C, FC, FH, FW))\n#define BM_Conv2D(N, H, W, C, FC, FH, FW, type, LABEL) \\\n BM_Conv2DT(Default, N, H, W, C, FC, FH, FW, type, LABEL); \\\n BM_Conv2DT(Mkl, N, H, W, C, FC, FH, FW, type, LABEL);\n#define BM_Conv2DBwdInputT(kind, N, H, W, C, FC, FH, FW, type, LABEL) \\\n static void BM_NAME(Conv2DBwdInput_##kind, type, N, H, W, C, FC, FH, \\\n FW)(::testing::benchmark::State & state) { \\\n state.SetLabel(LABEL); \\\n \\\n int64 num_computed_elements = (N) * (H) * (W) * (C); \\\n int64 flops_per_iter = num_computed_elements * ((C) * (FH) * (FW)); \\\n \\\n Conv2DDimensions dims(N, H, W, C, FC, FW, FH); \\\n test::Benchmark(#type, BM_CONCAT(kind, Conv2DBwdInput)(dims), \\\n false) \\\n .Run(state); \\\n state.SetItemsProcessed(state.iterations() * flops_per_iter); \\\n } \\\n BENCHMARK(BM_NAME(Conv2DBwdInput_##kind, type, N, H, W, C, FC, FH, FW))\n#define BM_Conv2DBwdInput(N, H, W, C, FC, FH, FW, type, LABEL) \\\n BM_Conv2DBwdInputT(Default, N, H, W, C, FC, FH, FW, type, LABEL); \\\n BM_Conv2DBwdInputT(Mkl, N, H, W, C, FC, FH, FW, type, LABEL);\n#define BM_Conv2DBwdFilterT(kind, N, H, W, C, FC, FH, FW, type, LABEL) \\\n static void BM_NAME(Conv2DBwdFilter_##kind, type, N, H, W, C, FC, FH, \\\n FW)(::testing::benchmark::State & state) { \\\n state.SetLabel(LABEL); \\\n \\\n int64 num_computed_elements = (FH) * (FW) * (C) * (FC); \\\n int64 flops_per_iter = num_computed_elements * ((N) * (H) * (W)); \\\n \\\n Conv2DDimensions dims(N, H, W, C, FC, FW, FH); \\\n test::Benchmark(#type, BM_CONCAT(kind, Conv2DBwdFilter)(dims), \\\n false) \\\n .Run(state); \\\n state.SetItemsProcessed(state.iterations() * flops_per_iter); \\\n } \\\n BENCHMARK(BM_NAME(Conv2DBwdFilter_##kind, type, N, H, W, C, FC, FH, FW))\n#define BM_Conv2DBwdFilter(N, H, W, C, FC, FH, FW, type, LABEL) \\\n BM_Conv2DBwdFilterT(Default, N, H, W, C, FC, FH, FW, type, LABEL); \\\n BM_Conv2DBwdFilterT(Mkl, N, H, W, C, FC, FH, FW, type, LABEL);\nBM_Conv2D(32, 28, 28, 96, 128, 3, 3, cpu, \"conv3a_00_3x3\");\nBM_Conv2D(32, 28, 28, 16, 32, 5, 5, cpu, \"conv3a_00_5x5\");\nBM_Conv2D(32, 28, 28, 128, 192, 3, 3, cpu, \"conv3_00_3x3\");\nBM_Conv2D(32, 28, 28, 32, 96, 5, 5, cpu, \"conv3_00_5x5\");\nBM_Conv2D(32, 14, 14, 96, 204, 3, 3, cpu, \"conv4a_00_3x3\");\nBM_Conv2D(32, 14, 14, 16, 48, 5, 5, cpu, \"conv4a_00_5x5\");\nBM_Conv2D(32, 14, 14, 112, 224, 3, 3, cpu, \"conv4b_00_3x3\");\nBM_Conv2DBwdInput(32, 28, 28, 96, 128, 3, 3, cpu, \"conv3a_00_3x3\");\nBM_Conv2DBwdInput(32, 28, 28, 16, 32, 5, 5, cpu, \"conv3a_00_5x5\");\nBM_Conv2DBwdInput(32, 28, 28, 128, 192, 3, 3, cpu, \"conv3_00_3x3\");\nBM_Conv2DBwdInput(32, 28, 28, 32, 96, 5, 5, cpu, \"conv3_00_5x5\");\nBM_Conv2DBwdInput(32, 14, 14, 96, 204, 3, 3, cpu, \"conv4a_00_3x3\");\nBM_Conv2DBwdInput(32, 14, 14, 16, 48, 5, 5, cpu, \"conv4a_00_5x5\");\nBM_Conv2DBwdInput(32, 14, 14, 112, 224, 3, 3, cpu, \"conv4b_00_3x3\");\nBM_Conv2DBwdFilter(32, 28, 28, 96, 128, 3, 3, cpu, \"conv3a_00_3x3\");\nBM_Conv2DBwdFilter(32, 28, 28, 16, 32, 5, 5, cpu, \"conv3a_00_5x5\");\nBM_Conv2DBwdFilter(32, 28, 28, 128, 192, 3, 3, cpu, \"conv3_00_3x3\");\nBM_Conv2DBwdFilter(32, 28, 28, 32, 96, 5, 5, cpu, \"conv3_00_5x5\");\nBM_Conv2DBwdFilter(32, 14, 14, 96, 204, 3, 3, cpu, \"conv4a_00_3x3\");\nBM_Conv2DBwdFilter(32, 14, 14, 16, 48, 5, 5, cpu, \"conv4a_00_5x5\");\nBM_Conv2DBwdFilter(32, 14, 14, 
112, 224, 3, 3, cpu, \"conv4b_00_3x3\");\n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_conv_ops.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_conv_ops_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":287,"cells":{"ID":{"kind":"string","value":"beb94efe-689c-4864-a58d-c462f72b8745"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/leveldb"},"File Name":{"kind":"string","value":"filter_block"},"File Path in Repository":{"kind":"string","value":"table/filter_block.cc"},"File Path for Unit Test":{"kind":"string","value":"table/filter_block_test.cc"},"Code":{"kind":"string","value":"#include \"table/filter_block.h\"\n#include \"leveldb/filter_policy.h\"\n#include \"util/coding.h\"\nnamespace leveldb {\nstatic const size_t kFilterBaseLg = 11;\nstatic const size_t kFilterBase = 1 << kFilterBaseLg;\nFilterBlockBuilder::FilterBlockBuilder(const FilterPolicy* policy)\n : policy_(policy) {}\nvoid FilterBlockBuilder::StartBlock(uint64_t block_offset) {\n uint64_t filter_index = (block_offset / kFilterBase);\n assert(filter_index >= filter_offsets_.size());\n while (filter_index > filter_offsets_.size()) {\n GenerateFilter();\n }\n}\nvoid FilterBlockBuilder::AddKey(const Slice& key) {\n Slice k = key;\n start_.push_back(keys_.size());\n keys_.append(k.data(), k.size());\n}\nSlice FilterBlockBuilder::Finish() {\n if (!start_.empty()) {\n GenerateFilter();\n }\n const uint32_t array_offset = result_.size();\n for (size_t i = 0; i < filter_offsets_.size(); i++) {\n PutFixed32(&result_, filter_offsets_[i]);\n }\n PutFixed32(&result_, array_offset);\n result_.push_back(kFilterBaseLg); \n return Slice(result_);\n}\nvoid FilterBlockBuilder::GenerateFilter() {\n const size_t num_keys = start_.size();\n if (num_keys == 0) {\n filter_offsets_.push_back(result_.size());\n return;\n }\n start_.push_back(keys_.size()); \n tmp_keys_.resize(num_keys);\n for (size_t i = 0; i < num_keys; i++) {\n const char* base = keys_.data() + start_[i];\n size_t length = start_[i + 1] - start_[i];\n tmp_keys_[i] = Slice(base, length);\n }\n filter_offsets_.push_back(result_.size());\n policy_->CreateFilter(&tmp_keys_[0], static_cast(num_keys), &result_);\n tmp_keys_.clear();\n keys_.clear();\n start_.clear();\n}\nFilterBlockReader::FilterBlockReader(const FilterPolicy* policy,\n const Slice& contents)\n : policy_(policy), data_(nullptr), offset_(nullptr), num_(0), base_lg_(0) {\n size_t n = contents.size();\n if (n < 5) return; \n base_lg_ = contents[n - 1];\n uint32_t last_word = DecodeFixed32(contents.data() + n - 5);\n if (last_word > n - 5) return;\n data_ = contents.data();\n offset_ = data_ + last_word;\n num_ = (n - 5 - last_word) / 4;\n}\nbool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) {\n uint64_t index = block_offset >> base_lg_;\n if (index < num_) {\n uint32_t start = DecodeFixed32(offset_ + index * 4);\n uint32_t limit = DecodeFixed32(offset_ + index * 4 + 4);\n if (start <= limit && limit <= static_cast(offset_ - data_)) {\n Slice filter = Slice(data_ + start, limit - start);\n return policy_->KeyMayMatch(key, filter);\n } else if (start == limit) {\n return false;\n }\n }\n return true; \n}\n} "},"Unit Test - (Ground 
Truth)":{"kind":"string","value":"#include \"table/filter_block.h\"\n#include \"gtest/gtest.h\"\n#include \"leveldb/filter_policy.h\"\n#include \"util/coding.h\"\n#include \"util/hash.h\"\n#include \"util/logging.h\"\n#include \"util/testutil.h\"\nnamespace leveldb {\nclass TestHashFilter : public FilterPolicy {\n public:\n const char* Name() const override { return \"TestHashFilter\"; }\n void CreateFilter(const Slice* keys, int n, std::string* dst) const override {\n for (int i = 0; i < n; i++) {\n uint32_t h = Hash(keys[i].data(), keys[i].size(), 1);\n PutFixed32(dst, h);\n }\n }\n bool KeyMayMatch(const Slice& key, const Slice& filter) const override {\n uint32_t h = Hash(key.data(), key.size(), 1);\n for (size_t i = 0; i + 4 <= filter.size(); i += 4) {\n if (h == DecodeFixed32(filter.data() + i)) {\n return true;\n }\n }\n return false;\n }\n};\nclass FilterBlockTest : public testing::Test {\n public:\n TestHashFilter policy_;\n};\nTEST_F(FilterBlockTest, EmptyBuilder) {\n FilterBlockBuilder builder(&policy_);\n Slice block = builder.Finish();\n ASSERT_EQ(\"\\\\x00\\\\x00\\\\x00\\\\x00\\\\x0b\", EscapeString(block));\n FilterBlockReader reader(&policy_, block);\n ASSERT_TRUE(reader.KeyMayMatch(0, \"foo\"));\n ASSERT_TRUE(reader.KeyMayMatch(100000, \"foo\"));\n}\nTEST_F(FilterBlockTest, SingleChunk) {\n FilterBlockBuilder builder(&policy_);\n builder.StartBlock(100);\n builder.AddKey(\"foo\");\n builder.AddKey(\"bar\");\n builder.AddKey(\"box\");\n builder.StartBlock(200);\n builder.AddKey(\"box\");\n builder.StartBlock(300);\n builder.AddKey(\"hello\");\n Slice block = builder.Finish();\n FilterBlockReader reader(&policy_, block);\n ASSERT_TRUE(reader.KeyMayMatch(100, \"foo\"));\n ASSERT_TRUE(reader.KeyMayMatch(100, \"bar\"));\n ASSERT_TRUE(reader.KeyMayMatch(100, \"box\"));\n ASSERT_TRUE(reader.KeyMayMatch(100, \"hello\"));\n ASSERT_TRUE(reader.KeyMayMatch(100, \"foo\"));\n ASSERT_TRUE(!reader.KeyMayMatch(100, \"missing\"));\n ASSERT_TRUE(!reader.KeyMayMatch(100, \"other\"));\n}\nTEST_F(FilterBlockTest, MultiChunk) {\n FilterBlockBuilder builder(&policy_);\n builder.StartBlock(0);\n builder.AddKey(\"foo\");\n builder.StartBlock(2000);\n builder.AddKey(\"bar\");\n builder.StartBlock(3100);\n builder.AddKey(\"box\");\n builder.StartBlock(9000);\n builder.AddKey(\"box\");\n builder.AddKey(\"hello\");\n Slice block = builder.Finish();\n FilterBlockReader reader(&policy_, block);\n ASSERT_TRUE(reader.KeyMayMatch(0, \"foo\"));\n ASSERT_TRUE(reader.KeyMayMatch(2000, \"bar\"));\n ASSERT_TRUE(!reader.KeyMayMatch(0, \"box\"));\n ASSERT_TRUE(!reader.KeyMayMatch(0, \"hello\"));\n ASSERT_TRUE(reader.KeyMayMatch(3100, \"box\"));\n ASSERT_TRUE(!reader.KeyMayMatch(3100, \"foo\"));\n ASSERT_TRUE(!reader.KeyMayMatch(3100, \"bar\"));\n ASSERT_TRUE(!reader.KeyMayMatch(3100, \"hello\"));\n ASSERT_TRUE(!reader.KeyMayMatch(4100, \"foo\"));\n ASSERT_TRUE(!reader.KeyMayMatch(4100, \"bar\"));\n ASSERT_TRUE(!reader.KeyMayMatch(4100, \"box\"));\n ASSERT_TRUE(!reader.KeyMayMatch(4100, \"hello\"));\n ASSERT_TRUE(reader.KeyMayMatch(9000, \"box\"));\n ASSERT_TRUE(reader.KeyMayMatch(9000, \"hello\"));\n ASSERT_TRUE(!reader.KeyMayMatch(9000, \"foo\"));\n ASSERT_TRUE(!reader.KeyMayMatch(9000, \"bar\"));\n}\n} "},"Code Url":{"kind":"string","value":"https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/table/filter_block.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/table/filter_block_test.cc"},"Commit 
Hash":{"kind":"string","value":"23e35d792b9154f922b8b575b12596a4d8664c65"}}},{"rowIdx":288,"cells":{"ID":{"kind":"string","value":"464b7a60-847b-4ac6-bf92-ce88e1c0c36b"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"conditional_canonicalizer"},"File Path in Repository":{"kind":"string","value":"third_party/xla/xla/service/conditional_canonicalizer.cc"},"File Path for Unit Test":{"kind":"string","value":"third_party/xla/xla/service/conditional_canonicalizer_test.cc"},"Code":{"kind":"string","value":"#include \"xla/service/conditional_canonicalizer.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/status_macros.h\"\nnamespace xla {\nnamespace {\nabsl::Status CanonicalizeNonTupleConditional(HloInstruction* conditional) {\n TF_RET_CHECK(conditional->opcode() == HloOpcode::kConditional);\n for (auto* branch : conditional->called_computations()) {\n HloInstruction* root = branch->root_instruction();\n TF_RET_CHECK(!root->shape().IsTuple());\n HloInstruction* tuple =\n branch->AddInstruction(HloInstruction::CreateTuple({root}));\n branch->set_root_instruction(tuple, true);\n }\n auto parent = conditional->parent();\n const Shape& root_shape = conditional->shape();\n auto new_shape = ShapeUtil::MakeTupleShape(absl::MakeSpan(&root_shape, 1));\n auto new_conditional =\n parent->AddInstruction(conditional->CloneWithNewShape(new_shape));\n auto gte = parent->AddInstruction(\n HloInstruction::CreateGetTupleElement(root_shape, new_conditional, 0));\n TF_RETURN_IF_ERROR(parent->ReplaceInstruction(conditional, gte));\n return absl::OkStatus();\n}\n} \nabsl::StatusOr ConditionalCanonicalizer::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n XLA_VLOG_LINES(\n 2, \"ConditionalCanonicalizer::Run(), before:\\n\" + module->ToString());\n bool changed = false;\n for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {\n for (auto* inst : comp->MakeInstructionPostOrder()) {\n if (inst->opcode() == HloOpcode::kConditional &&\n !inst->shape().IsTuple()) {\n TF_RETURN_IF_ERROR(CanonicalizeNonTupleConditional(inst));\n changed = true;\n }\n }\n }\n XLA_VLOG_LINES(\n 2, \"ConditionalCanonicalizer::Run(), after:\\n\" + module->ToString());\n return changed;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"xla/service/conditional_canonicalizer.h\"\n#include \"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_module.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_matchers.h\"\n#include \"xla/service/hlo_parser.h\"\n#include \"xla/shape_util.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tests/literal_test_util.h\"\n#include \"xla/tests/test_utils.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"xla/types.h\"\n#include \"xla/util.h\"\nnamespace xla {\nnamespace {\nnamespace op = xla::testing::opcode_matchers;\nclass ConditionalCanonicalizerTest : public HloTestBase {\n protected:\n ConditionalCanonicalizerTest() {}\n};\nTEST_F(ConditionalCanonicalizerTest, DenseArrayConditionalRewrite) {\n auto module = ParseAndReturnVerifiedModule(R\"(\nHloModule _\ntrue_branch {\n true_param = (s32[3,2]) parameter(0)\n ROOT root = s32[] constant(0)\n}\nfalse_branch {\n false_param = (s32[3,2]) parameter(0)\n ROOT root = s32[] constant(1)\n}\nENTRY entry {\n param0 = s32[3,2] parameter(0)\n branch = pred[] 
constant(false)\n param_tuple = (s32[3 ,2]) tuple(param0)\n ROOT conditional = s32[] conditional(branch, param_tuple, param_tuple),\n true_computation=true_branch, false_computation=false_branch\n}\n)\")\n .value();\n ConditionalCanonicalizer pass;\n EXPECT_TRUE(pass.Run(module.get()).value());\n EXPECT_THAT(module->entry_computation()->root_instruction(),\n op::GetTupleElement(op::Conditional()));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_canonicalizer.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_canonicalizer_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":289,"cells":{"ID":{"kind":"string","value":"cf805915-470c-4ec5-9114-22bba8f23da3"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"io"},"File Path in Repository":{"kind":"string","value":"tensorflow/compiler/mlir/quantization/stablehlo/cc/io.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/compiler/mlir/quantization/stablehlo/cc/io_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h\"\n#include <string>\n#include <vector>\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/str_format.h\"\n#include \"absl/strings/string_view.h\"\n#include \"tsl/platform/env.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace stablehlo::quantization::io {\nabsl::StatusOr<std::string> GetLocalTmpFileName(tsl::Env* const env) {\n std::string tmp_fname{};\n if (!env->LocalTempFilename(&tmp_fname)) {\n return absl::InternalError(\"Failed to create tmp file name.\");\n }\n return tmp_fname;\n}\nabsl::StatusOr<std::string> GetLocalTmpFileName() {\n return GetLocalTmpFileName(tsl::Env::Default());\n}\nabsl::StatusOr<std::string> CreateTmpDir(tsl::Env* const env) {\n TF_ASSIGN_OR_RETURN(std::string tmp_dir, GetLocalTmpFileName(env));\n if (!env->RecursivelyCreateDir(tmp_dir).ok()) {\n return absl::InternalError(\n absl::StrFormat(\"Failed to create tmp dir: '%s'\", tmp_dir));\n }\n return tmp_dir;\n}\nabsl::StatusOr<std::string> CreateTmpDir() {\n return CreateTmpDir(tsl::Env::Default());\n}\nabsl::Status WriteStringToFile(const absl::string_view file_path,\n const absl::string_view data) {\n auto* env = tsl::Env::Default();\n return WriteStringToFile(env, std::string(file_path), data);\n}\nabsl::StatusOr<std::string> ReadFileToString(\n const absl::string_view file_path) {\n auto* env = tsl::Env::Default();\n std::string data{};\n absl::Status read_status =\n ReadFileToString(env, std::string(file_path), &data);\n if (read_status.ok()) {\n return data;\n } else {\n return read_status;\n }\n}\nabsl::StatusOr<std::vector<std::string>> ListDirectory(\n absl::string_view directory) {\n std::vector<std::string> children;\n TF_RETURN_IF_ERROR(\n tsl::Env::Default()->GetChildren(std::string(directory), &children));\n return children;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/functional/any_invocable.h\"\n#include \"absl/status/status.h\"\n#include \"absl/status/statusor.h\"\n#include \"absl/strings/str_cat.h\"\n#include 
\"tsl/platform/env.h\"\n#include \"tsl/platform/file_system.h\"\n#include \"tsl/platform/status_matchers.h\"\n#include \"tsl/platform/types.h\"\nnamespace stablehlo::quantization::io {\nnamespace {\nusing ::testing::Eq;\nusing ::testing::HasSubstr;\nusing ::testing::IsEmpty;\nusing ::testing::Not;\nusing ::testing::SizeIs;\nusing ::testing::UnorderedElementsAre;\nusing ::tsl::testing::IsOk;\nusing ::tsl::testing::StatusIs;\nclass TestEnvBrokenFileSystem : public tsl::Env {\n public:\n TestEnvBrokenFileSystem() = default;\n bool MatchPath(const tsl::string& path, const tsl::string& pattern) override {\n return false;\n }\n void SleepForMicroseconds(int64_t micros) override {}\n tsl::string GetRunfilesDir() override { return tsl::string(\"dummy_path\"); }\n int32_t GetCurrentThreadId() override { return 0; }\n tsl::Thread* StartThread(const tsl::ThreadOptions& thread_options,\n const tsl::string& name,\n absl::AnyInvocable fn) override {\n return nullptr;\n }\n bool GetCurrentThreadName(tsl::string* name) override { return false; }\n void SchedClosure(absl::AnyInvocable closure) override {}\n void SchedClosureAfter(int64_t micros,\n absl::AnyInvocable closure) override {}\n absl::Status LoadDynamicLibrary(const char* library_filename,\n void** handle) override {\n return absl::OkStatus();\n }\n absl::Status GetSymbolFromLibrary(void* handle, const char* symbol_name,\n void** symbol) override {\n return absl::OkStatus();\n }\n tsl::string FormatLibraryFileName(const tsl::string& name,\n const tsl::string& version) override {\n return tsl::string(\"dummy_path\");\n }\n absl::Status GetFileSystemForFile(const std::string& fname,\n tsl::FileSystem** result) override {\n return absl::InternalError(\"Broken file system\");\n }\n private:\n void GetLocalTempDirectories(std::vector* list) override {\n list->push_back(\"/tmp\");\n }\n};\nclass TestEnvBrokenFileSystemAndNoLocalTempDirs\n : public TestEnvBrokenFileSystem {\n private:\n void GetLocalTempDirectories(std::vector* list) override {}\n};\nTEST(IoTest, GetLocalTmpFileNameGivesValidFileName) {\n absl::StatusOr tmp_file_name = GetLocalTmpFileName();\n ASSERT_THAT(tmp_file_name, IsOk());\n EXPECT_THAT(*tmp_file_name, Not(IsEmpty()));\n}\nTEST(IoTest, GetLocalTmpFileNameWhenNoTempDirsReturnsInternalError) {\n TestEnvBrokenFileSystemAndNoLocalTempDirs broken_env;\n absl::StatusOr tmp_file_name = GetLocalTmpFileName(&broken_env);\n EXPECT_THAT(tmp_file_name,\n StatusIs(absl::StatusCode::kInternal,\n HasSubstr(\"Failed to create tmp file name\")));\n}\nTEST(IoTest, CreateTmpDirReturnsValidTmpPath) {\n absl::StatusOr tmp_dir = CreateTmpDir();\n ASSERT_THAT(tmp_dir, IsOk());\n auto* const env = tsl::Env::Default();\n EXPECT_THAT(env->FileExists(*tmp_dir), IsOk());\n}\nTEST(IoTest, CreateTmpDirWhenInvalidPathReturnsInternalError) {\n TestEnvBrokenFileSystem test_env{};\n absl::StatusOr tmp_dir = CreateTmpDir(&test_env);\n EXPECT_THAT(tmp_dir, StatusIs(absl::StatusCode::kInternal,\n HasSubstr(\"Failed to create tmp dir\")));\n}\nTEST(IoTest, WriteStringToFile) {\n const std::string dst_file_path =\n absl::StrCat(testing::TempDir(), \"/tmp_file\");\n const absl::Status write_status =\n WriteStringToFile(dst_file_path, \"test_string\");\n ASSERT_THAT(write_status, IsOk());\n auto* const env = tsl::Env::Default();\n ASSERT_THAT(env->FileExists(dst_file_path), IsOk());\n std::string data{};\n ASSERT_THAT(tsl::ReadFileToString(env, dst_file_path, &data), IsOk());\n EXPECT_THAT(data, Eq(\"test_string\"));\n}\nTEST(IoTest, ReadFileToString) {\n const 
std::string src_file_path =\n absl::StrCat(testing::TempDir(), \"/tmp_file\");\n {\n std::ofstream ofs(src_file_path);\n ofs << \"test_string\";\n }\n const absl::StatusOr read_status =\n ReadFileToString(src_file_path);\n ASSERT_THAT(read_status, IsOk());\n EXPECT_THAT(*read_status, Eq(\"test_string\"));\n}\nTEST(IoTest, ListChildrenInDirectory) {\n absl::StatusOr tmp_dir = CreateTmpDir();\n ASSERT_THAT(tmp_dir, IsOk());\n auto* const env = tsl::Env::Default();\n EXPECT_THAT(env->FileExists(*tmp_dir), IsOk());\n ASSERT_THAT(\n WriteStringToFile(absl::StrCat(*tmp_dir, \"/tmp_file1\"), \"test_string\"),\n IsOk());\n ASSERT_THAT(\n WriteStringToFile(absl::StrCat(*tmp_dir, \"/tmp_file2\"), \"test_string\"),\n IsOk());\n ASSERT_THAT(env->RecursivelyCreateDir(absl::StrCat(*tmp_dir, \"/subdir\")),\n IsOk());\n absl::StatusOr> children = ListDirectory(*tmp_dir);\n EXPECT_THAT(children, IsOk());\n EXPECT_THAT(children.value(), SizeIs(3));\n EXPECT_THAT(children.value(),\n UnorderedElementsAre(\"subdir\", \"tmp_file1\", \"tmp_file2\"));\n}\n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/io.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/io_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":290,"cells":{"ID":{"kind":"string","value":"7aae7bdd-3782-4a7a-a22a-065ac38df147"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/quiche"},"File Name":{"kind":"string","value":"quic_unacked_packet_map"},"File Path in Repository":{"kind":"string","value":"quiche/quic/core/quic_unacked_packet_map.cc"},"File Path for Unit Test":{"kind":"string","value":"quiche/quic/core/quic_unacked_packet_map_test.cc"},"Code":{"kind":"string","value":"#include \"quiche/quic/core/quic_unacked_packet_map.h\"\n#include \n#include \n#include \n#include \n#include \"absl/container/inlined_vector.h\"\n#include \"quiche/quic/core/quic_connection_stats.h\"\n#include \"quiche/quic/core/quic_packet_number.h\"\n#include \"quiche/quic/core/quic_types.h\"\n#include \"quiche/quic/core/quic_utils.h\"\n#include \"quiche/quic/platform/api/quic_bug_tracker.h\"\n#include \"quiche/quic/platform/api/quic_flag_utils.h\"\nnamespace quic {\nnamespace {\nbool WillStreamFrameLengthSumWrapAround(QuicPacketLength lhs,\n QuicPacketLength rhs) {\n static_assert(\n std::is_unsigned::value,\n \"This function assumes QuicPacketLength is an unsigned integer type.\");\n return std::numeric_limits::max() - lhs < rhs;\n}\nenum QuicFrameTypeBitfield : uint32_t {\n kInvalidFrameBitfield = 0,\n kPaddingFrameBitfield = 1,\n kRstStreamFrameBitfield = 1 << 1,\n kConnectionCloseFrameBitfield = 1 << 2,\n kGoawayFrameBitfield = 1 << 3,\n kWindowUpdateFrameBitfield = 1 << 4,\n kBlockedFrameBitfield = 1 << 5,\n kStopWaitingFrameBitfield = 1 << 6,\n kPingFrameBitfield = 1 << 7,\n kCryptoFrameBitfield = 1 << 8,\n kHandshakeDoneFrameBitfield = 1 << 9,\n kStreamFrameBitfield = 1 << 10,\n kAckFrameBitfield = 1 << 11,\n kMtuDiscoveryFrameBitfield = 1 << 12,\n kNewConnectionIdFrameBitfield = 1 << 13,\n kMaxStreamsFrameBitfield = 1 << 14,\n kStreamsBlockedFrameBitfield = 1 << 15,\n kPathResponseFrameBitfield = 1 << 16,\n kPathChallengeFrameBitfield = 1 << 17,\n kStopSendingFrameBitfield = 1 << 18,\n 
kMessageFrameBitfield = 1 << 19,\n kNewTokenFrameBitfield = 1 << 20,\n kRetireConnectionIdFrameBitfield = 1 << 21,\n kAckFrequencyFrameBitfield = 1 << 22,\n kResetStreamAtFrameBitfield = 1 << 23,\n};\nQuicFrameTypeBitfield GetFrameTypeBitfield(QuicFrameType type) {\n switch (type) {\n case PADDING_FRAME:\n return kPaddingFrameBitfield;\n case RST_STREAM_FRAME:\n return kRstStreamFrameBitfield;\n case CONNECTION_CLOSE_FRAME:\n return kConnectionCloseFrameBitfield;\n case GOAWAY_FRAME:\n return kGoawayFrameBitfield;\n case WINDOW_UPDATE_FRAME:\n return kWindowUpdateFrameBitfield;\n case BLOCKED_FRAME:\n return kBlockedFrameBitfield;\n case STOP_WAITING_FRAME:\n return kStopWaitingFrameBitfield;\n case PING_FRAME:\n return kPingFrameBitfield;\n case CRYPTO_FRAME:\n return kCryptoFrameBitfield;\n case HANDSHAKE_DONE_FRAME:\n return kHandshakeDoneFrameBitfield;\n case STREAM_FRAME:\n return kStreamFrameBitfield;\n case ACK_FRAME:\n return kAckFrameBitfield;\n case MTU_DISCOVERY_FRAME:\n return kMtuDiscoveryFrameBitfield;\n case NEW_CONNECTION_ID_FRAME:\n return kNewConnectionIdFrameBitfield;\n case MAX_STREAMS_FRAME:\n return kMaxStreamsFrameBitfield;\n case STREAMS_BLOCKED_FRAME:\n return kStreamsBlockedFrameBitfield;\n case PATH_RESPONSE_FRAME:\n return kPathResponseFrameBitfield;\n case PATH_CHALLENGE_FRAME:\n return kPathChallengeFrameBitfield;\n case STOP_SENDING_FRAME:\n return kStopSendingFrameBitfield;\n case MESSAGE_FRAME:\n return kMessageFrameBitfield;\n case NEW_TOKEN_FRAME:\n return kNewTokenFrameBitfield;\n case RETIRE_CONNECTION_ID_FRAME:\n return kRetireConnectionIdFrameBitfield;\n case ACK_FREQUENCY_FRAME:\n return kAckFrequencyFrameBitfield;\n case RESET_STREAM_AT_FRAME:\n return kResetStreamAtFrameBitfield;\n case NUM_FRAME_TYPES:\n QUIC_BUG(quic_bug_10518_1) << \"Unexpected frame type\";\n return kInvalidFrameBitfield;\n }\n QUIC_BUG(quic_bug_10518_2) << \"Unexpected frame type\";\n return kInvalidFrameBitfield;\n}\n} \nQuicUnackedPacketMap::QuicUnackedPacketMap(Perspective perspective)\n : perspective_(perspective),\n least_unacked_(FirstSendingPacketNumber()),\n bytes_in_flight_(0),\n bytes_in_flight_per_packet_number_space_{0, 0, 0},\n packets_in_flight_(0),\n last_inflight_packet_sent_time_(QuicTime::Zero()),\n last_inflight_packets_sent_time_{\n {QuicTime::Zero()}, {QuicTime::Zero()}, {QuicTime::Zero()}},\n last_crypto_packet_sent_time_(QuicTime::Zero()),\n session_notifier_(nullptr),\n supports_multiple_packet_number_spaces_(false) {}\nQuicUnackedPacketMap::~QuicUnackedPacketMap() {\n for (QuicTransmissionInfo& transmission_info : unacked_packets_) {\n DeleteFrames(&(transmission_info.retransmittable_frames));\n }\n}\nconst QuicTransmissionInfo& QuicUnackedPacketMap::AddDispatcherSentPacket(\n const DispatcherSentPacket& packet) {\n QuicPacketNumber packet_number = packet.packet_number;\n QUICHE_DCHECK_EQ(least_unacked_, FirstSendingPacketNumber());\n QUIC_BUG_IF(quic_unacked_map_dispatcher_packet_num_too_small,\n largest_sent_packet_.IsInitialized() &&\n largest_sent_packet_ >= packet_number)\n << \"largest_sent_packet_: \" << largest_sent_packet_\n << \", packet_number: \" << packet_number;\n QUICHE_DCHECK_GE(packet_number, least_unacked_ + unacked_packets_.size());\n while (least_unacked_ + unacked_packets_.size() < packet_number) {\n unacked_packets_.push_back(QuicTransmissionInfo());\n unacked_packets_.back().state = NEVER_SENT;\n }\n QuicTransmissionInfo& info =\n unacked_packets_.emplace_back(ENCRYPTION_INITIAL, NOT_RETRANSMISSION,\n packet.sent_time, 
packet.bytes_sent,\n false,\n false, ECN_NOT_ECT);\n QUICHE_DCHECK(!info.in_flight);\n info.state = NOT_CONTRIBUTING_RTT;\n info.largest_acked = packet.largest_acked;\n largest_sent_largest_acked_.UpdateMax(packet.largest_acked);\n largest_sent_packet_ = packet_number;\n return info;\n}\nvoid QuicUnackedPacketMap::AddSentPacket(SerializedPacket* mutable_packet,\n TransmissionType transmission_type,\n QuicTime sent_time, bool set_in_flight,\n bool measure_rtt,\n QuicEcnCodepoint ecn_codepoint) {\n const SerializedPacket& packet = *mutable_packet;\n QuicPacketNumber packet_number = packet.packet_number;\n QuicPacketLength bytes_sent = packet.encrypted_length;\n QUIC_BUG_IF(quic_bug_12645_1, largest_sent_packet_.IsInitialized() &&\n largest_sent_packet_ >= packet_number)\n << \"largest_sent_packet_: \" << largest_sent_packet_\n << \", packet_number: \" << packet_number;\n QUICHE_DCHECK_GE(packet_number, least_unacked_ + unacked_packets_.size());\n while (least_unacked_ + unacked_packets_.size() < packet_number) {\n unacked_packets_.push_back(QuicTransmissionInfo());\n unacked_packets_.back().state = NEVER_SENT;\n }\n const bool has_crypto_handshake = packet.has_crypto_handshake == IS_HANDSHAKE;\n QuicTransmissionInfo info(packet.encryption_level, transmission_type,\n sent_time, bytes_sent, has_crypto_handshake,\n packet.has_ack_frequency, ecn_codepoint);\n info.largest_acked = packet.largest_acked;\n largest_sent_largest_acked_.UpdateMax(packet.largest_acked);\n if (!measure_rtt) {\n QUIC_BUG_IF(quic_bug_12645_2, set_in_flight)\n << \"Packet \" << mutable_packet->packet_number << \", transmission type \"\n << TransmissionTypeToString(mutable_packet->transmission_type)\n << \", retransmittable frames: \"\n << QuicFramesToString(mutable_packet->retransmittable_frames)\n << \", nonretransmittable_frames: \"\n << QuicFramesToString(mutable_packet->nonretransmittable_frames);\n info.state = NOT_CONTRIBUTING_RTT;\n }\n largest_sent_packet_ = packet_number;\n if (set_in_flight) {\n const PacketNumberSpace packet_number_space =\n GetPacketNumberSpace(info.encryption_level);\n bytes_in_flight_ += bytes_sent;\n bytes_in_flight_per_packet_number_space_[packet_number_space] += bytes_sent;\n ++packets_in_flight_;\n info.in_flight = true;\n largest_sent_retransmittable_packets_[packet_number_space] = packet_number;\n last_inflight_packet_sent_time_ = sent_time;\n last_inflight_packets_sent_time_[packet_number_space] = sent_time;\n }\n unacked_packets_.push_back(std::move(info));\n if (has_crypto_handshake) {\n last_crypto_packet_sent_time_ = sent_time;\n }\n mutable_packet->retransmittable_frames.swap(\n unacked_packets_.back().retransmittable_frames);\n}\nvoid QuicUnackedPacketMap::RemoveObsoletePackets() {\n while (!unacked_packets_.empty()) {\n if (!IsPacketUseless(least_unacked_, unacked_packets_.front())) {\n break;\n }\n DeleteFrames(&unacked_packets_.front().retransmittable_frames);\n unacked_packets_.pop_front();\n ++least_unacked_;\n }\n}\nbool QuicUnackedPacketMap::HasRetransmittableFrames(\n QuicPacketNumber packet_number) const {\n QUICHE_DCHECK_GE(packet_number, least_unacked_);\n QUICHE_DCHECK_LT(packet_number, least_unacked_ + unacked_packets_.size());\n return HasRetransmittableFrames(\n unacked_packets_[packet_number - least_unacked_]);\n}\nbool QuicUnackedPacketMap::HasRetransmittableFrames(\n const QuicTransmissionInfo& info) const {\n if (!QuicUtils::IsAckable(info.state)) {\n return false;\n }\n for (const auto& frame : info.retransmittable_frames) {\n if 
(session_notifier_->IsFrameOutstanding(frame)) {\n return true;\n }\n }\n return false;\n}\nvoid QuicUnackedPacketMap::RemoveRetransmittability(\n QuicTransmissionInfo* info) {\n DeleteFrames(&info->retransmittable_frames);\n info->first_sent_after_loss.Clear();\n}\nvoid QuicUnackedPacketMap::RemoveRetransmittability(\n QuicPacketNumber packet_number) {\n QUICHE_DCHECK_GE(packet_number, least_unacked_);\n QUICHE_DCHECK_LT(packet_number, least_unacked_ + unacked_packets_.size());\n QuicTransmissionInfo* info =\n &unacked_packets_[packet_number - least_unacked_];\n RemoveRetransmittability(info);\n}\nvoid QuicUnackedPacketMap::IncreaseLargestAcked(\n QuicPacketNumber largest_acked) {\n QUICHE_DCHECK(!largest_acked_.IsInitialized() ||\n largest_acked_ <= largest_acked);\n largest_acked_ = largest_acked;\n}\nvoid QuicUnackedPacketMap::MaybeUpdateLargestAckedOfPacketNumberSpace(\n PacketNumberSpace packet_number_space, QuicPacketNumber packet_number) {\n largest_acked_packets_[packet_number_space].UpdateMax(packet_number);\n}\nbool QuicUnackedPacketMap::IsPacketUsefulForMeasuringRtt(\n QuicPacketNumber packet_number, const QuicTransmissionInfo& info) const {\n return QuicUtils::IsAckable(info.state) &&\n (!largest_acked_.IsInitialized() || packet_number > largest_acked_) &&\n info.state != NOT_CONTRIBUTING_RTT;\n}\nbool QuicUnackedPacketMap::IsPacketUsefulForCongestionControl(\n const QuicTransmissionInfo& info) const {\n return info.in_flight;\n}\nbool QuicUnackedPacketMap::IsPacketUsefulForRetransmittableData(\n const QuicTransmissionInfo& info) const {\n return info.first_sent_after_loss.IsInitialized() &&\n (!largest_acked_.IsInitialized() ||\n info.first_sent_after_loss > largest_acked_);\n}\nbool QuicUnackedPacketMap::IsPacketUseless(\n QuicPacketNumber packet_number, const QuicTransmissionInfo& info) const {\n return !IsPacketUsefulForMeasuringRtt(packet_number, info) &&\n !IsPacketUsefulForCongestionControl(info) &&\n !IsPacketUsefulForRetransmittableData(info);\n}\nbool QuicUnackedPacketMap::IsUnacked(QuicPacketNumber packet_number) const {\n if (packet_number < least_unacked_ ||\n packet_number >= least_unacked_ + unacked_packets_.size()) {\n return false;\n }\n return !IsPacketUseless(packet_number,\n unacked_packets_[packet_number - least_unacked_]);\n}\nvoid QuicUnackedPacketMap::RemoveFromInFlight(QuicTransmissionInfo* info) {\n if (info->in_flight) {\n QUIC_BUG_IF(quic_bug_12645_3, bytes_in_flight_ < info->bytes_sent);\n QUIC_BUG_IF(quic_bug_12645_4, packets_in_flight_ == 0);\n bytes_in_flight_ -= info->bytes_sent;\n --packets_in_flight_;\n const PacketNumberSpace packet_number_space =\n GetPacketNumberSpace(info->encryption_level);\n if (bytes_in_flight_per_packet_number_space_[packet_number_space] <\n info->bytes_sent) {\n QUIC_BUG(quic_bug_10518_3)\n << \"bytes_in_flight: \"\n << bytes_in_flight_per_packet_number_space_[packet_number_space]\n << \" is smaller than bytes_sent: \" << info->bytes_sent\n << \" for packet number space: \"\n << PacketNumberSpaceToString(packet_number_space);\n bytes_in_flight_per_packet_number_space_[packet_number_space] = 0;\n } else {\n bytes_in_flight_per_packet_number_space_[packet_number_space] -=\n info->bytes_sent;\n }\n if (bytes_in_flight_per_packet_number_space_[packet_number_space] == 0) {\n last_inflight_packets_sent_time_[packet_number_space] = QuicTime::Zero();\n }\n info->in_flight = false;\n }\n}\nvoid QuicUnackedPacketMap::RemoveFromInFlight(QuicPacketNumber packet_number) {\n QUICHE_DCHECK_GE(packet_number, least_unacked_);\n 
QUICHE_DCHECK_LT(packet_number, least_unacked_ + unacked_packets_.size());\n QuicTransmissionInfo* info =\n &unacked_packets_[packet_number - least_unacked_];\n RemoveFromInFlight(info);\n}\nabsl::InlinedVector\nQuicUnackedPacketMap::NeuterUnencryptedPackets() {\n absl::InlinedVector neutered_packets;\n QuicPacketNumber packet_number = GetLeastUnacked();\n for (QuicUnackedPacketMap::iterator it = begin(); it != end();\n ++it, ++packet_number) {\n if (!it->retransmittable_frames.empty() &&\n it->encryption_level == ENCRYPTION_INITIAL) {\n QUIC_DVLOG(2) << \"Neutering unencrypted packet \" << packet_number;\n RemoveFromInFlight(packet_number);\n it->state = NEUTERED;\n neutered_packets.push_back(packet_number);\n NotifyFramesAcked(*it, QuicTime::Delta::Zero(), QuicTime::Zero());\n QUICHE_DCHECK(!HasRetransmittableFrames(*it));\n }\n }\n QUICHE_DCHECK(!supports_multiple_packet_number_spaces_ ||\n last_inflight_packets_sent_time_[INITIAL_DATA] ==\n QuicTime::Zero());\n return neutered_packets;\n}\nabsl::InlinedVector\nQuicUnackedPacketMap::NeuterHandshakePackets() {\n absl::InlinedVector neutered_packets;\n QuicPacketNumber packet_number = GetLeastUnacked();\n for (QuicUnackedPacketMap::iterator it = begin(); it != end();\n ++it, ++packet_number) {\n if (!it->retransmittable_frames.empty() &&\n GetPacketNumberSpace(it->encryption_level) == HANDSHAKE_DATA) {\n QUIC_DVLOG(2) << \"Neutering handshake packet \" << packet_number;\n RemoveFromInFlight(packet_number);\n it->state = NEUTERED;\n neutered_packets.push_back(packet_number);\n NotifyFramesAcked(*it, QuicTime::Delta::Zero(), QuicTime::Zero());\n }\n }\n QUICHE_DCHECK(!supports_multiple_packet_number_spaces() ||\n last_inflight_packets_sent_time_[HANDSHAKE_DATA] ==\n QuicTime::Zero());\n return neutered_packets;\n}\nbool QuicUnackedPacketMap::HasInFlightPackets() const {\n return bytes_in_flight_ > 0;\n}\nconst QuicTransmissionInfo& QuicUnackedPacketMap::GetTransmissionInfo(\n QuicPacketNumber packet_number) const {\n return unacked_packets_[packet_number - least_unacked_];\n}\nQuicTransmissionInfo* QuicUnackedPacketMap::GetMutableTransmissionInfo(\n QuicPacketNumber packet_number) {\n return &unacked_packets_[packet_number - least_unacked_];\n}\nQuicTime QuicUnackedPacketMap::GetLastInFlightPacketSentTime() const {\n return last_inflight_packet_sent_time_;\n}\nQuicTime QuicUnackedPacketMap::GetLastCryptoPacketSentTime() const {\n return last_crypto_packet_sent_time_;\n}\nsize_t QuicUnackedPacketMap::GetNumUnackedPacketsDebugOnly() const {\n size_t unacked_packet_count = 0;\n QuicPacketNumber packet_number = least_unacked_;\n for (auto it = begin(); it != end(); ++it, ++packet_number) {\n if (!IsPacketUseless(packet_number, *it)) {\n ++unacked_packet_count;\n }\n }\n return unacked_packet_count;\n}\nbool QuicUnackedPacketMap::HasMultipleInFlightPackets() const {\n if (bytes_in_flight_ > kDefaultTCPMSS) {\n return true;\n }\n size_t num_in_flight = 0;\n for (auto it = rbegin(); it != rend(); ++it) {\n if (it->in_flight) {\n ++num_in_flight;\n }\n if (num_in_flight > 1) {\n return true;\n }\n }\n return false;\n}\nbool QuicUnackedPacketMap::HasPendingCryptoPackets() const {\n return session_notifier_->HasUnackedCryptoData();\n}\nbool QuicUnackedPacketMap::HasUnackedRetransmittableFrames() const {\n for (auto it = rbegin(); it != rend(); ++it) {\n if (it->in_flight && HasRetransmittableFrames(*it)) {\n return true;\n }\n }\n return false;\n}\nQuicPacketNumber QuicUnackedPacketMap::GetLeastUnacked() const {\n return least_unacked_;\n}\nvoid 
QuicUnackedPacketMap::SetSessionNotifier(\n SessionNotifierInterface* session_notifier) {\n session_notifier_ = session_notifier;\n}\nbool QuicUnackedPacketMap::NotifyFramesAcked(const QuicTransmissionInfo& info,\n QuicTime::Delta ack_delay,\n QuicTime receive_timestamp) {\n if (session_notifier_ == nullptr) {\n return false;\n }\n bool new_data_acked = false;\n for (const QuicFrame& frame : info.retransmittable_frames) {\n if (session_notifier_->OnFrameAcked(frame, ack_delay, receive_timestamp)) {\n new_data_acked = true;\n }\n }\n return new_data_acked;\n}\nvoid QuicUnackedPacketMap::NotifyFramesLost(const QuicTransmissionInfo& info,\n TransmissionType ) {\n for (const QuicFrame& frame : info.retransmittable_frames) {\n session_notifier_->OnFrameLost(frame);\n }\n}\nbool QuicUnackedPacketMap::RetransmitFrames(const QuicFrames& frames,\n TransmissionType type) {\n return session_notifier_->RetransmitFrames(frames, type);\n}\nvoid QuicUnackedPacketMap::MaybeAggregateAckedStreamFrame(\n const QuicTransmissionInfo& info, QuicTime::Delta ack_delay,\n QuicTime receive_timestamp) {\n if (session_notifier_ == nullptr) {\n return;\n }\n for (const auto& frame : info.retransmittable_frames) {\n const bool can_aggregate =\n frame.type == STREAM_FRAME &&\n frame.stream_frame.stream_id == aggregated_stream_frame_.stream_id &&\n frame.stream_frame.offset == aggregated_stream_frame_.offset +\n aggregated_stream_frame_.data_length &&\n !WillStreamFrameLengthSumWrapAround(\n aggregated_stream_frame_.data_length,\n frame.stream_frame.data_length);\n if (can_aggregate) {\n aggregated_stream_frame_.data_length += frame.stream_frame.data_length;\n aggregated_stream_frame_.fin = frame.stream_frame.fin;\n if (aggregated_stream_frame_.fin) {\n NotifyAggregatedStreamFrameAcked(ack_delay);\n }\n continue;\n }\n NotifyAggregatedStreamFrameAcked(ack_delay);\n if (frame.type != STREAM_FRAME || frame.stream_frame.fin) {\n session_notifier_->OnFrameAcked(frame, ack_delay, receive_timestamp);\n continue;\n }\n aggregated_stream_frame_.stream_id = frame.stream_frame.stream_id;\n aggregated_stream_frame_.offset = frame.stream_frame.offset;\n aggregated_stream_frame_.data_length = frame.stream_frame.data_length;\n aggregated_stream_frame_.fin = frame.stream_frame.fin;\n }\n}\nvoid QuicUnackedPacketMap::NotifyAggregatedStreamFrameAcked(\n QuicTime::Delta ack_delay) {\n if (aggregated_stream_frame_.stream_id == static_cast(-1) ||\n session_notifier_ == nullptr) {\n return;\n }\n session_notifier_->OnFrameAcked(QuicFrame(aggregated_stream_frame_),\n ack_delay,\n QuicTime::Zero());\n aggregated_stream_frame_.stream_id = -1;\n}\nPacketNumberSpace QuicUnackedPacketMap::GetPacketNumberSpace(\n QuicPacketNumber packet_number) const {\n return GetPacketNumberSpace(\n GetTransmissionInfo(packet_number).encryption_level);\n}\nPacketNumberSpace QuicUnackedPacketMap::GetPacketNumberSpace(\n EncryptionLevel encryption_level) const {\n if (supports_multiple_packet_number_spaces_) {\n return QuicUtils::GetPacketNumberSpace(encryption_level);\n }\n if (perspective_ == Perspective::IS_CLIENT) {\n return encryption_level == ENCRYPTION_INITIAL ? HANDSHAKE_DATA\n : APPLICATION_DATA;\n }\n return encryption_level == ENCRYPTION_FORWARD_SECURE ? 
APPLICATION_DATA\n : HANDSHAKE_DATA;\n}\nQuicPacketNumber QuicUnackedPacketMap::GetLargestAckedOfPacketNumberSpace(\n PacketNumberSpace packet_number_space) const {\n if (packet_number_space >= NUM_PACKET_NUMBER_SPACES) {\n QUIC_BUG(quic_bug_10518_4)\n << \"Invalid packet number space: \" << packet_number_space;\n return QuicPacketNumber();\n }\n return largest_acked_packets_[packet_number_space];\n}\nQuicTime QuicUnackedPacketMap::GetLastInFlightPacketSentTime(\n PacketNumberSpace packet_number_space) const {\n if (packet_number_space >= NUM_PACKET_NUMBER_SPACES) {\n QUIC_BUG(quic_bug_10518_5)\n << \"Invalid packet number space: \" << packet_number_space;\n return QuicTime::Zero();\n }\n return last_inflight_packets_sent_time_[packet_number_space];\n}\nQuicPacketNumber\nQuicUnackedPacketMap::GetLargestSentRetransmittableOfPacketNumberSpace(\n PacketNumberSpace packet_number_space) const {\n if (packet_number_space >= NUM_PACKET_NUMBER_SPACES) {\n QUIC_BUG(quic_bug_10518_6)\n << \"Invalid packet number space: \" << packet_number_space;\n return QuicPacketNumber();\n }\n return largest_sent_retransmittable_packets_[packet_number_space];\n}\nconst QuicTransmissionInfo*\nQuicUnackedPacketMap::GetFirstInFlightTransmissionInfo() const {\n QUICHE_DCHECK(HasInFlightPackets());\n for (auto it = begin(); it != end(); ++it) {\n if (it->in_flight) {\n return &(*it);\n }\n }\n QUICHE_DCHECK(false);\n return nullptr;\n}\nconst QuicTransmissionInfo*\nQuicUnackedPacketMap::GetFirstInFlightTransmissionInfoOfSpace(\n PacketNumberSpace packet_number_space) const {\n for (auto it = begin(); it != end(); ++it) {\n if (it->in_flight &&\n GetPacketNumberSpace(it->encryption_level) == packet_number_space) {\n return &(*it);\n }\n }\n return nullptr;\n}\nvoid QuicUnackedPacketMap::EnableMultiplePacketNumberSpacesSupport() {\n if (supports_multiple_packet_number_spaces_) {\n QUIC_BUG(quic_bug_10518_7)\n << \"Multiple packet number spaces has already been enabled\";\n return;\n }\n if (largest_sent_packet_.IsInitialized()) {\n QUIC_BUG(quic_bug_10518_8)\n << \"Try to enable multiple packet number spaces support after any \"\n \"packet has been sent.\";\n return;\n }\n supports_multiple_packet_number_spaces_ = true;\n}\nint32_t QuicUnackedPacketMap::GetLastPacketContent() const {\n if (empty()) {\n return -1;\n }\n int32_t content = 0;\n const QuicTransmissionInfo& last_packet = unacked_packets_.back();\n for (const auto& frame : last_packet.retransmittable_frames) {\n content |= GetFrameTypeBitfield(frame.type);\n }\n if (last_packet.largest_acked.IsInitialized()) {\n content |= GetFrameTypeBitfield(ACK_FRAME);\n }\n return content;\n}\n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"quiche/quic/core/quic_unacked_packet_map.h\"\n#include \n#include \n#include \n#include \"absl/base/macros.h\"\n#include \"quiche/quic/core/frames/quic_stream_frame.h\"\n#include \"quiche/quic/core/quic_packet_number.h\"\n#include \"quiche/quic/core/quic_transmission_info.h\"\n#include \"quiche/quic/core/quic_utils.h\"\n#include \"quiche/quic/platform/api/quic_test.h\"\n#include \"quiche/quic/test_tools/quic_test_utils.h\"\n#include \"quiche/quic/test_tools/quic_unacked_packet_map_peer.h\"\nusing testing::_;\nusing testing::Return;\nusing testing::StrictMock;\nnamespace quic {\nnamespace test {\nnamespace {\nconst uint32_t kDefaultLength = 1000;\nclass QuicUnackedPacketMapTest : public QuicTestWithParam {\n protected:\n QuicUnackedPacketMapTest()\n : unacked_packets_(GetParam()),\n now_(QuicTime::Zero() + 
QuicTime::Delta::FromMilliseconds(1000)) {\n unacked_packets_.SetSessionNotifier(&notifier_);\n EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(true));\n EXPECT_CALL(notifier_, OnStreamFrameRetransmitted(_))\n .Times(testing::AnyNumber());\n }\n ~QuicUnackedPacketMapTest() override {}\n SerializedPacket CreateRetransmittablePacket(uint64_t packet_number) {\n return CreateRetransmittablePacketForStream(\n packet_number, QuicUtils::GetFirstBidirectionalStreamId(\n CurrentSupportedVersions()[0].transport_version,\n Perspective::IS_CLIENT));\n }\n SerializedPacket CreateRetransmittablePacketForStream(\n uint64_t packet_number, QuicStreamId stream_id) {\n SerializedPacket packet(QuicPacketNumber(packet_number),\n PACKET_1BYTE_PACKET_NUMBER, nullptr, kDefaultLength,\n false, false);\n QuicStreamFrame frame;\n frame.stream_id = stream_id;\n packet.retransmittable_frames.push_back(QuicFrame(frame));\n return packet;\n }\n SerializedPacket CreateNonRetransmittablePacket(uint64_t packet_number) {\n return SerializedPacket(QuicPacketNumber(packet_number),\n PACKET_1BYTE_PACKET_NUMBER, nullptr, kDefaultLength,\n false, false);\n }\n void VerifyInFlightPackets(uint64_t* packets, size_t num_packets) {\n unacked_packets_.RemoveObsoletePackets();\n if (num_packets == 0) {\n EXPECT_FALSE(unacked_packets_.HasInFlightPackets());\n EXPECT_FALSE(unacked_packets_.HasMultipleInFlightPackets());\n return;\n }\n if (num_packets == 1) {\n EXPECT_TRUE(unacked_packets_.HasInFlightPackets());\n EXPECT_FALSE(unacked_packets_.HasMultipleInFlightPackets());\n ASSERT_TRUE(unacked_packets_.IsUnacked(QuicPacketNumber(packets[0])));\n EXPECT_TRUE(\n unacked_packets_.GetTransmissionInfo(QuicPacketNumber(packets[0]))\n .in_flight);\n }\n for (size_t i = 0; i < num_packets; ++i) {\n ASSERT_TRUE(unacked_packets_.IsUnacked(QuicPacketNumber(packets[i])));\n EXPECT_TRUE(\n unacked_packets_.GetTransmissionInfo(QuicPacketNumber(packets[i]))\n .in_flight);\n }\n size_t in_flight_count = 0;\n for (auto it = unacked_packets_.begin(); it != unacked_packets_.end();\n ++it) {\n if (it->in_flight) {\n ++in_flight_count;\n }\n }\n EXPECT_EQ(num_packets, in_flight_count);\n }\n void VerifyUnackedPackets(uint64_t* packets, size_t num_packets) {\n unacked_packets_.RemoveObsoletePackets();\n if (num_packets == 0) {\n EXPECT_TRUE(unacked_packets_.empty());\n EXPECT_FALSE(unacked_packets_.HasUnackedRetransmittableFrames());\n return;\n }\n EXPECT_FALSE(unacked_packets_.empty());\n for (size_t i = 0; i < num_packets; ++i) {\n EXPECT_TRUE(unacked_packets_.IsUnacked(QuicPacketNumber(packets[i])))\n << packets[i];\n }\n EXPECT_EQ(num_packets, unacked_packets_.GetNumUnackedPacketsDebugOnly());\n }\n void VerifyRetransmittablePackets(uint64_t* packets, size_t num_packets) {\n unacked_packets_.RemoveObsoletePackets();\n size_t num_retransmittable_packets = 0;\n for (auto it = unacked_packets_.begin(); it != unacked_packets_.end();\n ++it) {\n if (unacked_packets_.HasRetransmittableFrames(*it)) {\n ++num_retransmittable_packets;\n }\n }\n EXPECT_EQ(num_packets, num_retransmittable_packets);\n for (size_t i = 0; i < num_packets; ++i) {\n EXPECT_TRUE(unacked_packets_.HasRetransmittableFrames(\n QuicPacketNumber(packets[i])))\n << \" packets[\" << i << \"]:\" << packets[i];\n }\n }\n void UpdatePacketState(uint64_t packet_number, SentPacketState state) {\n unacked_packets_\n .GetMutableTransmissionInfo(QuicPacketNumber(packet_number))\n ->state = state;\n }\n void RetransmitAndSendPacket(uint64_t old_packet_number,\n uint64_t 
new_packet_number,\n TransmissionType transmission_type) {\n QUICHE_DCHECK(unacked_packets_.HasRetransmittableFrames(\n QuicPacketNumber(old_packet_number)));\n QuicTransmissionInfo* info = unacked_packets_.GetMutableTransmissionInfo(\n QuicPacketNumber(old_packet_number));\n QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(\n CurrentSupportedVersions()[0].transport_version,\n Perspective::IS_CLIENT);\n for (const auto& frame : info->retransmittable_frames) {\n if (frame.type == STREAM_FRAME) {\n stream_id = frame.stream_frame.stream_id;\n break;\n }\n }\n UpdatePacketState(\n old_packet_number,\n QuicUtils::RetransmissionTypeToPacketState(transmission_type));\n info->first_sent_after_loss = QuicPacketNumber(new_packet_number);\n SerializedPacket packet(\n CreateRetransmittablePacketForStream(new_packet_number, stream_id));\n unacked_packets_.AddSentPacket(&packet, transmission_type, now_, true, true,\n ECN_NOT_ECT);\n }\n QuicUnackedPacketMap unacked_packets_;\n QuicTime now_;\n StrictMock<MockSessionNotifier> notifier_;\n};\nINSTANTIATE_TEST_SUITE_P(Tests, QuicUnackedPacketMapTest,\n ::testing::ValuesIn({Perspective::IS_CLIENT,\n Perspective::IS_SERVER}),\n ::testing::PrintToStringParamName());\nTEST_P(QuicUnackedPacketMapTest, RttOnly) {\n SerializedPacket packet(CreateNonRetransmittablePacket(1));\n unacked_packets_.AddSentPacket(&packet, NOT_RETRANSMISSION, now_, false, true,\n ECN_NOT_ECT);\n uint64_t unacked[] = {1};\n VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyInFlightPackets(nullptr, 0);\n VerifyRetransmittablePackets(nullptr, 0);\n unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(1));\n VerifyUnackedPackets(nullptr, 0);\n VerifyInFlightPackets(nullptr, 0);\n VerifyRetransmittablePackets(nullptr, 0);\n}\nTEST_P(QuicUnackedPacketMapTest, RetransmittableInflightAndRtt) {\n SerializedPacket packet(CreateRetransmittablePacket(1));\n unacked_packets_.AddSentPacket(&packet, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n uint64_t unacked[] = {1};\n VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyRetransmittablePackets(unacked, ABSL_ARRAYSIZE(unacked));\n unacked_packets_.RemoveRetransmittability(QuicPacketNumber(1));\n VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyRetransmittablePackets(nullptr, 0);\n unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(1));\n VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyRetransmittablePackets(nullptr, 0);\n unacked_packets_.RemoveFromInFlight(QuicPacketNumber(1));\n VerifyUnackedPackets(nullptr, 0);\n VerifyInFlightPackets(nullptr, 0);\n VerifyRetransmittablePackets(nullptr, 0);\n}\nTEST_P(QuicUnackedPacketMapTest, StopRetransmission) {\n const QuicStreamId stream_id = 2;\n SerializedPacket packet(CreateRetransmittablePacketForStream(1, stream_id));\n unacked_packets_.AddSentPacket(&packet, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n uint64_t unacked[] = {1};\n VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));\n uint64_t retransmittable[] = {1};\n VerifyRetransmittablePackets(retransmittable,\n ABSL_ARRAYSIZE(retransmittable));\n EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false));\n VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyInFlightPackets(unacked, 
ABSL_ARRAYSIZE(unacked));\n VerifyRetransmittablePackets(nullptr, 0);\n}\nTEST_P(QuicUnackedPacketMapTest, StopRetransmissionOnOtherStream) {\n const QuicStreamId stream_id = 2;\n SerializedPacket packet(CreateRetransmittablePacketForStream(1, stream_id));\n unacked_packets_.AddSentPacket(&packet, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n uint64_t unacked[] = {1};\n VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));\n uint64_t retransmittable[] = {1};\n VerifyRetransmittablePackets(retransmittable,\n ABSL_ARRAYSIZE(retransmittable));\n VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyRetransmittablePackets(retransmittable,\n ABSL_ARRAYSIZE(retransmittable));\n}\nTEST_P(QuicUnackedPacketMapTest, StopRetransmissionAfterRetransmission) {\n const QuicStreamId stream_id = 2;\n SerializedPacket packet1(CreateRetransmittablePacketForStream(1, stream_id));\n unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n RetransmitAndSendPacket(1, 2, LOSS_RETRANSMISSION);\n uint64_t unacked[] = {1, 2};\n VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));\n std::vector<uint64_t> retransmittable = {1, 2};\n VerifyRetransmittablePackets(&retransmittable[0], retransmittable.size());\n EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false));\n VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyRetransmittablePackets(nullptr, 0);\n}\nTEST_P(QuicUnackedPacketMapTest, RetransmittedPacket) {\n SerializedPacket packet1(CreateRetransmittablePacket(1));\n unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n RetransmitAndSendPacket(1, 2, LOSS_RETRANSMISSION);\n uint64_t unacked[] = {1, 2};\n VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));\n std::vector<uint64_t> retransmittable = {1, 2};\n VerifyRetransmittablePackets(&retransmittable[0], retransmittable.size());\n EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false));\n unacked_packets_.RemoveRetransmittability(QuicPacketNumber(1));\n VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyRetransmittablePackets(nullptr, 0);\n unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(2));\n VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyRetransmittablePackets(nullptr, 0);\n unacked_packets_.RemoveFromInFlight(QuicPacketNumber(2));\n uint64_t unacked2[] = {1};\n VerifyUnackedPackets(unacked2, ABSL_ARRAYSIZE(unacked2));\n VerifyInFlightPackets(unacked2, ABSL_ARRAYSIZE(unacked2));\n VerifyRetransmittablePackets(nullptr, 0);\n unacked_packets_.RemoveFromInFlight(QuicPacketNumber(1));\n VerifyUnackedPackets(nullptr, 0);\n VerifyInFlightPackets(nullptr, 0);\n VerifyRetransmittablePackets(nullptr, 0);\n}\nTEST_P(QuicUnackedPacketMapTest, RetransmitThreeTimes) {\n SerializedPacket packet1(CreateRetransmittablePacket(1));\n unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n SerializedPacket packet2(CreateRetransmittablePacket(2));\n unacked_packets_.AddSentPacket(&packet2, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n uint64_t 
unacked[] = {1, 2};\n VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));\n uint64_t retransmittable[] = {1, 2};\n VerifyRetransmittablePackets(retransmittable,\n ABSL_ARRAYSIZE(retransmittable));\n unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(2));\n unacked_packets_.RemoveFromInFlight(QuicPacketNumber(2));\n unacked_packets_.RemoveRetransmittability(QuicPacketNumber(2));\n unacked_packets_.RemoveFromInFlight(QuicPacketNumber(1));\n RetransmitAndSendPacket(1, 3, LOSS_RETRANSMISSION);\n SerializedPacket packet4(CreateRetransmittablePacket(4));\n unacked_packets_.AddSentPacket(&packet4, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n uint64_t unacked2[] = {1, 3, 4};\n VerifyUnackedPackets(unacked2, ABSL_ARRAYSIZE(unacked2));\n uint64_t pending2[] = {3, 4};\n VerifyInFlightPackets(pending2, ABSL_ARRAYSIZE(pending2));\n std::vector<uint64_t> retransmittable2 = {1, 3, 4};\n VerifyRetransmittablePackets(&retransmittable2[0], retransmittable2.size());\n unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(4));\n unacked_packets_.RemoveFromInFlight(QuicPacketNumber(4));\n unacked_packets_.RemoveRetransmittability(QuicPacketNumber(4));\n RetransmitAndSendPacket(3, 5, LOSS_RETRANSMISSION);\n SerializedPacket packet6(CreateRetransmittablePacket(6));\n unacked_packets_.AddSentPacket(&packet6, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n std::vector<uint64_t> unacked3 = {3, 5, 6};\n std::vector<uint64_t> retransmittable3 = {3, 5, 6};\n VerifyUnackedPackets(&unacked3[0], unacked3.size());\n VerifyRetransmittablePackets(&retransmittable3[0], retransmittable3.size());\n uint64_t pending3[] = {3, 5, 6};\n VerifyInFlightPackets(pending3, ABSL_ARRAYSIZE(pending3));\n unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(6));\n unacked_packets_.RemoveFromInFlight(QuicPacketNumber(6));\n unacked_packets_.RemoveRetransmittability(QuicPacketNumber(6));\n RetransmitAndSendPacket(5, 7, LOSS_RETRANSMISSION);\n std::vector<uint64_t> unacked4 = {3, 5, 7};\n std::vector<uint64_t> retransmittable4 = {3, 5, 7};\n VerifyUnackedPackets(&unacked4[0], unacked4.size());\n VerifyRetransmittablePackets(&retransmittable4[0], retransmittable4.size());\n uint64_t pending4[] = {3, 5, 7};\n VerifyInFlightPackets(pending4, ABSL_ARRAYSIZE(pending4));\n unacked_packets_.RemoveFromInFlight(QuicPacketNumber(3));\n unacked_packets_.RemoveFromInFlight(QuicPacketNumber(5));\n uint64_t pending5[] = {7};\n VerifyInFlightPackets(pending5, ABSL_ARRAYSIZE(pending5));\n}\nTEST_P(QuicUnackedPacketMapTest, RetransmitFourTimes) {\n SerializedPacket packet1(CreateRetransmittablePacket(1));\n unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n SerializedPacket packet2(CreateRetransmittablePacket(2));\n unacked_packets_.AddSentPacket(&packet2, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n uint64_t unacked[] = {1, 2};\n VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));\n VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));\n uint64_t retransmittable[] = {1, 2};\n VerifyRetransmittablePackets(retransmittable,\n ABSL_ARRAYSIZE(retransmittable));\n unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(2));\n unacked_packets_.RemoveFromInFlight(QuicPacketNumber(2));\n unacked_packets_.RemoveRetransmittability(QuicPacketNumber(2));\n unacked_packets_.RemoveFromInFlight(QuicPacketNumber(1));\n RetransmitAndSendPacket(1, 3, LOSS_RETRANSMISSION);\n uint64_t unacked2[] = {1, 3};\n VerifyUnackedPackets(unacked2, ABSL_ARRAYSIZE(unacked2));\n uint64_t 
pending2[] = {3};\n VerifyInFlightPackets(pending2, ABSL_ARRAYSIZE(pending2));\n std::vector<uint64_t> retransmittable2 = {1, 3};\n VerifyRetransmittablePackets(&retransmittable2[0], retransmittable2.size());\n RetransmitAndSendPacket(3, 4, PTO_RETRANSMISSION);\n SerializedPacket packet5(CreateRetransmittablePacket(5));\n unacked_packets_.AddSentPacket(&packet5, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n uint64_t unacked3[] = {1, 3, 4, 5};\n VerifyUnackedPackets(unacked3, ABSL_ARRAYSIZE(unacked3));\n uint64_t pending3[] = {3, 4, 5};\n VerifyInFlightPackets(pending3, ABSL_ARRAYSIZE(pending3));\n std::vector<uint64_t> retransmittable3 = {1, 3, 4, 5};\n VerifyRetransmittablePackets(&retransmittable3[0], retransmittable3.size());\n unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(5));\n unacked_packets_.RemoveFromInFlight(QuicPacketNumber(5));\n unacked_packets_.RemoveRetransmittability(QuicPacketNumber(5));\n unacked_packets_.RemoveFromInFlight(QuicPacketNumber(3));\n unacked_packets_.RemoveFromInFlight(QuicPacketNumber(4));\n RetransmitAndSendPacket(4, 6, LOSS_RETRANSMISSION);\n std::vector<uint64_t> unacked4 = {4, 6};\n VerifyUnackedPackets(&unacked4[0], unacked4.size());\n uint64_t pending4[] = {6};\n VerifyInFlightPackets(pending4, ABSL_ARRAYSIZE(pending4));\n std::vector<uint64_t> retransmittable4 = {4, 6};\n VerifyRetransmittablePackets(&retransmittable4[0], retransmittable4.size());\n}\nTEST_P(QuicUnackedPacketMapTest, SendWithGap) {\n SerializedPacket packet1(CreateRetransmittablePacket(1));\n unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n SerializedPacket packet3(CreateRetransmittablePacket(3));\n unacked_packets_.AddSentPacket(&packet3, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n RetransmitAndSendPacket(3, 5, LOSS_RETRANSMISSION);\n EXPECT_EQ(QuicPacketNumber(1u), unacked_packets_.GetLeastUnacked());\n EXPECT_TRUE(unacked_packets_.IsUnacked(QuicPacketNumber(1)));\n EXPECT_FALSE(unacked_packets_.IsUnacked(QuicPacketNumber(2)));\n EXPECT_TRUE(unacked_packets_.IsUnacked(QuicPacketNumber(3)));\n EXPECT_FALSE(unacked_packets_.IsUnacked(QuicPacketNumber(4)));\n EXPECT_TRUE(unacked_packets_.IsUnacked(QuicPacketNumber(5)));\n EXPECT_EQ(QuicPacketNumber(5u), unacked_packets_.largest_sent_packet());\n}\nTEST_P(QuicUnackedPacketMapTest, AggregateContiguousAckedStreamFrames) {\n testing::InSequence s;\n EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(0);\n unacked_packets_.NotifyAggregatedStreamFrameAcked(QuicTime::Delta::Zero());\n QuicTransmissionInfo info1;\n QuicStreamFrame stream_frame1(3, false, 0, 100);\n info1.retransmittable_frames.push_back(QuicFrame(stream_frame1));\n QuicTransmissionInfo info2;\n QuicStreamFrame stream_frame2(3, false, 100, 100);\n info2.retransmittable_frames.push_back(QuicFrame(stream_frame2));\n QuicTransmissionInfo info3;\n QuicStreamFrame stream_frame3(3, false, 200, 100);\n info3.retransmittable_frames.push_back(QuicFrame(stream_frame3));\n QuicTransmissionInfo info4;\n QuicStreamFrame stream_frame4(3, true, 300, 0);\n info4.retransmittable_frames.push_back(QuicFrame(stream_frame4));\n EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(0);\n unacked_packets_.MaybeAggregateAckedStreamFrame(\n info1, QuicTime::Delta::Zero(), QuicTime::Zero());\n EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(0);\n unacked_packets_.MaybeAggregateAckedStreamFrame(\n info2, QuicTime::Delta::Zero(), QuicTime::Zero());\n EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(0);\n unacked_packets_.MaybeAggregateAckedStreamFrame(\n 
info3, QuicTime::Delta::Zero(), QuicTime::Zero());\n EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(1);\n unacked_packets_.MaybeAggregateAckedStreamFrame(\n info4, QuicTime::Delta::Zero(), QuicTime::Zero());\n}\nTEST_P(QuicUnackedPacketMapTest, CannotAggregateIfDataLengthOverflow) {\n QuicByteCount kMaxAggregatedDataLength =\n std::numeric_limits<QuicPacketLength>::max();\n QuicStreamId stream_id = 2;\n for (const QuicPacketLength acked_stream_length : {512, 1300}) {\n ++stream_id;\n QuicStreamOffset offset = 0;\n QuicByteCount aggregated_data_length = 0;\n while (offset < 1e6) {\n QuicTransmissionInfo info;\n QuicStreamFrame stream_frame(stream_id, false, offset,\n acked_stream_length);\n info.retransmittable_frames.push_back(QuicFrame(stream_frame));\n const QuicStreamFrame& aggregated_stream_frame =\n QuicUnackedPacketMapPeer::GetAggregatedStreamFrame(unacked_packets_);\n if (aggregated_stream_frame.data_length + acked_stream_length <=\n kMaxAggregatedDataLength) {\n EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(0);\n unacked_packets_.MaybeAggregateAckedStreamFrame(\n info, QuicTime::Delta::Zero(), QuicTime::Zero());\n aggregated_data_length += acked_stream_length;\n testing::Mock::VerifyAndClearExpectations(&notifier_);\n } else {\n EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(1);\n unacked_packets_.MaybeAggregateAckedStreamFrame(\n info, QuicTime::Delta::Zero(), QuicTime::Zero());\n aggregated_data_length = acked_stream_length;\n testing::Mock::VerifyAndClearExpectations(&notifier_);\n }\n EXPECT_EQ(aggregated_data_length, aggregated_stream_frame.data_length);\n offset += acked_stream_length;\n }\n QuicTransmissionInfo info;\n QuicStreamFrame stream_frame(stream_id, true, offset, acked_stream_length);\n info.retransmittable_frames.push_back(QuicFrame(stream_frame));\n EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(1);\n unacked_packets_.MaybeAggregateAckedStreamFrame(\n info, QuicTime::Delta::Zero(), QuicTime::Zero());\n testing::Mock::VerifyAndClearExpectations(&notifier_);\n }\n}\nTEST_P(QuicUnackedPacketMapTest, CannotAggregateAckedControlFrames) {\n testing::InSequence s;\n QuicWindowUpdateFrame window_update(1, 5, 100);\n QuicStreamFrame stream_frame1(3, false, 0, 100);\n QuicStreamFrame stream_frame2(3, false, 100, 100);\n QuicBlockedFrame blocked(2, 5, 0);\n QuicGoAwayFrame go_away(3, QUIC_PEER_GOING_AWAY, 5, \"Going away.\");\n QuicTransmissionInfo info1;\n info1.retransmittable_frames.push_back(QuicFrame(window_update));\n info1.retransmittable_frames.push_back(QuicFrame(stream_frame1));\n info1.retransmittable_frames.push_back(QuicFrame(stream_frame2));\n QuicTransmissionInfo info2;\n info2.retransmittable_frames.push_back(QuicFrame(blocked));\n info2.retransmittable_frames.push_back(QuicFrame(&go_away));\n EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(1);\n unacked_packets_.MaybeAggregateAckedStreamFrame(\n info1, QuicTime::Delta::Zero(), QuicTime::Zero());\n EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(3);\n unacked_packets_.MaybeAggregateAckedStreamFrame(\n info2, QuicTime::Delta::Zero(), QuicTime::Zero());\n EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(0);\n unacked_packets_.NotifyAggregatedStreamFrameAcked(QuicTime::Delta::Zero());\n}\nTEST_P(QuicUnackedPacketMapTest, LargestSentPacketMultiplePacketNumberSpaces) {\n unacked_packets_.EnableMultiplePacketNumberSpacesSupport();\n EXPECT_FALSE(\n unacked_packets_\n .GetLargestSentRetransmittableOfPacketNumberSpace(INITIAL_DATA)\n .IsInitialized());\n SerializedPacket 
packet1(CreateRetransmittablePacket(1));\n packet1.encryption_level = ENCRYPTION_INITIAL;\n unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n EXPECT_EQ(QuicPacketNumber(1u), unacked_packets_.largest_sent_packet());\n EXPECT_EQ(QuicPacketNumber(1),\n unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace(\n INITIAL_DATA));\n EXPECT_FALSE(\n unacked_packets_\n .GetLargestSentRetransmittableOfPacketNumberSpace(HANDSHAKE_DATA)\n .IsInitialized());\n SerializedPacket packet2(CreateRetransmittablePacket(2));\n packet2.encryption_level = ENCRYPTION_HANDSHAKE;\n unacked_packets_.AddSentPacket(&packet2, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n EXPECT_EQ(QuicPacketNumber(2u), unacked_packets_.largest_sent_packet());\n EXPECT_EQ(QuicPacketNumber(1),\n unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace(\n INITIAL_DATA));\n EXPECT_EQ(QuicPacketNumber(2),\n unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace(\n HANDSHAKE_DATA));\n EXPECT_FALSE(\n unacked_packets_\n .GetLargestSentRetransmittableOfPacketNumberSpace(APPLICATION_DATA)\n .IsInitialized());\n SerializedPacket packet3(CreateRetransmittablePacket(3));\n packet3.encryption_level = ENCRYPTION_ZERO_RTT;\n unacked_packets_.AddSentPacket(&packet3, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n EXPECT_EQ(QuicPacketNumber(3u), unacked_packets_.largest_sent_packet());\n EXPECT_EQ(QuicPacketNumber(1),\n unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace(\n INITIAL_DATA));\n EXPECT_EQ(QuicPacketNumber(2),\n unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace(\n HANDSHAKE_DATA));\n EXPECT_EQ(QuicPacketNumber(3),\n unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace(\n APPLICATION_DATA));\n EXPECT_EQ(QuicPacketNumber(3),\n unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace(\n APPLICATION_DATA));\n SerializedPacket packet4(CreateRetransmittablePacket(4));\n packet4.encryption_level = ENCRYPTION_FORWARD_SECURE;\n unacked_packets_.AddSentPacket(&packet4, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n EXPECT_EQ(QuicPacketNumber(4u), unacked_packets_.largest_sent_packet());\n EXPECT_EQ(QuicPacketNumber(1),\n unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace(\n INITIAL_DATA));\n EXPECT_EQ(QuicPacketNumber(2),\n unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace(\n HANDSHAKE_DATA));\n EXPECT_EQ(QuicPacketNumber(4),\n unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace(\n APPLICATION_DATA));\n EXPECT_EQ(QuicPacketNumber(4),\n unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace(\n APPLICATION_DATA));\n EXPECT_TRUE(unacked_packets_.GetLastPacketContent() & (1 << STREAM_FRAME));\n EXPECT_FALSE(unacked_packets_.GetLastPacketContent() & (1 << ACK_FRAME));\n}\nTEST_P(QuicUnackedPacketMapTest, ReserveInitialCapacityTest) {\n QuicUnackedPacketMap unacked_packets(GetParam());\n ASSERT_EQ(QuicUnackedPacketMapPeer::GetCapacity(unacked_packets), 0u);\n unacked_packets.ReserveInitialCapacity(16);\n QuicStreamId stream_id(1);\n SerializedPacket packet(CreateRetransmittablePacketForStream(1, stream_id));\n unacked_packets.AddSentPacket(&packet, TransmissionType::NOT_RETRANSMISSION,\n now_, true, true, ECN_NOT_ECT);\n ASSERT_EQ(QuicUnackedPacketMapPeer::GetCapacity(unacked_packets), 16u);\n}\nTEST_P(QuicUnackedPacketMapTest, DebugString) {\n EXPECT_EQ(unacked_packets_.DebugString(),\n \"{size: 0, least_unacked: 1, largest_sent_packet: 
uninitialized, \"\n \"largest_acked: uninitialized, bytes_in_flight: 0, \"\n \"packets_in_flight: 0}\");\n SerializedPacket packet1(CreateRetransmittablePacket(1));\n unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n EXPECT_EQ(\n unacked_packets_.DebugString(),\n \"{size: 1, least_unacked: 1, largest_sent_packet: 1, largest_acked: \"\n \"uninitialized, bytes_in_flight: 1000, packets_in_flight: 1}\");\n SerializedPacket packet2(CreateRetransmittablePacket(2));\n unacked_packets_.AddSentPacket(&packet2, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n unacked_packets_.RemoveFromInFlight(QuicPacketNumber(1));\n unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(1));\n unacked_packets_.RemoveObsoletePackets();\n EXPECT_EQ(\n unacked_packets_.DebugString(),\n \"{size: 1, least_unacked: 2, largest_sent_packet: 2, largest_acked: 1, \"\n \"bytes_in_flight: 1000, packets_in_flight: 1}\");\n}\nTEST_P(QuicUnackedPacketMapTest, EcnInfoStored) {\n SerializedPacket packet1(CreateRetransmittablePacket(1));\n unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true,\n ECN_NOT_ECT);\n SerializedPacket packet2(CreateRetransmittablePacket(2));\n unacked_packets_.AddSentPacket(&packet2, NOT_RETRANSMISSION, now_, true, true,\n ECN_ECT0);\n SerializedPacket packet3(CreateRetransmittablePacket(3));\n unacked_packets_.AddSentPacket(&packet3, NOT_RETRANSMISSION, now_, true, true,\n ECN_ECT1);\n EXPECT_EQ(\n unacked_packets_.GetTransmissionInfo(QuicPacketNumber(1)).ecn_codepoint,\n ECN_NOT_ECT);\n EXPECT_EQ(\n unacked_packets_.GetTransmissionInfo(QuicPacketNumber(2)).ecn_codepoint,\n ECN_ECT0);\n EXPECT_EQ(\n unacked_packets_.GetTransmissionInfo(QuicPacketNumber(3)).ecn_codepoint,\n ECN_ECT1);\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_unacked_packet_map.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_unacked_packet_map_test.cc"},"Commit Hash":{"kind":"string","value":"6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6"}}},{"rowIdx":291,"cells":{"ID":{"kind":"string","value":"0a1cf107-125e-40dd-85f7-591c64563636"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"python_op_gen_annotator"},"File Path in Repository":{"kind":"string","value":"tensorflow/python/framework/python_op_gen_annotator.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/python/framework/python_op_gen_annotator_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/python/framework/python_op_gen_annotator.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/strings/escaping.h\"\n#include \"absl/strings/str_format.h\"\n#include \"absl/strings/string_view.h\"\n#include \"tensorflow/python/framework/kythe_metadata.pb.h\"\n#include \"tensorflow/python/framework/op_reg_offset.pb.h\"\nnamespace tensorflow {\nnamespace python_op_gen_internal {\nvoid GeneratedCodeAnnotator::AddAnnotation(const OpDef& op_def,\n absl::string_view function_name,\n uint32_t offset_start) {\n const uint32_t start_byte = base_pos_ + offset_start;\n const uint32_t end_byte = start_byte + function_name.size();\n byte_offsets_map_[op_def.name()].generated_start = start_byte;\n byte_offsets_map_[op_def.name()].generated_end = end_byte;\n}\nvoid 
GeneratedCodeAnnotator::FillSourceOffsets(\n const OpRegOffsets& op_reg_offsets) {\n for (const OpRegOffset& offset : op_reg_offsets.offsets()) {\n if (byte_offsets_map_.find(offset.name()) != byte_offsets_map_.end()) {\n byte_offsets_map_[offset.name()].file_path = offset.filepath();\n byte_offsets_map_[offset.name()].source_start = offset.start();\n byte_offsets_map_[offset.name()].source_end = offset.end();\n }\n }\n}\nstring GeneratedCodeAnnotator::BuildKytheMetadata() {\n GeneratedCodeInfo generated_code_info;\n generated_code_info.set_type(GeneratedCodeInfo::KYTHE0);\n for (const auto& [name, offsets] : byte_offsets_map_) {\n if (offsets.file_path.empty()) {\n continue;\n }\n MappingRule* meta = generated_code_info.add_meta();\n meta->set_type(MappingRule::ANCHOR_ANCHOR);\n meta->set_edge(\"/kythe/edge/imputes\");\n meta->set_source_begin(offsets.source_start);\n meta->set_source_end(offsets.source_end);\n meta->set_target_begin(offsets.generated_start);\n meta->set_target_end(offsets.generated_end);\n VName* vname = meta->mutable_source_vname();\n vname->set_signature(absl::StrFormat(\n \"@%d:%d@tensorflow_op#%s#%s#%s\", offsets.source_start,\n offsets.source_end, name, kKytheCorpus, offsets.file_path));\n vname->set_corpus(std::string(kKytheCorpus));\n vname->set_path(offsets.file_path);\n vname->set_language(\"c++\");\n }\n return \"# kythe.proto.metadata.GeneratedCodeInfo:\" +\n absl::Base64Escape(generated_code_info.SerializeAsString());\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorflow/python/framework/python_op_gen_annotator.h\"\n#include <utility>\n#include \"absl/strings/escaping.h\"\n#include \"absl/strings/str_format.h\"\n#include \"absl/strings/str_split.h\"\n#include \"tensorflow/core/platform/protobuf.h\"\n#include \"tensorflow/core/platform/test.h\"\n#include \"tensorflow/python/framework/kythe_metadata.pb.h\"\nnamespace tensorflow {\nnamespace python_op_gen_internal {\nnamespace {\nusing ::testing::StartsWith;\nGeneratedCodeInfo ParseMetadata(string metadata) {\n GeneratedCodeInfo generated_code_info;\n std::pair<string, string> p = absl::StrSplit(metadata, ':');\n string serialized_generated_code_info;\n absl::Base64Unescape(p.second, &serialized_generated_code_info);\n generated_code_info.ParseFromString(serialized_generated_code_info);\n return generated_code_info;\n}\nTEST(PythonOpGenAnnotatorTest, AddAnnotationWithoutSourceOffsets) {\n GeneratedCodeAnnotator annotator;\n OpDef fakeOpDef;\n fakeOpDef.set_name(\"fake_op\");\n annotator.AddAnnotation(fakeOpDef, \"fake_op\", 0);\n string meta = annotator.BuildKytheMetadata();\n ASSERT_THAT(meta, StartsWith(\"# kythe.proto.metadata.GeneratedCodeInfo:\"));\n GeneratedCodeInfo actual = ParseMetadata(meta);\n GeneratedCodeInfo expected;\n ASSERT_TRUE(protobuf::TextFormat::ParseFromString(\"type: KYTHE0\", &expected));\n EXPECT_EQ(actual.SerializeAsString(), expected.SerializeAsString());\n}\nTEST(PythonOpGenAnnotatorTest, AddAnnotationWithSourceOffsets) {\n GeneratedCodeAnnotator annotator;\n OpDef fakeOpDef;\n fakeOpDef.set_name(\"fake_op\");\n OpRegOffsets fakeOffsets;\n ASSERT_TRUE(protobuf::TextFormat::ParseFromString(\n R\"pb(\n offsets {\n name: \"fake_op\",\n filepath: \"file/path/to/fake_op.cc\",\n start: 7,\n end: 11,\n }\n )pb\",\n &fakeOffsets));\n annotator.AddAnnotation(fakeOpDef, \"fake_op\", 100);\n annotator.FillSourceOffsets(fakeOffsets);\n string meta = annotator.BuildKytheMetadata();\n ASSERT_THAT(meta, StartsWith(\"# kythe.proto.metadata.GeneratedCodeInfo:\"));\n GeneratedCodeInfo 
actual = ParseMetadata(meta);\n EXPECT_EQ(actual.meta(0).type(), MappingRule::ANCHOR_ANCHOR);\n EXPECT_EQ(actual.meta(0).edge(), \"/kythe/edge/imputes\");\n EXPECT_EQ(\n actual.meta(0).source_vname().signature(),\n absl::StrFormat(\"@7:11@tensorflow_op#fake_op#%s#file/path/to/fake_op.cc\",\n kKytheCorpus));\n EXPECT_EQ(actual.meta(0).source_vname().path(), \"file/path/to/fake_op.cc\");\n EXPECT_EQ(actual.meta(0).source_begin(), 7);\n EXPECT_EQ(actual.meta(0).source_end(), 11);\n EXPECT_EQ(actual.meta(0).target_begin(), 100);\n EXPECT_EQ(actual.meta(0).target_end(), 107);\n}\nTEST(PythonOpGenAnnotatorTest, AddAnnotationWithSourceOffsetsAndNonZeroBase) {\n GeneratedCodeAnnotator annotator;\n OpDef fakeOpDef;\n fakeOpDef.set_name(\"fake_op\");\n OpRegOffsets fakeOffsets;\n ASSERT_TRUE(protobuf::TextFormat::ParseFromString(\n R\"pb(\n offsets {\n name: \"fake_op\",\n filepath: \"file/path/to/fake_op.cc\",\n start: 7,\n end: 11,\n }\n )pb\",\n &fakeOffsets));\n annotator.SetBase(10);\n annotator.AddAnnotation(fakeOpDef, \"fake_op\", 100);\n annotator.FillSourceOffsets(fakeOffsets);\n string meta = annotator.BuildKytheMetadata();\n ASSERT_THAT(meta, StartsWith(\"# kythe.proto.metadata.GeneratedCodeInfo:\"));\n GeneratedCodeInfo actual = ParseMetadata(meta);\n EXPECT_EQ(actual.meta(0).type(), MappingRule::ANCHOR_ANCHOR);\n EXPECT_EQ(actual.meta(0).edge(), \"/kythe/edge/imputes\");\n EXPECT_EQ(\n actual.meta(0).source_vname().signature(),\n absl::StrFormat(\"@7:11@tensorflow_op#fake_op#%s#file/path/to/fake_op.cc\",\n kKytheCorpus));\n EXPECT_EQ(actual.meta(0).source_vname().path(), \"file/path/to/fake_op.cc\");\n EXPECT_EQ(actual.meta(0).source_begin(), 7);\n EXPECT_EQ(actual.meta(0).source_end(), 11);\n EXPECT_EQ(actual.meta(0).target_begin(), 110);\n EXPECT_EQ(actual.meta(0).target_end(), 117);\n}\nTEST(PythonOpGenAnnotatorTest, AddMultipleAnnotation) {\n GeneratedCodeAnnotator annotator;\n OpDef fakeOpDef;\n OpRegOffsets fakeOffsets;\n ASSERT_TRUE(protobuf::TextFormat::ParseFromString(\n R\"pb(\n offsets {\n name: \"fake_op_1\",\n filepath: \"file/path/to/fake_op.cc\",\n start: 7,\n end: 11,\n }\n offsets {\n name: \"fake_op_2\",\n filepath: \"file/path/to/fake_op.cc\",\n start: 101,\n end: 103,\n }\n )pb\",\n &fakeOffsets));\n fakeOpDef.set_name(\"fake_op_1\");\n annotator.AddAnnotation(fakeOpDef, \"fake_op_1\", 10);\n fakeOpDef.set_name(\"fake_op_2\");\n annotator.AddAnnotation(fakeOpDef, \"fake_op_2\", 100);\n annotator.FillSourceOffsets(fakeOffsets);\n string meta = annotator.BuildKytheMetadata();\n ASSERT_THAT(meta, StartsWith(\"# kythe.proto.metadata.GeneratedCodeInfo:\"));\n GeneratedCodeInfo actual = ParseMetadata(meta);\n EXPECT_EQ(actual.meta_size(), 2);\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/python/framework/python_op_gen_annotator.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/python/framework/python_op_gen_annotator_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":292,"cells":{"ID":{"kind":"string","value":"70f4d2fa-0102-416a-8f90-3068250b5d23"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/tensorstore"},"File Name":{"kind":"string","value":"nditerable_transformed_array"},"File Path in 
Repository":{"kind":"string","value":"tensorstore/internal/nditerable_transformed_array.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorstore/internal/nditerable_transformed_array_test.cc"},"Code":{"kind":"string","value":"#include \"tensorstore/internal/nditerable_transformed_array.h\"\n#include \n#include \n#include \n#include \n#include \n#include \"absl/status/status.h\"\n#include \"tensorstore/array.h\"\n#include \"tensorstore/data_type.h\"\n#include \"tensorstore/index.h\"\n#include \"tensorstore/index_space/index_transform.h\"\n#include \"tensorstore/index_space/internal/iterate_impl.h\"\n#include \"tensorstore/index_space/internal/transform_rep.h\"\n#include \"tensorstore/index_space/transformed_array.h\"\n#include \"tensorstore/internal/arena.h\"\n#include \"tensorstore/internal/elementwise_function.h\"\n#include \"tensorstore/internal/integer_overflow.h\"\n#include \"tensorstore/internal/nditerable.h\"\n#include \"tensorstore/internal/nditerable_array.h\"\n#include \"tensorstore/internal/nditerable_array_util.h\"\n#include \"tensorstore/internal/nditerable_util.h\"\n#include \"tensorstore/internal/unique_with_intrusive_allocator.h\"\n#include \"tensorstore/strided_layout.h\"\n#include \"tensorstore/util/byte_strided_pointer.h\"\n#include \"tensorstore/util/element_pointer.h\"\n#include \"tensorstore/util/result.h\"\n#include \"tensorstore/util/span.h\"\n#include \"tensorstore/util/status.h\"\nnamespace tensorstore {\nnamespace internal {\nnamespace input_dim_iter_flags =\n internal_index_space::input_dimension_iteration_flags;\nnamespace {\nclass IterableImpl : public NDIterable::Base {\n public:\n IterableImpl(IndexTransform<> transform, allocator_type allocator)\n : transform_(std::move(transform)),\n input_dimension_flags_(transform_.input_rank(),\n input_dim_iter_flags::can_skip, allocator) {}\n allocator_type get_allocator() const override {\n return input_dimension_flags_.get_allocator();\n }\n int GetDimensionOrder(DimensionIndex dim_i,\n DimensionIndex dim_j) const override {\n auto flags_i = input_dimension_flags_[dim_i];\n if ((flags_i & input_dim_iter_flags::array_indexed) !=\n (input_dimension_flags_[dim_j] & input_dim_iter_flags::array_indexed)) {\n return (flags_i & input_dim_iter_flags::array_indexed) ? 
-2 : 2;\n }\n if (flags_i & input_dim_iter_flags::array_indexed) {\n for (DimensionIndex i = 0; i < state_.num_array_indexed_output_dimensions;\n ++i) {\n const int order = GetDimensionOrderFromByteStrides(\n state_.index_array_byte_strides[i][dim_i],\n state_.index_array_byte_strides[i][dim_j]);\n if (order != 0) return order;\n }\n }\n return GetDimensionOrderFromByteStrides(state_.input_byte_strides[dim_i],\n state_.input_byte_strides[dim_j]);\n }\n void UpdateDirectionPrefs(NDIterable::DirectionPref* prefs) const override {\n const DimensionIndex input_rank = transform_.input_rank();\n for (DimensionIndex i = 0; i < state_.num_array_indexed_output_dimensions;\n ++i) {\n UpdateDirectionPrefsFromByteStrides(\n tensorstore::span(state_.index_array_byte_strides[i], input_rank),\n prefs);\n }\n UpdateDirectionPrefsFromByteStrides(\n tensorstore::span(&state_.input_byte_strides[0], input_rank), prefs);\n }\n bool CanCombineDimensions(DimensionIndex dim_i, int dir_i,\n DimensionIndex dim_j, int dir_j,\n Index size_j) const override {\n auto flags_i = input_dimension_flags_[dim_i];\n if ((flags_i & input_dim_iter_flags::array_indexed) !=\n (input_dimension_flags_[dim_j] & input_dim_iter_flags::array_indexed)) {\n return false;\n }\n if (flags_i & input_dim_iter_flags::array_indexed) {\n for (DimensionIndex i = 0; i < state_.num_array_indexed_output_dimensions;\n ++i) {\n if (!CanCombineStridedArrayDimensions(\n state_.index_array_byte_strides[i][dim_i], dir_i,\n state_.index_array_byte_strides[i][dim_j], dir_j, size_j)) {\n return false;\n }\n }\n }\n return CanCombineStridedArrayDimensions(\n state_.input_byte_strides[dim_i], dir_i,\n state_.input_byte_strides[dim_j], dir_j, size_j);\n }\n DataType dtype() const override { return dtype_; }\n IterationBufferConstraint GetIterationBufferConstraint(\n IterationLayoutView layout) const override {\n const DimensionIndex penultimate_dim =\n layout.iteration_dimensions[layout.iteration_dimensions.size() - 2];\n const DimensionIndex last_dim =\n layout.iteration_dimensions[layout.iteration_dimensions.size() - 1];\n if ((last_dim == -1 || (input_dimension_flags_[last_dim] &\n input_dim_iter_flags::array_indexed) == 0) &&\n (penultimate_dim == -1 || (input_dimension_flags_[penultimate_dim] &\n input_dim_iter_flags::array_indexed) == 0)) {\n return {(last_dim == -1 || state_.input_byte_strides[last_dim] *\n layout.directions[last_dim] ==\n this->dtype_->size)\n ? IterationBufferKind::kContiguous\n : IterationBufferKind::kStrided,\n false};\n } else {\n return {IterationBufferKind::kIndexed, false};\n }\n }\n std::ptrdiff_t GetWorkingMemoryBytesPerElement(\n IterationLayoutView layout,\n IterationBufferKind buffer_kind) const override {\n return buffer_kind == IterationBufferKind::kIndexed ? sizeof(Index) : 0;\n }\n NDIterator::Ptr GetIterator(\n NDIterable::IterationBufferKindLayoutView layout) const override {\n return MakeUniqueWithVirtualIntrusiveAllocator(\n get_allocator(), this, layout);\n }\n class IteratorImpl : public NDIterator::Base {\n public:\n IteratorImpl(const IterableImpl* iterable,\n NDIterable::IterationBufferKindLayoutView layout,\n allocator_type allocator)\n : num_index_arrays_(\n iterable->state_.num_array_indexed_output_dimensions),\n num_index_array_iteration_dims_(0),\n iterable_(iterable),\n buffer_(\n num_index_arrays_ +\n layout.iteration_rank() * (num_index_arrays_ + 1) +\n ((layout.buffer_kind == IterationBufferKind::kIndexed)\n ? 
layout.block_shape[0] * layout.block_shape[1]\n : 0),\n allocator) {\n static_assert(sizeof(Index) >= sizeof(void*));\n for (DimensionIndex j = 0; j < num_index_arrays_; ++j) {\n ByteStridedPointer index_array_pointer =\n iterable->state_.index_array_pointers[j].get();\n for (DimensionIndex dim = 0; dim < layout.full_rank(); ++dim) {\n if (layout.directions[dim] != -1) continue;\n const Index size_minus_1 = layout.shape[dim] - 1;\n const Index index_array_byte_stride =\n iterable->state_.index_array_byte_strides[j][dim];\n index_array_pointer +=\n wrap_on_overflow::Multiply(index_array_byte_stride, size_minus_1);\n }\n buffer_[j] = reinterpret_cast(index_array_pointer.get());\n }\n Index base_offset = 0;\n for (DimensionIndex dim = 0; dim < layout.full_rank(); ++dim) {\n if (layout.directions[dim] != -1) continue;\n const Index size_minus_1 = layout.shape[dim] - 1;\n const Index input_byte_stride =\n iterable->state_.input_byte_strides[dim];\n base_offset = wrap_on_overflow::Add(\n base_offset,\n wrap_on_overflow::Multiply(input_byte_stride, size_minus_1));\n }\n for (DimensionIndex i = 0; i < layout.iteration_rank(); ++i) {\n const DimensionIndex dim = layout.iteration_dimensions[i];\n if (dim == -1) {\n for (DimensionIndex j = 0; j < num_index_arrays_ + 1; ++j) {\n buffer_[num_index_arrays_ + layout.iteration_rank() * j + i] = 0;\n }\n } else {\n const Index dir = layout.directions[dim];\n const Index input_byte_stride =\n iterable->state_.input_byte_strides[dim];\n buffer_[num_index_arrays_ + i] =\n wrap_on_overflow::Multiply(input_byte_stride, dir);\n if (iterable->input_dimension_flags_[dim] &\n input_dim_iter_flags::array_indexed) {\n num_index_array_iteration_dims_ = i + 1;\n for (DimensionIndex j = 0; j < num_index_arrays_; ++j) {\n const Index index_array_byte_stride =\n iterable->state_.index_array_byte_strides[j][dim];\n buffer_[num_index_arrays_ + layout.iteration_rank() * (j + 1) +\n i] =\n wrap_on_overflow::Multiply(index_array_byte_stride, dir);\n }\n }\n }\n }\n if (layout.buffer_kind == IterationBufferKind::kIndexed) {\n Index* offsets_array =\n buffer_.data() + num_index_arrays_ +\n layout.iteration_rank() * (num_index_arrays_ + 1);\n pointer_ =\n IterationBufferPointer{iterable->state_.base_pointer + base_offset,\n layout.block_shape[1], offsets_array};\n if (num_index_array_iteration_dims_ + 1 < layout.iteration_rank()) {\n FillOffsetsArrayFromStride(\n buffer_[num_index_arrays_ + layout.iteration_rank() - 2],\n buffer_[num_index_arrays_ + layout.iteration_rank() - 1],\n layout.block_shape[0], layout.block_shape[1], offsets_array);\n }\n } else {\n assert(num_index_array_iteration_dims_ + 1 < layout.iteration_rank());\n pointer_ = IterationBufferPointer{\n iterable->state_.base_pointer + base_offset,\n buffer_[num_index_arrays_ + layout.iteration_rank() - 2],\n buffer_[num_index_arrays_ + layout.iteration_rank() - 1]};\n }\n }\n allocator_type get_allocator() const override {\n return buffer_.get_allocator();\n }\n bool GetBlock(tensorstore::span indices,\n IterationBufferShape block_shape,\n IterationBufferPointer* pointer,\n absl::Status* status) override {\n IterationBufferPointer block_pointer = pointer_;\n block_pointer.pointer += IndexInnerProduct(\n indices.size(), indices.data(), buffer_.data() + num_index_arrays_);\n if (num_index_array_iteration_dims_ + 1 < indices.size()) {\n for (DimensionIndex j = 0; j < num_index_arrays_; ++j) {\n const Index index = ByteStridedPointer(\n reinterpret_cast(buffer_[j]))[IndexInnerProduct(\n num_index_array_iteration_dims_, 
indices.data(),\n buffer_.data() + num_index_arrays_ + indices.size() * (j + 1))];\n block_pointer.pointer += wrap_on_overflow::Multiply(\n iterable_->state_.index_array_output_byte_strides[j], index);\n }\n } else {\n block_pointer.byte_offsets_outer_stride = block_shape[1];\n Index* offsets_array = const_cast(block_pointer.byte_offsets);\n FillOffsetsArrayFromStride(\n buffer_[num_index_arrays_ + indices.size() - 2],\n buffer_[num_index_arrays_ + indices.size() - 1], block_shape[0],\n block_shape[1], offsets_array);\n for (DimensionIndex j = 0; j < num_index_arrays_; ++j) {\n const Index* index_array_byte_strides =\n buffer_.data() + num_index_arrays_ + indices.size() * (j + 1);\n ByteStridedPointer index_array_pointer =\n ByteStridedPointer(\n reinterpret_cast(buffer_[j])) +\n IndexInnerProduct(indices.size() - 2, indices.data(),\n index_array_byte_strides);\n const Index output_byte_stride =\n iterable_->state_.index_array_output_byte_strides[j];\n const Index penultimate_index_array_byte_stride =\n index_array_byte_strides[indices.size() - 2];\n const Index last_index_array_byte_stride =\n index_array_byte_strides[indices.size() - 1];\n if (last_index_array_byte_stride == 0 &&\n penultimate_index_array_byte_stride == 0) {\n block_pointer.pointer += wrap_on_overflow::Multiply(\n output_byte_stride, *index_array_pointer);\n } else {\n Index block_start0 = indices[indices.size() - 2];\n Index block_start1 = indices[indices.size() - 1];\n for (Index outer = 0; outer < block_shape[0]; ++outer) {\n for (Index inner = 0; inner < block_shape[1]; ++inner) {\n Index cur_contribution = wrap_on_overflow::Multiply(\n output_byte_stride,\n index_array_pointer[wrap_on_overflow::Add(\n wrap_on_overflow::Multiply(\n outer + block_start0,\n penultimate_index_array_byte_stride),\n wrap_on_overflow::Multiply(\n inner + block_start1,\n last_index_array_byte_stride))]);\n auto& offset = offsets_array[outer * block_shape[1] + inner];\n offset = wrap_on_overflow::Add(offset, cur_contribution);\n }\n }\n }\n }\n }\n *pointer = block_pointer;\n return true;\n }\n private:\n DimensionIndex num_index_arrays_;\n DimensionIndex num_index_array_iteration_dims_;\n const IterableImpl* iterable_;\n IterationBufferPointer pointer_;\n std::vector> buffer_;\n };\n std::shared_ptr data_owner_;\n IndexTransform<> transform_;\n internal_index_space::SingleArrayIterationState state_;\n DataType dtype_;\n std::vector>\n input_dimension_flags_;\n};\nResult MaybeConvertToArrayNDIterable(\n std::unique_ptr impl, Arena* arena) {\n if (impl->state_.num_array_indexed_output_dimensions == 0) {\n return GetArrayNDIterable(\n SharedOffsetArrayView(\n SharedElementPointer(\n std::shared_ptr(std::move(impl->data_owner_),\n impl->state_.base_pointer),\n impl->dtype_),\n StridedLayoutView<>(impl->transform_.input_rank(),\n impl->transform_.input_shape().data(),\n &impl->state_.input_byte_strides[0])),\n arena);\n }\n return impl;\n}\n} \nResult GetTransformedArrayNDIterable(\n SharedOffsetArrayView array, IndexTransformView<> transform,\n Arena* arena) {\n if (!transform.valid()) {\n return GetArrayNDIterable(array, arena);\n }\n auto impl = MakeUniqueWithVirtualIntrusiveAllocator(\n ArenaAllocator<>(arena), transform);\n TENSORSTORE_RETURN_IF_ERROR(InitializeSingleArrayIterationState(\n array, internal_index_space::TransformAccess::rep(transform),\n transform.input_origin().data(), transform.input_shape().data(),\n &impl->state_, impl->input_dimension_flags_.data()));\n impl->dtype_ = array.dtype();\n impl->data_owner_ = 
std::move(array.element_pointer().pointer());\n return MaybeConvertToArrayNDIterable(std::move(impl), arena);\n}\nResult GetTransformedArrayNDIterable(\n TransformedArray> array, Arena* arena) {\n auto impl = MakeUniqueWithVirtualIntrusiveAllocator(\n ArenaAllocator<>(arena), std::move(array.transform()));\n TENSORSTORE_RETURN_IF_ERROR(InitializeSingleArrayIterationState(\n ElementPointer(array.element_pointer()),\n internal_index_space::TransformAccess::rep(impl->transform_),\n impl->transform_.input_origin().data(),\n impl->transform_.input_shape().data(), &impl->state_,\n impl->input_dimension_flags_.data()));\n impl->dtype_ = array.dtype();\n impl->data_owner_ = std::move(array.element_pointer().pointer());\n return MaybeConvertToArrayNDIterable(std::move(impl), arena);\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \"tensorstore/internal/nditerable_transformed_array.h\"\n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"absl/status/status.h\"\n#include \"tensorstore/array.h\"\n#include \"tensorstore/array_testutil.h\"\n#include \"tensorstore/contiguous_layout.h\"\n#include \"tensorstore/data_type.h\"\n#include \"tensorstore/index.h\"\n#include \"tensorstore/index_space/dim_expression.h\"\n#include \"tensorstore/index_space/index_transform.h\"\n#include \"tensorstore/index_space/index_transform_builder.h\"\n#include \"tensorstore/index_space/transformed_array.h\"\n#include \"tensorstore/internal/arena.h\"\n#include \"tensorstore/internal/elementwise_function.h\"\n#include \"tensorstore/internal/nditerable.h\"\n#include \"tensorstore/internal/nditerable_buffer_management.h\"\n#include \"tensorstore/strided_layout.h\"\n#include \"tensorstore/util/element_pointer.h\"\n#include \"tensorstore/util/iterate.h\"\n#include \"tensorstore/util/result.h\"\n#include \"tensorstore/util/span.h\"\n#include \"tensorstore/util/status_testutil.h\"\nnamespace {\nusing ::tensorstore::AllocateArray;\nusing ::tensorstore::Index;\nusing ::tensorstore::IndexTransformBuilder;\nusing ::tensorstore::kImplicit;\nusing ::tensorstore::MakeArray;\nusing ::tensorstore::MatchesStatus;\nusing ::tensorstore::Result;\nusing ::tensorstore::Shared;\nusing ::tensorstore::SharedArray;\nusing ::tensorstore::skip_repeated_elements;\nusing ::tensorstore::StridedLayout;\nusing ::tensorstore::TransformedArray;\nusing ::tensorstore::internal::Arena;\nusing ::tensorstore::internal::GetTransformedArrayNDIterable;\nusing ::tensorstore::internal::IterationBufferKind;\nusing ::tensorstore::internal::IterationBufferShape;\nusing ::tensorstore::internal::MultiNDIterator;\nusing ::tensorstore::internal::NDIterable;\nusing ::testing::ElementsAre;\nusing ::testing::ElementsAreArray;\nusing ::testing::FieldsAre;\nusing ::testing::Pair;\nusing IterationTrace = std::vector;\ntemplate \nstd::pair, absl::Status>\nGetIterationTrace(\n MultiNDIterator* multi_iterator) {\n std::pair, absl::Status>\n result;\n for (auto block_shape = multi_iterator->ResetAtBeginning();\n block_shape[0] && block_shape[1];\n block_shape = multi_iterator->StepForward(block_shape)) {\n if (!multi_iterator->GetBlock(block_shape, &result.second)) {\n break;\n }\n ptrdiff_t i = 0;\n const auto unused = {(\n [&] {\n const auto get_trace_func = [](void* ptr, IterationTrace* trace) {\n trace->push_back(ptr);\n };\n tensorstore::internal::ElementwiseFunction<1, IterationTrace*> func =\n tensorstore::internal::SimpleElementwiseFunction<\n decltype(get_trace_func)(Element), IterationTrace*>();\n 
func[multi_iterator->buffer_kind](nullptr, block_shape,\n multi_iterator->block_pointers()[i],\n &result.first[i]);\n ++i;\n }(),\n 0)...};\n (void)unused;\n }\n return result;\n}\ntemplate \nusing BlockTrace =\n std::vector, IterationBufferShape,\n std::array>>;\ntemplate \nstd::pair, absl::Status> GetBlockTrace(\n MultiNDIterator* multi_iterator) {\n std::pair, absl::Status> result;\n for (auto block_shape = multi_iterator->ResetAtBeginning();\n block_shape[0] && block_shape[1];\n block_shape = multi_iterator->StepForward(block_shape)) {\n if (!multi_iterator->GetBlock(block_shape, &result.second)) {\n break;\n }\n auto& [position, shape, traces] = result.first.emplace_back();\n position.assign(multi_iterator->position().begin(),\n multi_iterator->position().end());\n shape = block_shape;\n ptrdiff_t i = 0;\n const auto unused = {(\n [&, traces_ptr = &traces[i]] {\n const auto get_trace_func = [](void* ptr, IterationTrace* trace) {\n trace->push_back(ptr);\n };\n tensorstore::internal::ElementwiseFunction<1, IterationTrace*> func =\n tensorstore::internal::SimpleElementwiseFunction<\n decltype(get_trace_func)(Element), IterationTrace*>();\n func[multi_iterator->buffer_kind](nullptr, block_shape,\n multi_iterator->block_pointers()[i],\n traces_ptr);\n ++i;\n }(),\n 0)...};\n (void)unused;\n }\n return result;\n}\nclass MaybeDirectTest : public ::testing::TestWithParam {\n protected:\n Arena arena;\n Result GetMaybeDirectTransformedArrayNDIterable(\n tensorstore::SharedOffsetArrayView array,\n tensorstore::IndexTransformView<> transform) {\n if (GetParam()) {\n TENSORSTORE_ASSIGN_OR_RETURN(auto transformed_array,\n MakeTransformedArray(array, transform));\n return GetTransformedArrayNDIterable(std::move(transformed_array),\n &arena);\n } else {\n return GetTransformedArrayNDIterable(std::move(array), transform, &arena);\n }\n }\n};\nINSTANTIATE_TEST_SUITE_P(Indirect, MaybeDirectTest, ::testing::Values(true));\nINSTANTIATE_TEST_SUITE_P(Direct, MaybeDirectTest, ::testing::Values(false));\nTEST(NDIterableTransformedArrayTest, Strided) {\n Arena arena;\n auto a = AllocateArray({2, 3});\n auto ta = (a | tensorstore::Dims(1).SizedInterval(0, 2, 2)).value();\n auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();\n MultiNDIterator<1, true> multi_iterator(\n ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);\n EXPECT_EQ(IterationBufferKind::kStrided, multi_iterator.buffer_kind);\n EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(0, 1));\n EXPECT_THAT(\n GetIterationTrace(&multi_iterator),\n Pair(ElementsAre(ElementsAre(&a(0, 0), &a(0, 2), &a(1, 0), &a(1, 2))),\n absl::OkStatus()));\n}\nTEST(NDIterableTransformedArrayTest, SingleIndexedDimension) {\n Arena arena;\n auto a = AllocateArray({4});\n auto ta = (a | tensorstore::Dims(0).OuterIndexArraySlice(\n MakeArray({1, 2, 3, 0})))\n .value();\n auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();\n EXPECT_EQ(tensorstore::dtype_v, iterable->dtype());\n MultiNDIterator<1, true> multi_iterator(\n ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);\n EXPECT_EQ(IterationBufferKind::kIndexed, multi_iterator.buffer_kind);\n EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(-1, 0));\n EXPECT_THAT(GetIterationTrace(&multi_iterator),\n Pair(ElementsAre(ElementsAre(&a(1), &a(2), &a(3), &a(0))),\n absl::OkStatus()));\n}\nTEST(NDIterableTransformedArrayTest,\n OneStridedOneIndexedDimensionIndexedBuffer) {\n Arena arena;\n auto a = AllocateArray({2, 3});\n auto ta = (a | 
Unit Test - (Ground Truth), continued (tensorstore/internal/nditerable_transformed_array_test.cc):

```cpp
               tensorstore::Dims(1).OuterIndexArraySlice(
                   MakeArray({0, 2, 1, 1})))
                .value();
  auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
  MultiNDIterator<1, true> multi_iterator(
      ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
  EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0));
  EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2));
  EXPECT_EQ(IterationBufferKind::kIndexed, multi_iterator.buffer_kind);
  EXPECT_THAT(
      GetIterationTrace(&multi_iterator),
      Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2),
                                   &a(0, 1), &a(1, 1), &a(0, 1), &a(1, 1))),
           absl::OkStatus()));
}

TEST(NDIterableTransformedArrayTest,
     TwoStridedOneIndexedDimensionContiguousBuffer) {
  Arena arena;
  auto a = AllocateArray({2, 3, 2});
  auto ta = (a | tensorstore::Dims(1).OuterIndexArraySlice(
                     MakeArray({0, 2, 1, 1})))
                .value();
  auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
  MultiNDIterator<1, true> multi_iterator(
      ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
  EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0, 2));
  EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2, 2));
  EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind);
  EXPECT_THAT(
      GetIterationTrace(&multi_iterator),
      Pair(ElementsAre(ElementsAreArray(
               {
                   &a(0, 0, 0), &a(0, 0, 1), &a(1, 0, 0), &a(1, 0, 1),
                   &a(0, 2, 0), &a(0, 2, 1), &a(1, 2, 0), &a(1, 2, 1),
                   &a(0, 1, 0), &a(0, 1, 1), &a(1, 1, 0), &a(1, 1, 1),
                   &a(0, 1, 0), &a(0, 1, 1), &a(1, 1, 0), &a(1, 1, 1),
               })),
           absl::OkStatus()));
}

TEST(NDIterableTransformedArrayTest,
     TwoStridedOneIndexedDimensionStridedBuffer) {
  Arena arena;
  auto a = AllocateArray({2, 3, 4});
  auto ta = (a | tensorstore::Dims(2).Stride(2) |
             tensorstore::Dims(1).OuterIndexArraySlice(
                 MakeArray({0, 2, 1, 1})))
                .value();
  auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
  MultiNDIterator<1, true> multi_iterator(
      ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
  EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0, 2));
  EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2, 2));
  EXPECT_EQ(IterationBufferKind::kStrided, multi_iterator.buffer_kind);
  EXPECT_THAT(
      GetIterationTrace(&multi_iterator),
      Pair(ElementsAre(ElementsAreArray(
               {
                   &a(0, 0, 0), &a(0, 0, 2), &a(1, 0, 0), &a(1, 0, 2),
                   &a(0, 2, 0), &a(0, 2, 2), &a(1, 2, 0), &a(1, 2, 2),
                   &a(0, 1, 0), &a(0, 1, 2), &a(1, 1, 0), &a(1, 1, 2),
                   &a(0, 1, 0), &a(0, 1, 2), &a(1, 1, 0), &a(1, 1, 2),
               })),
           absl::OkStatus()));
}

TEST(NDIterableTransformedArrayTest,
     TwoStridedOneIndexedDimensionIndexedBuffer) {
  Arena arena;
  auto a = AllocateArray({2, 3, 2});
  auto ta = (a | tensorstore::Dims(1).OuterIndexArraySlice(
                     MakeArray({0, 2, 1, 1})))
                .value();
  auto tb =
      (a | tensorstore::Dims(0).OuterIndexArraySlice(MakeArray({0, 1})) |
       tensorstore::Dims(1).OuterIndexArraySlice(
           MakeArray({0, 2, 1, 1})))
          .value();
  auto iterable1 = GetTransformedArrayNDIterable(ta, &arena).value();
  auto iterable2 = GetTransformedArrayNDIterable(tb, &arena).value();
  MultiNDIterator<2, true> multi_iterator(
      ta.shape(), skip_repeated_elements, {{iterable1.get(), iterable2.get()}},
      &arena);
  EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0, 2));
  EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2, 2));
  EXPECT_EQ(IterationBufferKind::kIndexed, multi_iterator.buffer_kind);
  auto element_matcher = ElementsAreArray(
      {
          &a(0, 0, 0), &a(0, 0, 1), &a(1, 0, 0), &a(1, 0, 1),
          &a(0, 2, 0), &a(0, 2, 1), &a(1, 2, 0), &a(1, 2, 1),
          &a(0, 1, 0), &a(0, 1, 1), &a(1, 1, 0), &a(1, 1, 1),
          &a(0, 1, 0), &a(0, 1, 1), &a(1, 1, 0), &a(1, 1, 1),
      });
  EXPECT_THAT(
      (GetIterationTrace(&multi_iterator)),
      Pair(ElementsAre(element_matcher, element_matcher), absl::OkStatus()));
}

TEST(NDIterableTransformedArrayTest, IndexedAndReversedStrided) {
  Arena arena;
  auto a = AllocateArray({2, 3});
  auto ta = (a |
             tensorstore::Dims(1).OuterIndexArraySlice(
                 MakeArray({0, 2, 1, 1})) |
             tensorstore::Dims(0).SizedInterval(kImplicit, kImplicit, -1))
                .value();
  auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
  MultiNDIterator<1, true> multi_iterator(
      ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
  EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0));
  EXPECT_THAT(multi_iterator.directions, ElementsAre(-1, 1));
  EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2));
  EXPECT_THAT(
      GetIterationTrace(&multi_iterator),
      Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2),
                                   &a(0, 1), &a(1, 1), &a(0, 1), &a(1, 1))),
           absl::OkStatus()));
}

TEST(NDIterableTransformedArrayTest, IndexedCombine) {
  Arena arena;
  auto a = AllocateArray({2, 3});
  auto ta = (a | tensorstore::Dims(1).OuterIndexArraySlice(
                     MakeArray({{0, 2}, {2, 0}})))
                .value();
  auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
  MultiNDIterator<1, true> multi_iterator(
      ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
  EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(2, 0));
  EXPECT_THAT(
      GetIterationTrace(&multi_iterator),
      Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2),
                                   &a(0, 2), &a(1, 2), &a(0, 0), &a(1, 0))),
           absl::OkStatus()));
}

TEST(NDIterableTransformedArrayTest, IndexedCombinePartiallyReversed) {
  Arena arena;
  auto a = AllocateArray({2, 3});
  auto ta = (a | tensorstore::Dims(1)
                     .OuterIndexArraySlice(MakeArray({{0, 2}, {2, 0}}))
                     .SizedInterval(kImplicit, kImplicit, {1, -1}))
                .value();
  auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
  MultiNDIterator<1, true> multi_iterator(
      ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
  EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(2, 0));
  EXPECT_THAT(multi_iterator.directions, ElementsAre(1, 1, -1));
  EXPECT_THAT(
      GetIterationTrace(&multi_iterator),
      Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2),
                                   &a(0, 2), &a(1, 2), &a(0, 0), &a(1, 0))),
           absl::OkStatus()));
}

TEST(NDIterableTransformedArrayTest, IndexedCombineBothReversed) {
  Arena arena;
  auto a = AllocateArray({2, 3});
  auto ta = (a | tensorstore::Dims(1)
                     .OuterIndexArraySlice(MakeArray({{0, 2}, {2, 0}}))
                     .SizedInterval(kImplicit, kImplicit, -1))
                .value();
  auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
  MultiNDIterator<1, true> multi_iterator(
      ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
  EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(2, 0));
  EXPECT_THAT(multi_iterator.directions, ElementsAre(1, -1, -1));
  EXPECT_THAT(
      GetIterationTrace(&multi_iterator),
      Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2),
                                   &a(0, 2), &a(1, 2), &a(0, 0), &a(1, 0))),
           absl::OkStatus()));
}

TEST(NDIterableTransformedArrayTest, IndexedVsStrided) {
  Arena arena;
  auto a = AllocateArray({2, 2});
  auto b = AllocateArray({2, 3});
  auto tb =
      (b | tensorstore::Dims(1).OuterIndexArraySlice(MakeArray({0, 2})))
          .value();
  auto iterable_a = GetTransformedArrayNDIterable(a, &arena).value();
  auto iterable_b = GetTransformedArrayNDIterable(tb, &arena).value();
  MultiNDIterator<2, true> multi_iterator(
      tb.shape(), skip_repeated_elements,
      {{iterable_a.get(), iterable_b.get()}}, &arena);
  EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0));
  EXPECT_THAT(
      (GetIterationTrace(&multi_iterator)),
      Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 1), &a(1, 1)),
                       ElementsAre(&b(0, 0), &b(1, 0), &b(0, 2), &b(1, 2))),
           absl::OkStatus()));
}

TEST(NDIterableTransformedArrayTest, IndexedWith2StridedDims) {
  Arena arena;
  auto a = AllocateArray({2, 2, 3});
  auto ta =
      (a | tensorstore::Dims(1).MoveToFront() |
       tensorstore::Dims(2).OuterIndexArraySlice(MakeArray({0, 2, 1})))
          .value();
  auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
  MultiNDIterator<1, true> multi_iterator(
      ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
  EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(2, 0));
  EXPECT_THAT(GetIterationTrace(&multi_iterator),
              Pair(ElementsAre(ElementsAre(
                       &a(0, 0, 0), &a(0, 1, 0), &a(1, 0, 0), &a(1, 1, 0),
                       &a(0, 0, 2), &a(0, 1, 2), &a(1, 0, 2), &a(1, 1, 2),
                       &a(0, 0, 1), &a(0, 1, 1), &a(1, 0, 1), &a(1, 1, 1))),
                   absl::OkStatus()));
}

TEST(NDIterableTransformedArrayTest, TwoIndexedDims) {
  Arena arena;
  auto a = AllocateArray({2, 3});
  auto ta =
      (a |
       tensorstore::Dims(0).OuterIndexArraySlice(MakeArray({0, 1, 1})) |
       tensorstore::Dims(1).OuterIndexArraySlice(MakeArray({0, 2})))
          .value();
  auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
  MultiNDIterator<1, true> multi_iterator(
      ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
  EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(0, 1));
  EXPECT_THAT(GetIterationTrace(&multi_iterator),
              Pair(ElementsAre(ElementsAre(&a(0, 0), &a(0, 2), &a(1, 0),
                                           &a(1, 2), &a(1, 0), &a(1, 2))),
                   absl::OkStatus()));
}

TEST(NDIterableTransformedArrayTest, FourIndexedDims) {
  Arena arena;
  auto a = AllocateArray({2, 3});
  auto ta = (a |
             tensorstore::Dims(0).OuterIndexArraySlice(
                 MakeArray({{0, 1}, {1, 1}})) |
             tensorstore::Dims(-1).OuterIndexArraySlice(
                 MakeArray({{0, 2}, {1, 0}})))
                .value();
  auto b = AllocateArray({2, 2, 2, 2});
  auto iterable_a = GetTransformedArrayNDIterable(ta, &arena).value();
  auto iterable_b = GetTransformedArrayNDIterable(b, &arena).value();
  MultiNDIterator<2, true> multi_iterator(
      ta.shape(), skip_repeated_elements,
      {{iterable_a.get(), iterable_b.get()}}, &arena);
  EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 3));
  EXPECT_THAT(
      (GetIterationTrace(&multi_iterator)),
      Pair(
          ElementsAre(
              ElementsAre(&a(0, 0), &a(0, 2), &a(0, 1), &a(0, 0),
                          &a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0),
                          &a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0),
                          &a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0)),
              ElementsAre(
                  b.data() + 0, b.data() + 1, b.data() + 2, b.data() + 3,
                  b.data() + 4, b.data() + 5, b.data() + 6, b.data() + 7,
                  b.data() + 8, b.data() + 9, b.data() + 10, b.data() + 11,
                  b.data() + 12, b.data() + 13, b.data() + 14, b.data() + 15)),
          absl::OkStatus()));
}

TEST(NDIterableTransformedArrayTest, LastTwoDimsStrided) {
  Arena arena;
  auto a = AllocateArray({2, 3});
  auto ta = (a |
             tensorstore::Dims(0).OuterIndexArraySlice(
                 MakeArray({{0, 1}, {1, 1}})) |
             tensorstore::Dims(-1).OuterIndexArraySlice(
                 MakeArray({{0, 2}, {1, 0}})))
                .value();
  auto b = AllocateArray({2, 2, 2, 2});
  auto iterable_a = GetTransformedArrayNDIterable(ta, &arena).value();
  auto iterable_b = GetTransformedArrayNDIterable(b, &arena).value();
  MultiNDIterator<2, true> multi_iterator(
      ta.shape(), skip_repeated_elements,
      {{iterable_a.get(), iterable_b.get()}}, &arena);
  EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 3));
  EXPECT_THAT(
      (GetIterationTrace(&multi_iterator)),
      Pair(
          ElementsAre(
              ElementsAre(&a(0, 0), &a(0, 2), &a(0, 1), &a(0, 0),
                          &a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0),
                          &a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0),
                          &a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0)),
              ElementsAre(
                  b.data() + 0, b.data() + 1, b.data() + 2, b.data() + 3,
                  b.data() + 4, b.data() + 5, b.data() + 6, b.data() + 7,
                  b.data() + 8, b.data() + 9, b.data() + 10, b.data() + 11,
                  b.data() + 12, b.data() + 13, b.data() + 14, b.data() + 15)),
          absl::OkStatus()));
}

TEST(NDIterableTransformedArrayTest, TwoTransformedArrays) {
  Arena arena;
  auto a = AllocateArray({2, 3});
  auto b = AllocateArray({2, 3});
  auto ta =
      (a | tensorstore::Dims(0).OuterIndexArraySlice(MakeArray({0, 1})))
          .value();
  auto tb = (b | tensorstore::Dims(1).OuterIndexArraySlice(
                     MakeArray({0, 1, 2})))
                .value();
  auto iterable_a = GetTransformedArrayNDIterable(ta, &arena).value();
  auto iterable_b = GetTransformedArrayNDIterable(tb, &arena).value();
  MultiNDIterator<2, true> multi_iterator(
      ta.shape(), skip_repeated_elements,
      {{iterable_a.get(), iterable_b.get()}}, &arena);
  EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(0, 1));
  EXPECT_THAT((GetIterationTrace(&multi_iterator)),
              Pair(ElementsAre(ElementsAre(&a(0, 0), &a(0, 1), &a(0, 2),
                                           &a(1, 0), &a(1, 1), &a(1, 2)),
                               ElementsAre(&b(0, 0), &b(0, 1), &b(0, 2),
                                           &b(1, 0), &b(1, 1), &b(1, 2))),
                   absl::OkStatus()));
}

TEST(NDIterableTransformedArrayTest, ZeroRankIndexArray) {
  Arena arena;
  SharedArray<const Index> index_array{std::make_shared<Index>(3),
                                       StridedLayout<>({5}, {0})};
  int data[100];
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto transform,
      IndexTransformBuilder(1, 1)
          .input_shape({5})
          .output_index_array(0, sizeof(int) * 2, sizeof(int) * 4, index_array)
          .Finalize());
  auto iterable_a = GetTransformedArrayNDIterable(
                        {tensorstore::UnownedToShared(
                             tensorstore::ElementPointer(&data[0])),
                         transform},
                        &arena)
                        .value();
  MultiNDIterator<1, true> multi_iterator(
      transform.input_shape(), skip_repeated_elements, {{iterable_a.get()}},
      &arena);
  EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(-1, -1));
  EXPECT_THAT(
      (GetIterationTrace(&multi_iterator)),
      Pair(ElementsAre(ElementsAre(&data[4 * 3 + 2])), absl::OkStatus()));
}

TEST(NDIterableTransformedArrayTest, OutOfBoundsConstant) {
  Arena arena;
  auto a = AllocateArray({5});
  auto transform = IndexTransformBuilder<1, 1>()
                       .input_shape({5})
                       .output_constant(0, 8)
                       .Finalize()
                       .value();
  EXPECT_THAT(
      GetTransformedArrayNDIterable(a, transform, &arena),
      MatchesStatus(absl::StatusCode::kOutOfRange,
                    "Checking bounds of constant output index map for "
                    "dimension 0: Index 8 is outside valid range \\[0, 5\\)"));
}

TEST(NDIterableTransformedArrayTest, NullTransform) {
  Arena arena;
  auto a = AllocateArray({5});
  auto iterable_a = GetTransformedArrayNDIterable(a, {}, &arena).value();
  EXPECT_EQ(tensorstore::dtype_v<int>, iterable_a->dtype());
  MultiNDIterator<1, true> multi_iterator(
      a.shape(), skip_repeated_elements, {{iterable_a.get()}}, &arena);
  EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(-1, 0));
  EXPECT_THAT((GetIterationTrace(&multi_iterator)),
              Pair(ElementsAre(ElementsAre(&a(0), &a(1), &a(2), &a(3), &a(4))),
                   absl::OkStatus()));
}

TEST(NDIterableTransformedArrayTest, IdentityTransform) {
  Arena arena;
  auto a = AllocateArray({5});
  auto iterable_a =
      GetTransformedArrayNDIterable(
          a,
          tensorstore::IdentityTransform(tensorstore::span<const Index>({5})),
          &arena)
          .value();
  EXPECT_EQ(tensorstore::dtype_v<int>, iterable_a->dtype());
  MultiNDIterator<1, true> multi_iterator(
      a.shape(), skip_repeated_elements, {{iterable_a.get()}}, &arena);
  EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(-1, 0));
  EXPECT_THAT((GetIterationTrace(&multi_iterator)),
              Pair(ElementsAre(ElementsAre(&a(0), &a(1), &a(2), &a(3), &a(4))),
                   absl::OkStatus()));
}

TEST(NDIterableTransformedArrayTest, OutOfBoundsSingleInputDimension) {
  Arena arena;
  auto a = AllocateArray({5});
  auto transform = IndexTransformBuilder<1, 1>()
                       .input_shape({5})
                       .output_single_input_dimension(0, 2, 1, 0)
                       .Finalize()
                       .value();
  EXPECT_THAT(GetTransformedArrayNDIterable(a, transform, &arena),
              MatchesStatus(absl::StatusCode::kOutOfRange,
                            "Output dimension 0 range of \\[2, 7\\) is not "
                            "contained within array domain of \\[0, 5\\)"));
}

TEST_P(MaybeDirectTest, OutOfBoundsIndexArray) {
  auto a = AllocateArray({5});
  auto transform =
      IndexTransformBuilder<1, 1>()
          .input_shape({5})
          .output_index_array(0, 2, 1, MakeArray({0, 0, 0, 0, 42}))
          .Finalize()
          .value();
  EXPECT_THAT(GetMaybeDirectTransformedArrayNDIterable(a, transform),
              MatchesStatus(absl::StatusCode::kOutOfRange,
                            ".*Index 42 is outside valid range \\[-2, 3\\)"));
}

TEST_P(MaybeDirectTest, OutOfBoundsSingletonIndexArray) {
  SharedArray<const Index> index_array{std::make_shared<Index>(42),
                                       StridedLayout<>({5}, {0})};
  auto a = AllocateArray({5});
  auto transform = IndexTransformBuilder<1, 1>()
                       .input_shape({5})
                       .output_index_array(0, 2, 1, index_array)
                       .Finalize()
                       .value();
  EXPECT_THAT(GetMaybeDirectTransformedArrayNDIterable(a, transform),
              MatchesStatus(absl::StatusCode::kOutOfRange,
                            ".*Index 42 is outside valid range \\[-2, 3\\)"));
}

TEST(NDIterableTransformedArrayTest, BlockTraceThreeStridedDimensions) {
  Arena arena;
  auto a = AllocateArray({2, 5, 3});
  auto ta = (a | tensorstore::Dims(1).SizedInterval(0, 2, 2)).value();
  auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
  MultiNDIterator<1, true> multi_iterator(
      ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
  EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind);
  EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(0, 1, 2));
  EXPECT_THAT(
      GetBlockTrace(&multi_iterator),
      Pair(ElementsAre(FieldsAre(ElementsAre(0, 0, 0), ElementsAre(2, 3),
                                 ElementsAre(ElementsAreArray({
                                     &a(0, 0, 0),
                                     &a(0, 0, 1),
                                     &a(0, 0, 2),
                                     &a(0, 2, 0),
                                     &a(0, 2, 1),
                                     &a(0, 2, 2),
                                 }))),
                       FieldsAre(ElementsAre(1, 0, 0), ElementsAre(2, 3),
                                 ElementsAre(ElementsAreArray({
                                     &a(1, 0, 0),
                                     &a(1, 0, 1),
                                     &a(1, 0, 2),
                                     &a(1, 2, 0),
                                     &a(1, 2, 1),
                                     &a(1, 2, 2),
                                 })))),
           absl::OkStatus()));
}

TEST(NDIterableTransformedArrayTest,
     InnermostBlockSizeLessThanInnermostIterationSize) {
  Arena arena;
  auto a = AllocateArray({2, 32768}, tensorstore::c_order,
                         tensorstore::value_init);
  auto ta = (a | tensorstore::Dims(0).IndexArraySlice(MakeArray({0, 1})))
                .value();
  auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
  struct IncrementValue {
    void operator()(int* x) const { *x += 1; }
  };
  constexpr tensorstore::internal::ElementwiseFunction<1> increment_value_func =
      tensorstore::internal::SimpleElementwiseFunction<IncrementValue(int)>();
  TENSORSTORE_ASSERT_OK(
      (tensorstore::internal::IterateOverNDIterables<1, true>(
          ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena,
          {&increment_value_func, nullptr})));
  EXPECT_THAT(a, tensorstore::MatchesArray(
                     tensorstore::BroadcastArray(
                         tensorstore::MakeScalarArray(1), a.shape())
                         .value()));
}

}
```

Code Url: https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_transformed_array.cc
Test Code Url: https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_transformed_array_test.cc
Commit Hash: 4f887a6430414cd6088e1743555015b10f116d50
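The OuterIndexArraySlice tests above all follow one pattern: output position (i, j) maps to input element (i, index_array[j]), and the iterator may visit dimensions in a reordered sequence. A minimal standalone sketch of that semantics in plain C++ (it does not use tensorstore; the array shape, index array, and loop order are taken from the first test above, where iteration_dimensions is {1, 0}):

```cpp
#include <cstdio>

int main() {
  int a[2][3] = {{1, 2, 3}, {4, 5, 6}};
  const int index_array[4] = {0, 2, 1, 1};
  // Iterate the indexed dimension (1) in the outer loop and dimension 0 in
  // the inner loop, mirroring iteration_dimensions == {1, 0}. The printed
  // address order matches the trace asserted by the first test.
  for (int j = 0; j < 4; ++j) {
    for (int i = 0; i < 2; ++i) {
      std::printf("&a(%d, %d) = %p\n", i, index_array[j],
                  static_cast<void*>(&a[i][index_array[j]]));
    }
  }
  return 0;
}
```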
Row 293
ID: 03d1f556-a605-4615-a23e-2c5adb2cbce3
Language: cpp
Repository Name: google/quiche
File Name: quic_idle_network_detector
File Path in Repository: quiche/quic/core/quic_idle_network_detector.cc
File Path for Unit Test: quiche/quic/core/quic_idle_network_detector_test.cc

Code:

```cpp
#include "quiche/quic/core/quic_idle_network_detector.h"

#include <algorithm>

#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/common/platform/api/quiche_logging.h"

namespace quic {

namespace {

}

QuicIdleNetworkDetector::QuicIdleNetworkDetector(Delegate* delegate,
                                                 QuicTime now, QuicAlarm* alarm)
    : delegate_(delegate),
      start_time_(now),
      handshake_timeout_(QuicTime::Delta::Infinite()),
      time_of_last_received_packet_(now),
      time_of_first_packet_sent_after_receiving_(QuicTime::Zero()),
      idle_network_timeout_(QuicTime::Delta::Infinite()),
      alarm_(*alarm) {}

void QuicIdleNetworkDetector::OnAlarm() {
  if (handshake_timeout_.IsInfinite()) {
    delegate_->OnIdleNetworkDetected();
    return;
  }
  if (idle_network_timeout_.IsInfinite()) {
    delegate_->OnHandshakeTimeout();
    return;
  }
  if (last_network_activity_time() + idle_network_timeout_ >
      start_time_ + handshake_timeout_) {
    delegate_->OnHandshakeTimeout();
    return;
  }
  delegate_->OnIdleNetworkDetected();
}

void QuicIdleNetworkDetector::SetTimeouts(
    QuicTime::Delta handshake_timeout, QuicTime::Delta idle_network_timeout) {
  handshake_timeout_ = handshake_timeout;
  idle_network_timeout_ = idle_network_timeout;
  SetAlarm();
}

void QuicIdleNetworkDetector::StopDetection() {
  alarm_.PermanentCancel();
  handshake_timeout_ = QuicTime::Delta::Infinite();
  idle_network_timeout_ = QuicTime::Delta::Infinite();
  handshake_timeout_ = QuicTime::Delta::Infinite();
  stopped_ = true;
}

void QuicIdleNetworkDetector::OnPacketSent(QuicTime now,
                                           QuicTime::Delta pto_delay) {
  if (time_of_first_packet_sent_after_receiving_ >
      time_of_last_received_packet_) {
    return;
  }
  time_of_first_packet_sent_after_receiving_ =
      std::max(time_of_first_packet_sent_after_receiving_, now);
  if (shorter_idle_timeout_on_sent_packet_) {
    MaybeSetAlarmOnSentPacket(pto_delay);
    return;
  }
  SetAlarm();
}

void QuicIdleNetworkDetector::OnPacketReceived(QuicTime now) {
  time_of_last_received_packet_ = std::max(time_of_last_received_packet_, now);
  SetAlarm();
}

void QuicIdleNetworkDetector::SetAlarm() {
  if (stopped_) {
    QUIC_BUG(quic_idle_detector_set_alarm_after_stopped)
        << "SetAlarm called after stopped";
    return;
  }
  QuicTime new_deadline = QuicTime::Zero();
  if (!handshake_timeout_.IsInfinite()) {
    new_deadline = start_time_ + handshake_timeout_;
  }
  if (!idle_network_timeout_.IsInfinite()) {
    const QuicTime idle_network_deadline = GetIdleNetworkDeadline();
    if (new_deadline.IsInitialized()) {
      new_deadline = std::min(new_deadline, idle_network_deadline);
    } else {
      new_deadline = idle_network_deadline;
    }
  }
  alarm_.Update(new_deadline, kAlarmGranularity);
}

void QuicIdleNetworkDetector::MaybeSetAlarmOnSentPacket(
    QuicTime::Delta pto_delay) {
  QUICHE_DCHECK(shorter_idle_timeout_on_sent_packet_);
  if (!handshake_timeout_.IsInfinite() || !alarm_.IsSet()) {
    SetAlarm();
    return;
  }
  const QuicTime deadline = alarm_.deadline();
  const QuicTime min_deadline = last_network_activity_time() + pto_delay;
  if (deadline > min_deadline) {
    return;
  }
  alarm_.Update(min_deadline, kAlarmGranularity);
}

QuicTime QuicIdleNetworkDetector::GetIdleNetworkDeadline() const {
  if (idle_network_timeout_.IsInfinite()) {
    return QuicTime::Zero();
  }
  return last_network_activity_time() + idle_network_timeout_;
}

}
```

Unit Test - (Ground Truth):

```cpp
#include "quiche/quic/core/quic_idle_network_detector.h"

#include "quiche/quic/core/quic_connection_alarms.h"
#include "quiche/quic/core/quic_one_block_arena.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/mock_quic_connection_alarms.h"
#include "quiche/quic/test_tools/quic_test_utils.h"

namespace quic {
namespace test {

class QuicIdleNetworkDetectorTestPeer {
 public:
  static QuicAlarm& GetAlarm(QuicIdleNetworkDetector* detector) {
    return detector->alarm_;
  }
};

namespace {

class MockDelegate : public QuicIdleNetworkDetector::Delegate {
 public:
  MOCK_METHOD(void, OnHandshakeTimeout, (), (override));
  MOCK_METHOD(void, OnIdleNetworkDetected, (), (override));
};

class QuicIdleNetworkDetectorTest : public QuicTest {
 public:
  QuicIdleNetworkDetectorTest()
      : alarms_(&connection_alarms_delegate_, alarm_factory_, arena_),
        detector_(&delegate_, clock_.Now() + QuicTimeDelta::FromSeconds(1),
                  &alarms_.idle_network_detector_alarm()) {
    clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1));
    alarm_ = static_cast<MockAlarmFactory::TestAlarm*>(
        &alarms_.idle_network_detector_alarm());
    ON_CALL(connection_alarms_delegate_, OnIdleDetectorAlarm())
        .WillByDefault([&] { detector_.OnAlarm(); });
  }

 protected:
  testing::StrictMock<MockDelegate> delegate_;
  MockConnectionAlarmsDelegate connection_alarms_delegate_;
  QuicConnectionArena arena_;
  MockAlarmFactory alarm_factory_;
  QuicConnectionAlarms alarms_;
  MockClock clock_;
  QuicIdleNetworkDetector detector_;
  MockAlarmFactory::TestAlarm* alarm_;
};

TEST_F(QuicIdleNetworkDetectorTest,
       IdleNetworkDetectedBeforeHandshakeCompletes) {
  EXPECT_FALSE(alarm_->IsSet());
  detector_.SetTimeouts(
      QuicTime::Delta::FromSeconds(30),
      QuicTime::Delta::FromSeconds(20));
  EXPECT_TRUE(alarm_->IsSet());
  EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(20),
            alarm_->deadline());
  clock_.AdvanceTime(QuicTime::Delta::FromSeconds(20));
  EXPECT_CALL(delegate_, OnIdleNetworkDetected());
  alarm_->Fire();
}

TEST_F(QuicIdleNetworkDetectorTest, HandshakeTimeout) {
  EXPECT_FALSE(alarm_->IsSet());
  detector_.SetTimeouts(
      QuicTime::Delta::FromSeconds(30),
      QuicTime::Delta::FromSeconds(20));
  EXPECT_TRUE(alarm_->IsSet());
  clock_.AdvanceTime(QuicTime::Delta::FromSeconds(15));
  detector_.OnPacketReceived(clock_.Now());
  EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(15),
            alarm_->deadline());
  clock_.AdvanceTime(QuicTime::Delta::FromSeconds(15));
  EXPECT_CALL(delegate_, OnHandshakeTimeout());
  alarm_->Fire();
}

TEST_F(QuicIdleNetworkDetectorTest,
       IdleNetworkDetectedAfterHandshakeCompletes) {
  EXPECT_FALSE(alarm_->IsSet());
  detector_.SetTimeouts(
      QuicTime::Delta::FromSeconds(30),
      QuicTime::Delta::FromSeconds(20));
  EXPECT_TRUE(alarm_->IsSet());
  EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(20),
            alarm_->deadline());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(200));
  detector_.OnPacketReceived(clock_.Now());
  detector_.SetTimeouts(
      QuicTime::Delta::Infinite(),
      QuicTime::Delta::FromSeconds(600));
  EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(600),
            alarm_->deadline());
  clock_.AdvanceTime(QuicTime::Delta::FromSeconds(600));
  EXPECT_CALL(delegate_, OnIdleNetworkDetected());
  alarm_->Fire();
}

TEST_F(QuicIdleNetworkDetectorTest,
       DoNotExtendIdleDeadlineOnConsecutiveSentPackets) {
  EXPECT_FALSE(alarm_->IsSet());
  detector_.SetTimeouts(
      QuicTime::Delta::FromSeconds(30),
      QuicTime::Delta::FromSeconds(20));
  EXPECT_TRUE(alarm_->IsSet());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(200));
  detector_.OnPacketReceived(clock_.Now());
  detector_.SetTimeouts(
      QuicTime::Delta::Infinite(),
      QuicTime::Delta::FromSeconds(600));
  EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(600),
            alarm_->deadline());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(200));
  detector_.OnPacketSent(clock_.Now(), QuicTime::Delta::Zero());
  const QuicTime packet_sent_time = clock_.Now();
  EXPECT_EQ(packet_sent_time + QuicTime::Delta::FromSeconds(600),
            alarm_->deadline());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(200));
  detector_.OnPacketSent(clock_.Now(), QuicTime::Delta::Zero());
  EXPECT_EQ(packet_sent_time + QuicTime::Delta::FromSeconds(600),
            alarm_->deadline());
  clock_.AdvanceTime(QuicTime::Delta::FromSeconds(600) -
                     QuicTime::Delta::FromMilliseconds(200));
  EXPECT_CALL(delegate_, OnIdleNetworkDetected());
  alarm_->Fire();
}

TEST_F(QuicIdleNetworkDetectorTest, ShorterIdleTimeoutOnSentPacket) {
  detector_.enable_shorter_idle_timeout_on_sent_packet();
  QuicTime::Delta idle_network_timeout = QuicTime::Delta::Zero();
  idle_network_timeout = QuicTime::Delta::FromSeconds(30);
  detector_.SetTimeouts(
      QuicTime::Delta::Infinite(), idle_network_timeout);
  EXPECT_TRUE(alarm_->IsSet());
  const QuicTime deadline = alarm_->deadline();
  EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(30), deadline);
  clock_.AdvanceTime(QuicTime::Delta::FromSeconds(15));
  detector_.OnPacketSent(clock_.Now(), QuicTime::Delta::FromSeconds(2));
  EXPECT_TRUE(alarm_->IsSet());
  EXPECT_EQ(deadline, alarm_->deadline());
  clock_.AdvanceTime(QuicTime::Delta::FromSeconds(14));
  detector_.OnPacketSent(clock_.Now(), QuicTime::Delta::FromSeconds(2));
  EXPECT_TRUE(alarm_->IsSet());
  EXPECT_EQ(deadline, alarm_->deadline());
  clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1));
  detector_.OnPacketReceived(clock_.Now());
  EXPECT_TRUE(alarm_->IsSet());
  EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(30),
            alarm_->deadline());
  clock_.AdvanceTime(QuicTime::Delta::FromSeconds(29));
  detector_.OnPacketSent(clock_.Now(), QuicTime::Delta::FromSeconds(2));
  EXPECT_TRUE(alarm_->IsSet());
  EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(2), alarm_->deadline());
}

TEST_F(QuicIdleNetworkDetectorTest, NoAlarmAfterStopped) {
  detector_.StopDetection();
  EXPECT_QUIC_BUG(
      detector_.SetTimeouts(
          QuicTime::Delta::FromSeconds(30),
          QuicTime::Delta::FromSeconds(20)),
      "SetAlarm called after stopped");
  EXPECT_FALSE(alarm_->IsSet());
}

}
}
}
```

Code Url: https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_idle_network_detector.cc
Test Code Url: https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_idle_network_detector_test.cc
Commit Hash: 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
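The core of SetAlarm() above is picking the earlier of two deadlines: the handshake deadline (start time plus handshake timeout) and the idle deadline (last network activity plus idle timeout), where either timeout may be infinite. A minimal sketch of that arithmetic in standard C++, modeling "infinite" with std::optional instead of QuicTime::Delta::Infinite() (the names here are illustrative, not part of QUICHE):

```cpp
#include <algorithm>
#include <chrono>
#include <optional>

using Clock = std::chrono::steady_clock;

// Returns the alarm deadline, or nullopt when both timeouts are "infinite"
// and no alarm should be scheduled.
std::optional<Clock::time_point> ComputeDeadline(
    Clock::time_point start, Clock::time_point last_activity,
    std::optional<Clock::duration> handshake_timeout,
    std::optional<Clock::duration> idle_timeout) {
  std::optional<Clock::time_point> deadline;
  if (handshake_timeout) {
    deadline = start + *handshake_timeout;
  }
  if (idle_timeout) {
    Clock::time_point idle_deadline = last_activity + *idle_timeout;
    deadline = deadline ? std::min(*deadline, idle_deadline) : idle_deadline;
  }
  return deadline;
}
```

This mirrors why the HandshakeTimeout test sees the deadline move when a packet arrives: receiving data pushes the idle deadline forward, and once it passes the fixed handshake deadline, the handshake deadline becomes the binding one.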
Row 294
ID: 32f38623-1d99-4128-9f44-0ec7c874d2fa
Language: cpp
Repository Name: abseil/abseil-cpp
File Name: log_entry
File Path in Repository: absl/log/log_entry.cc
File Path for Unit Test: absl/log/log_entry_test.cc

Code:

```cpp
#include "absl/log/log_entry.h"

#include "absl/base/config.h"

namespace absl {
ABSL_NAMESPACE_BEGIN

#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr int LogEntry::kNoVerbosityLevel;
constexpr int LogEntry::kNoVerboseLevel;
#endif

#ifdef __APPLE__
namespace log_internal {
extern const char kAvoidEmptyLogEntryLibraryWarning;
const char kAvoidEmptyLogEntryLibraryWarning = 0;
}
#endif

ABSL_NAMESPACE_END
}
```

Unit Test - (Ground Truth):

```cpp
#include "absl/log/log_entry.h"

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <limits>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/log_severity.h"
#include "absl/log/internal/append_truncated.h"
#include "absl/log/internal/log_format.h"
#include "absl/log/internal/test_helpers.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/time/civil_time.h"
#include "absl/time/time.h"
#include "absl/types/span.h"

namespace {
using ::absl::log_internal::LogEntryTestPeer;
using ::testing::Eq;
using ::testing::IsTrue;
using ::testing::StartsWith;
using ::testing::StrEq;

auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
    new absl::log_internal::LogTestEnvironment);
}

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace log_internal {

class LogEntryTestPeer {
 public:
  LogEntryTestPeer(absl::string_view base_filename, int line, bool prefix,
                   absl::LogSeverity severity, absl::string_view timestamp,
                   absl::LogEntry::tid_t tid, PrefixFormat format,
                   absl::string_view text_message)
      : format_{format}, buf_(15000, '\0') {
    entry_.base_filename_ = base_filename;
    entry_.line_ = line;
    entry_.prefix_ = prefix;
    entry_.severity_ = severity;
    std::string time_err;
    EXPECT_THAT(
        absl::ParseTime("%Y-%m-%d%ET%H:%M:%E*S", timestamp,
                        absl::LocalTimeZone(), &entry_.timestamp_, &time_err),
        IsTrue())
        << "Failed to parse time " << timestamp << ": " << time_err;
    entry_.tid_ = tid;
    std::pair<absl::string_view, std::string> timestamp_bits =
        absl::StrSplit(timestamp, absl::ByChar('.'));
    EXPECT_THAT(absl::ParseCivilTime(timestamp_bits.first, &ci_.cs), IsTrue())
        << "Failed to parse time " << timestamp_bits.first;
    timestamp_bits.second.resize(9, '0');
    int64_t nanos = 0;
    EXPECT_THAT(absl::SimpleAtoi(timestamp_bits.second, &nanos), IsTrue())
        << "Failed to parse time " << timestamp_bits.first;
    ci_.subsecond = absl::Nanoseconds(nanos);
    absl::Span<char> view = absl::MakeSpan(buf_);
    view.remove_suffix(2);
    entry_.prefix_len_ =
        entry_.prefix_
            ? log_internal::FormatLogPrefix(
                  entry_.log_severity(), entry_.timestamp(), entry_.tid(),
                  entry_.source_basename(), entry_.source_line(), format_, view)
            : 0;
    EXPECT_THAT(entry_.prefix_len_,
                Eq(static_cast<size_t>(view.data() - buf_.data())));
    log_internal::AppendTruncated(text_message, view);
    view = absl::Span<char>(view.data(), view.size() + 2);
    view[0] = '\n';
    view[1] = '\0';
    view.remove_prefix(2);
    buf_.resize(static_cast<size_t>(view.data() - buf_.data()));
    entry_.text_message_with_prefix_and_newline_and_nul_ = absl::MakeSpan(buf_);
  }
  LogEntryTestPeer(const LogEntryTestPeer&) = delete;
  LogEntryTestPeer& operator=(const LogEntryTestPeer&) = delete;

  std::string FormatLogMessage() const {
    return log_internal::FormatLogMessage(
        entry_.log_severity(), ci_.cs, ci_.subsecond, entry_.tid(),
        entry_.source_basename(), entry_.source_line(), format_,
        entry_.text_message());
  }
  std::string FormatPrefixIntoSizedBuffer(size_t sz) {
    std::string str(sz, '\0');
    absl::Span<char> buf(&str[0], str.size());
    const size_t prefix_size = log_internal::FormatLogPrefix(
        entry_.log_severity(), entry_.timestamp(), entry_.tid(),
        entry_.source_basename(), entry_.source_line(), format_, buf);
    EXPECT_THAT(prefix_size, Eq(static_cast<size_t>(buf.data() - str.data())));
    str.resize(prefix_size);
    return str;
  }
  const absl::LogEntry& entry() const { return entry_; }

 private:
  absl::LogEntry entry_;
  PrefixFormat format_;
  absl::TimeZone::CivilInfo ci_;
  std::vector<char> buf_;
};

}
ABSL_NAMESPACE_END
}

namespace {
constexpr bool kUsePrefix = true, kNoPrefix = false;

TEST(LogEntryTest, Baseline) {
  LogEntryTestPeer entry("foo.cc", 1234, kUsePrefix, absl::LogSeverity::kInfo,
                         "2020-01-02T03:04:05.6789", 451,
                         absl::log_internal::PrefixFormat::kNotRaw,
                         "hello world");
  EXPECT_THAT(entry.FormatLogMessage(),
              Eq("I0102 03:04:05.678900 451 foo.cc:1234] hello world"));
  EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
              Eq("I0102 03:04:05.678900 451 foo.cc:1234] "));
  for (size_t sz = strlen("I0102 03:04:05.678900 451 foo.cc:1234] ") + 20;
       sz != std::numeric_limits<size_t>::max(); sz--)
    EXPECT_THAT("I0102 03:04:05.678900 451 foo.cc:1234] ",
                StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
  EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline(),
              Eq("I0102 03:04:05.678900 451 foo.cc:1234] hello world\n"));
  EXPECT_THAT(
      entry.entry().text_message_with_prefix_and_newline_c_str(),
      StrEq("I0102 03:04:05.678900 451 foo.cc:1234] hello world\n"));
  EXPECT_THAT(entry.entry().text_message_with_prefix(),
              Eq("I0102 03:04:05.678900 451 foo.cc:1234] hello world"));
  EXPECT_THAT(entry.entry().text_message(), Eq("hello world"));
}

TEST(LogEntryTest, NoPrefix) {
  LogEntryTestPeer entry("foo.cc", 1234, kNoPrefix, absl::LogSeverity::kInfo,
                         "2020-01-02T03:04:05.6789", 451,
                         absl::log_internal::PrefixFormat::kNotRaw,
                         "hello world");
  EXPECT_THAT(entry.FormatLogMessage(),
              Eq("I0102 03:04:05.678900 451 foo.cc:1234] hello world"));
  EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
              Eq("I0102 03:04:05.678900 451 foo.cc:1234] "));
  for (size_t sz = strlen("I0102 03:04:05.678900 451 foo.cc:1234] ") + 20;
       sz != std::numeric_limits<size_t>::max(); sz--)
    EXPECT_THAT("I0102 03:04:05.678900 451 foo.cc:1234] ",
                StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
  EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline(),
              Eq("hello world\n"));
  EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline_c_str(),
              StrEq("hello world\n"));
  EXPECT_THAT(entry.entry().text_message_with_prefix(), Eq("hello world"));
  EXPECT_THAT(entry.entry().text_message(), Eq("hello world"));
}

TEST(LogEntryTest, EmptyFields) {
  LogEntryTestPeer entry("", 0, kUsePrefix, absl::LogSeverity::kInfo,
                         "2020-01-02T03:04:05", 0,
                         absl::log_internal::PrefixFormat::kNotRaw, "");
  const std::string format_message = entry.FormatLogMessage();
  EXPECT_THAT(format_message, Eq("I0102 03:04:05.000000 0 :0] "));
  EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), Eq(format_message));
  for (size_t sz = format_message.size() + 20;
       sz != std::numeric_limits<size_t>::max(); sz--)
    EXPECT_THAT(format_message,
                StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
  EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline(),
              Eq("I0102 03:04:05.000000 0 :0] \n"));
  EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline_c_str(),
              StrEq("I0102 03:04:05.000000 0 :0] \n"));
  EXPECT_THAT(entry.entry().text_message_with_prefix(),
              Eq("I0102 03:04:05.000000 0 :0] "));
  EXPECT_THAT(entry.entry().text_message(), Eq(""));
}

TEST(LogEntryTest, NegativeFields) {
  if (std::is_signed<absl::LogEntry::tid_t>::value) {
    LogEntryTestPeer entry(
        "foo.cc", -1234, kUsePrefix, absl::LogSeverity::kInfo,
        "2020-01-02T03:04:05.6789", static_cast<absl::LogEntry::tid_t>(-451),
        absl::log_internal::PrefixFormat::kNotRaw, "hello world");
    EXPECT_THAT(entry.FormatLogMessage(),
                Eq("I0102 03:04:05.678900 -451 foo.cc:-1234] hello world"));
    EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
                Eq("I0102 03:04:05.678900 -451 foo.cc:-1234] "));
    for (size_t sz =
             strlen("I0102 03:04:05.678900 -451 foo.cc:-1234] ") + 20;
         sz != std::numeric_limits<size_t>::max(); sz--)
      EXPECT_THAT("I0102 03:04:05.678900 -451 foo.cc:-1234] ",
                  StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
    EXPECT_THAT(
        entry.entry().text_message_with_prefix_and_newline(),
        Eq("I0102 03:04:05.678900 -451 foo.cc:-1234] hello world\n"));
    EXPECT_THAT(
        entry.entry().text_message_with_prefix_and_newline_c_str(),
        StrEq("I0102 03:04:05.678900 -451 foo.cc:-1234] hello world\n"));
    EXPECT_THAT(entry.entry().text_message_with_prefix(),
                Eq("I0102 03:04:05.678900 -451 foo.cc:-1234] hello world"));
    EXPECT_THAT(entry.entry().text_message(), Eq("hello world"));
  } else {
    LogEntryTestPeer entry("foo.cc", -1234, kUsePrefix,
                           absl::LogSeverity::kInfo, "2020-01-02T03:04:05.6789",
                           451, absl::log_internal::PrefixFormat::kNotRaw,
                           "hello world");
    EXPECT_THAT(entry.FormatLogMessage(),
                Eq("I0102 03:04:05.678900 451 foo.cc:-1234] hello world"));
    EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
                Eq("I0102 03:04:05.678900 451 foo.cc:-1234] "));
    for (size_t sz =
             strlen("I0102 03:04:05.678900 451 foo.cc:-1234] ") + 20;
         sz != std::numeric_limits<size_t>::max(); sz--)
      EXPECT_THAT("I0102 03:04:05.678900 451 foo.cc:-1234] ",
                  StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
    EXPECT_THAT(
        entry.entry().text_message_with_prefix_and_newline(),
        Eq("I0102 03:04:05.678900 451 foo.cc:-1234] hello world\n"));
    EXPECT_THAT(
        entry.entry().text_message_with_prefix_and_newline_c_str(),
        StrEq("I0102 03:04:05.678900 451 foo.cc:-1234] hello world\n"));
    EXPECT_THAT(entry.entry().text_message_with_prefix(),
                Eq("I0102 03:04:05.678900 451 foo.cc:-1234] hello world"));
    EXPECT_THAT(entry.entry().text_message(), Eq("hello world"));
  }
}

TEST(LogEntryTest, LongFields) {
  LogEntryTestPeer entry(
      "I am the very model of a modern Major-General / "
      "I've information vegetable, animal, and mineral.",
      2147483647, kUsePrefix, absl::LogSeverity::kInfo,
      "2020-01-02T03:04:05.678967896789", 2147483647,
      absl::log_internal::PrefixFormat::kNotRaw,
      "I know the kings of England, and I quote the fights historical / "
      "From Marathon to Waterloo, in order categorical.");
  EXPECT_THAT(entry.FormatLogMessage(),
              Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
                 "modern Major-General / I've information vegetable, animal, "
                 "and mineral.:2147483647] I know the kings of England, and I "
                 "quote the fights historical / From Marathon to Waterloo, in "
                 "order categorical."));
  EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
              Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
                 "modern Major-General / I've information vegetable, animal, "
                 "and mineral.:2147483647] "));
  for (size_t sz =
           strlen("I0102 03:04:05.678967 2147483647 I am the very model of a "
                  "modern Major-General / I've information vegetable, animal, "
                  "and mineral.:2147483647] ") +
           20;
       sz != std::numeric_limits<size_t>::max(); sz--)
    EXPECT_THAT(
        "I0102 03:04:05.678967 2147483647 I am the very model of a "
        "modern Major-General / I've information vegetable, animal, "
        "and mineral.:2147483647] ",
        StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
  EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline(),
              Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
                 "modern Major-General / I've information vegetable, animal, "
                 "and mineral.:2147483647] I know the kings of England, and I "
                 "quote the fights historical / From Marathon to Waterloo, in "
                 "order categorical.\n"));
  EXPECT_THAT(
      entry.entry().text_message_with_prefix_and_newline_c_str(),
      StrEq("I0102 03:04:05.678967 2147483647 I am the very model of a "
            "modern Major-General / I've information vegetable, animal, "
            "and mineral.:2147483647] I know the kings of England, and I "
            "quote the fights historical / From Marathon to Waterloo, in "
            "order categorical.\n"));
  EXPECT_THAT(entry.entry().text_message_with_prefix(),
              Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
                 "modern Major-General / I've information vegetable, animal, "
                 "and mineral.:2147483647] I know the kings of England, and I "
                 "quote the fights historical / From Marathon to Waterloo, in "
                 "order categorical."));
  EXPECT_THAT(
      entry.entry().text_message(),
      Eq("I know the kings of England, and I quote the fights historical / "
         "From Marathon to Waterloo, in order categorical."));
}

TEST(LogEntryTest, LongNegativeFields) {
  if (std::is_signed<absl::LogEntry::tid_t>::value) {
    LogEntryTestPeer entry(
        "I am the very model of a modern Major-General / "
        "I've information vegetable, animal, and mineral.",
        -2147483647, kUsePrefix, absl::LogSeverity::kInfo,
        "2020-01-02T03:04:05.678967896789",
        static_cast<absl::LogEntry::tid_t>(-2147483647),
        absl::log_internal::PrefixFormat::kNotRaw,
        "I know the kings of England, and I quote the fights historical / "
        "From Marathon to Waterloo, in order categorical.");
    EXPECT_THAT(
        entry.FormatLogMessage(),
        Eq("I0102 03:04:05.678967 -2147483647 I am the very model of a "
           "modern Major-General / I've information vegetable, animal, "
           "and mineral.:-2147483647] I know the kings of England, and I "
           "quote the fights historical / From Marathon to Waterloo, in "
           "order categorical."));
    EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
                Eq("I0102 03:04:05.678967 -2147483647 I am the very model of a "
                   "modern Major-General / I've information vegetable, animal, "
                   "and mineral.:-2147483647] "));
    for (size_t sz =
             strlen(
                 "I0102 03:04:05.678967 -2147483647 I am the very model of a "
                 "modern Major-General / I've information vegetable, animal, "
                 "and mineral.:-2147483647] ") +
             20;
         sz != std::numeric_limits<size_t>::max(); sz--)
      EXPECT_THAT(
          "I0102 03:04:05.678967 -2147483647 I am the very model of a "
          "modern Major-General / I've information vegetable, animal, "
          "and mineral.:-2147483647] ",
          StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
    EXPECT_THAT(
        entry.entry().text_message_with_prefix_and_newline(),
        Eq("I0102 03:04:05.678967 -2147483647 I am the very model of a "
           "modern Major-General / I've information vegetable, animal, "
           "and mineral.:-2147483647] I know the kings of England, and I "
           "quote the fights historical / From Marathon to Waterloo, in "
           "order categorical.\n"));
    EXPECT_THAT(
        entry.entry().text_message_with_prefix_and_newline_c_str(),
        StrEq("I0102 03:04:05.678967 -2147483647 I am the very model of a "
              "modern Major-General / I've information vegetable, animal, "
              "and mineral.:-2147483647] I know the kings of England, and I "
              "quote the fights historical / From Marathon to Waterloo, in "
              "order categorical.\n"));
    EXPECT_THAT(
        entry.entry().text_message_with_prefix(),
        Eq("I0102 03:04:05.678967 -2147483647 I am the very model of a "
           "modern Major-General / I've information vegetable, animal, "
           "and mineral.:-2147483647] I know the kings of England, and I "
           "quote the fights historical / From Marathon to Waterloo, in "
           "order categorical."));
    EXPECT_THAT(
        entry.entry().text_message(),
        Eq("I know the kings of England, and I quote the fights historical / "
           "From Marathon to Waterloo, in order categorical."));
  } else {
    LogEntryTestPeer entry(
        "I am the very model of a modern Major-General / "
        "I've information vegetable, animal, and mineral.",
        -2147483647, kUsePrefix, absl::LogSeverity::kInfo,
        "2020-01-02T03:04:05.678967896789", 2147483647,
        absl::log_internal::PrefixFormat::kNotRaw,
        "I know the kings of England, and I quote the fights historical / "
        "From Marathon to Waterloo, in order categorical.");
    EXPECT_THAT(
        entry.FormatLogMessage(),
        Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
           "modern Major-General / I've information vegetable, animal, "
           "and mineral.:-2147483647] I know the kings of England, and I "
           "quote the fights historical / From Marathon to Waterloo, in "
           "order categorical."));
    EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
                Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
                   "modern Major-General / I've information vegetable, animal, "
                   "and mineral.:-2147483647] "));
    for (size_t sz =
             strlen(
                 "I0102 03:04:05.678967 2147483647 I am the very model of a "
                 "modern Major-General / I've information vegetable, animal, "
                 "and mineral.:-2147483647] ") +
             20;
         sz != std::numeric_limits<size_t>::max(); sz--)
      EXPECT_THAT(
          "I0102 03:04:05.678967 2147483647 I am the very model of a "
          "modern Major-General / I've information vegetable, animal, "
          "and mineral.:-2147483647] ",
          StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
    EXPECT_THAT(
        entry.entry().text_message_with_prefix_and_newline(),
        Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
           "modern Major-General / I've information vegetable, animal, "
           "and mineral.:-2147483647] I know the kings of England, and I "
           "quote the fights historical / From Marathon to Waterloo, in "
           "order categorical.\n"));
    EXPECT_THAT(
        entry.entry().text_message_with_prefix_and_newline_c_str(),
        StrEq("I0102 03:04:05.678967 2147483647 I am the very model of a "
              "modern Major-General / I've information vegetable, animal, "
              "and mineral.:-2147483647] I know the kings of England, and I "
              "quote the fights historical / From Marathon to Waterloo, in "
              "order categorical.\n"));
    EXPECT_THAT(
        entry.entry().text_message_with_prefix(),
        Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
           "modern Major-General / I've information vegetable, animal, "
           "and mineral.:-2147483647] I know the kings of England, and I "
           "quote the fights historical / From Marathon to Waterloo, in "
           "order categorical."));
    EXPECT_THAT(
        entry.entry().text_message(),
        Eq("I know the kings of England, and I quote the fights historical / "
           "From Marathon to Waterloo, in order categorical."));
  }
}

TEST(LogEntryTest, Raw) {
  LogEntryTestPeer entry("foo.cc", 1234, kUsePrefix, absl::LogSeverity::kInfo,
                         "2020-01-02T03:04:05.6789", 451,
                         absl::log_internal::PrefixFormat::kRaw, "hello world");
  EXPECT_THAT(
      entry.FormatLogMessage(),
      Eq("I0102 03:04:05.678900 451 foo.cc:1234] RAW: hello world"));
  EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
              Eq("I0102 03:04:05.678900 451 foo.cc:1234] RAW: "));
  for (size_t sz =
           strlen("I0102 03:04:05.678900 451 foo.cc:1234] RAW: ") + 20;
       sz != std::numeric_limits<size_t>::max(); sz--)
    EXPECT_THAT("I0102 03:04:05.678900 451 foo.cc:1234] RAW: ",
                StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
  EXPECT_THAT(
      entry.entry().text_message_with_prefix_and_newline(),
      Eq("I0102 03:04:05.678900 451 foo.cc:1234] RAW: hello world\n"));
  EXPECT_THAT(
      entry.entry().text_message_with_prefix_and_newline_c_str(),
      StrEq("I0102 03:04:05.678900 451 foo.cc:1234] RAW: hello world\n"));
  EXPECT_THAT(
      entry.entry().text_message_with_prefix(),
      Eq("I0102 03:04:05.678900 451 foo.cc:1234] RAW: hello world"));
  EXPECT_THAT(entry.entry().text_message(), Eq("hello world"));
}

}
```

Code Url: https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/log_entry.cc
Test Code Url: https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/log_entry_test.cc
Commit Hash: 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4
Row 295
ID: d37b53b4-31a7-4b63-b4fa-2549c355aa9f
Language: cpp
Repository Name: google/cel-cpp
File Name: type_introspector
File Path in Repository: common/type_introspector.cc
File Path for Unit Test: extensions/protobuf/type_introspector_test.cc

Code:

```cpp
#include "common/type_introspector.h"

#include <algorithm>
#include <cstdint>
#include <utility>

#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "common/memory.h"
#include "common/type.h"
#include "common/types/thread_compatible_type_introspector.h"

namespace cel {

namespace {

common_internal::BasicStructTypeField MakeBasicStructTypeField(
    absl::string_view name, Type type, int32_t number) {
  return common_internal::BasicStructTypeField(name, number, type);
}

struct FieldNameComparer {
  using is_transparent = void;
  bool operator()(const common_internal::BasicStructTypeField& lhs,
                  const common_internal::BasicStructTypeField& rhs) const {
    return (*this)(lhs.name(), rhs.name());
  }
  bool operator()(const common_internal::BasicStructTypeField& lhs,
                  absl::string_view rhs) const {
    return (*this)(lhs.name(), rhs);
  }
  bool operator()(absl::string_view lhs,
                  const common_internal::BasicStructTypeField& rhs) const {
    return (*this)(lhs, rhs.name());
  }
  bool operator()(absl::string_view lhs, absl::string_view rhs) const {
    return lhs < rhs;
  }
};

struct FieldNumberComparer {
  using is_transparent = void;
  bool operator()(const common_internal::BasicStructTypeField& lhs,
                  const common_internal::BasicStructTypeField& rhs) const {
    return (*this)(lhs.number(), rhs.number());
  }
  bool operator()(const common_internal::BasicStructTypeField& lhs,
                  int64_t rhs) const {
    return (*this)(lhs.number(), rhs);
  }
  bool operator()(int64_t lhs,
                  const common_internal::BasicStructTypeField& rhs) const {
    return (*this)(lhs, rhs.number());
  }
  bool operator()(int64_t lhs, int64_t rhs) const { return lhs < rhs; }
};

struct WellKnownType {
  WellKnownType(
      const Type& type,
      std::initializer_list<common_internal::BasicStructTypeField> fields)
      : type(type), fields_by_name(fields), fields_by_number(fields) {
    std::sort(fields_by_name.begin(), fields_by_name.end(),
              FieldNameComparer{});
    std::sort(fields_by_number.begin(), fields_by_number.end(),
              FieldNumberComparer{});
  }

  explicit WellKnownType(const Type& type) : WellKnownType(type, {}) {}

  Type type;
  absl::InlinedVector<common_internal::BasicStructTypeField, 2> fields_by_name;
  absl::InlinedVector<common_internal::BasicStructTypeField, 2>
      fields_by_number;

  absl::optional<StructTypeField> FieldByName(absl::string_view name) const {
    auto it = std::lower_bound(fields_by_name.begin(), fields_by_name.end(),
                               name, FieldNameComparer{});
    if (it == fields_by_name.end() || it->name() != name) {
      return absl::nullopt;
    }
    return *it;
  }

  absl::optional<StructTypeField> FieldByNumber(int64_t number) const {
    auto it = std::lower_bound(fields_by_number.begin(), fields_by_number.end(),
                               number, FieldNumberComparer{});
    if (it == fields_by_number.end() || it->number() != number) {
      return absl::nullopt;
    }
    return *it;
  }
};

using WellKnownTypesMap = absl::flat_hash_map<absl::string_view, WellKnownType>;

const WellKnownTypesMap& GetWellKnownTypesMap() {
  static const WellKnownTypesMap* types = []() -> WellKnownTypesMap* {
    WellKnownTypesMap* types = new WellKnownTypesMap();
    types->insert_or_assign(
        "google.protobuf.BoolValue",
        WellKnownType{BoolWrapperType{},
                      {MakeBasicStructTypeField("value", BoolType{}, 1)}});
    types->insert_or_assign(
        "google.protobuf.Int32Value",
        WellKnownType{IntWrapperType{},
                      {MakeBasicStructTypeField("value", IntType{}, 1)}});
    types->insert_or_assign(
        "google.protobuf.Int64Value",
        WellKnownType{IntWrapperType{},
                      {MakeBasicStructTypeField("value", IntType{}, 1)}});
    types->insert_or_assign(
        "google.protobuf.UInt32Value",
        WellKnownType{UintWrapperType{},
                      {MakeBasicStructTypeField("value", UintType{}, 1)}});
    types->insert_or_assign(
        "google.protobuf.UInt64Value",
        WellKnownType{UintWrapperType{},
                      {MakeBasicStructTypeField("value", UintType{}, 1)}});
    types->insert_or_assign(
        "google.protobuf.FloatValue",
        WellKnownType{DoubleWrapperType{},
                      {MakeBasicStructTypeField("value", DoubleType{}, 1)}});
    types->insert_or_assign(
        "google.protobuf.DoubleValue",
        WellKnownType{DoubleWrapperType{},
                      {MakeBasicStructTypeField("value", DoubleType{}, 1)}});
    types->insert_or_assign(
        "google.protobuf.StringValue",
        WellKnownType{StringWrapperType{},
                      {MakeBasicStructTypeField("value", StringType{}, 1)}});
    types->insert_or_assign(
        "google.protobuf.BytesValue",
        WellKnownType{BytesWrapperType{},
                      {MakeBasicStructTypeField("value", BytesType{}, 1)}});
    types->insert_or_assign(
        "google.protobuf.Duration",
        WellKnownType{DurationType{},
                      {MakeBasicStructTypeField("seconds", IntType{}, 1),
                       MakeBasicStructTypeField("nanos", IntType{}, 2)}});
    types->insert_or_assign(
        "google.protobuf.Timestamp",
        WellKnownType{TimestampType{},
                      {MakeBasicStructTypeField("seconds", IntType{}, 1),
                       MakeBasicStructTypeField("nanos", IntType{}, 2)}});
    types->insert_or_assign(
        "google.protobuf.Value",
        WellKnownType{
            DynType{},
            {MakeBasicStructTypeField("null_value", NullType{}, 1),
             MakeBasicStructTypeField("number_value", DoubleType{}, 2),
             MakeBasicStructTypeField("string_value", StringType{}, 3),
             MakeBasicStructTypeField("bool_value", BoolType{}, 4),
             MakeBasicStructTypeField("struct_value", JsonMapType(), 5),
             MakeBasicStructTypeField("list_value", ListType{}, 6)}});
    types->insert_or_assign(
        "google.protobuf.ListValue",
        WellKnownType{ListType{},
                      {MakeBasicStructTypeField("values", ListType{}, 1)}});
    types->insert_or_assign(
        "google.protobuf.Struct",
        WellKnownType{JsonMapType(),
                      {MakeBasicStructTypeField("fields", JsonMapType(), 1)}});
    types->insert_or_assign(
        "google.protobuf.Any",
        WellKnownType{AnyType{},
                      {MakeBasicStructTypeField("type_url", StringType{}, 1),
                       MakeBasicStructTypeField("value", BytesType{}, 2)}});
    types->insert_or_assign("null_type", WellKnownType{NullType{}});
    types->insert_or_assign("google.protobuf.NullValue",
                            WellKnownType{NullType{}});
    types->insert_or_assign("bool", WellKnownType{BoolType{}});
    types->insert_or_assign("int", WellKnownType{IntType{}});
    types->insert_or_assign("uint", WellKnownType{UintType{}});
    types->insert_or_assign("double", WellKnownType{DoubleType{}});
    types->insert_or_assign("bytes", WellKnownType{BytesType{}});
    types->insert_or_assign("string", WellKnownType{StringType{}});
    types->insert_or_assign("list", WellKnownType{ListType{}});
    types->insert_or_assign("map", WellKnownType{MapType{}});
    types->insert_or_assign("type", WellKnownType{TypeType{}});
    return types;
  }();
  return *types;
}

}

absl::StatusOr<absl::optional<Type>> TypeIntrospector::FindType(
    TypeFactory& type_factory, absl::string_view name) const {
  const auto& well_known_types = GetWellKnownTypesMap();
  if (auto it = well_known_types.find(name); it != well_known_types.end()) {
    return it->second.type;
  }
  return FindTypeImpl(type_factory, name);
}

absl::StatusOr<absl::optional<TypeIntrospector::EnumConstant>>
TypeIntrospector::FindEnumConstant(TypeFactory& type_factory,
                                   absl::string_view type,
                                   absl::string_view value) const {
  if (type == "google.protobuf.NullValue" && value == "NULL_VALUE") {
    return EnumConstant{NullType{}, "google.protobuf.NullValue", "NULL_VALUE",
                        0};
  }
  return FindEnumConstantImpl(type_factory, type, value);
}

absl::StatusOr<absl::optional<StructTypeField>>
TypeIntrospector::FindStructTypeFieldByName(TypeFactory& type_factory,
                                            absl::string_view type,
                                            absl::string_view name) const {
  const auto& well_known_types = GetWellKnownTypesMap();
  if (auto it = well_known_types.find(type); it != well_known_types.end()) {
    return it->second.FieldByName(name);
  }
  return FindStructTypeFieldByNameImpl(type_factory, type, name);
}

absl::StatusOr<absl::optional<Type>> TypeIntrospector::FindTypeImpl(
    TypeFactory&, absl::string_view) const {
  return absl::nullopt;
}

absl::StatusOr<absl::optional<TypeIntrospector::EnumConstant>>
TypeIntrospector::FindEnumConstantImpl(TypeFactory&, absl::string_view,
                                       absl::string_view) const {
  return absl::nullopt;
}

absl::StatusOr<absl::optional<StructTypeField>>
TypeIntrospector::FindStructTypeFieldByNameImpl(TypeFactory&, absl::string_view,
                                                absl::string_view) const {
  return absl::nullopt;
}

Shared<TypeIntrospector> NewThreadCompatibleTypeIntrospector(
    MemoryManagerRef memory_manager) {
  return memory_manager
      .MakeShared<common_internal::ThreadCompatibleTypeIntrospector>();
}

}
```

Unit Test - (Ground Truth):

```cpp
#include "extensions/protobuf/type_introspector.h"

#include "absl/types/optional.h"
#include "common/type.h"
#include "common/type_kind.h"
#include "common/type_testing.h"
#include "internal/testing.h"
#include "proto/test/v1/proto2/test_all_types.pb.h"
#include "google/protobuf/descriptor.h"

namespace cel::extensions {
namespace {

using ::absl_testing::IsOkAndHolds;
using ::google::api::expr::test::v1::proto2::TestAllTypes;
using ::testing::Eq;
using ::testing::Optional;

class ProtoTypeIntrospectorTest
    : public common_internal::ThreadCompatibleTypeTest<> {
 private:
  Shared<TypeIntrospector> NewTypeIntrospector(
      MemoryManagerRef memory_manager) override {
    return memory_manager.MakeShared<ProtoTypeIntrospector>();
  }
};

TEST_P(ProtoTypeIntrospectorTest, FindType) {
  EXPECT_THAT(
      type_manager().FindType(TestAllTypes::descriptor()->full_name()),
      IsOkAndHolds(Optional(Eq(MessageType(TestAllTypes::GetDescriptor())))));
  EXPECT_THAT(type_manager().FindType("type.that.does.not.Exist"),
              IsOkAndHolds(Eq(absl::nullopt)));
}

TEST_P(ProtoTypeIntrospectorTest, FindStructTypeFieldByName) {
  ASSERT_OK_AND_ASSIGN(
      auto field, type_manager().FindStructTypeFieldByName(
                      TestAllTypes::descriptor()->full_name(), "single_int32"));
  ASSERT_TRUE(field.has_value());
  EXPECT_THAT(field->name(), Eq("single_int32"));
  EXPECT_THAT(field->number(), Eq(1));
  EXPECT_THAT(
      type_manager().FindStructTypeFieldByName(
          TestAllTypes::descriptor()->full_name(), "field_that_does_not_exist"),
      IsOkAndHolds(Eq(absl::nullopt)));
  EXPECT_THAT(type_manager().FindStructTypeFieldByName(
                  "type.that.does.not.Exist", "does_not_matter"),
              IsOkAndHolds(Eq(absl::nullopt)));
}

TEST_P(ProtoTypeIntrospectorTest, FindEnumConstant) {
  ProtoTypeIntrospector introspector;
  const auto* enum_desc = TestAllTypes::NestedEnum_descriptor();
  ASSERT_OK_AND_ASSIGN(
      auto enum_constant,
      introspector.FindEnumConstant(
          type_manager(),
          "google.api.expr.test.v1.proto2.TestAllTypes.NestedEnum", "BAZ"));
  ASSERT_TRUE(enum_constant.has_value());
  EXPECT_EQ(enum_constant->type.kind(), TypeKind::kEnum);
  EXPECT_EQ(enum_constant->type_full_name, enum_desc->full_name());
  EXPECT_EQ(enum_constant->value_name, "BAZ");
  EXPECT_EQ(enum_constant->number, 2);
}

TEST_P(ProtoTypeIntrospectorTest, FindEnumConstantNull) {
  ProtoTypeIntrospector introspector;
  ASSERT_OK_AND_ASSIGN(
      auto enum_constant,
      introspector.FindEnumConstant(type_manager(), "google.protobuf.NullValue",
                                    "NULL_VALUE"));
  ASSERT_TRUE(enum_constant.has_value());
  EXPECT_EQ(enum_constant->type.kind(), TypeKind::kNull);
  EXPECT_EQ(enum_constant->type_full_name, "google.protobuf.NullValue");
  EXPECT_EQ(enum_constant->value_name, "NULL_VALUE");
  EXPECT_EQ(enum_constant->number, 0);
}

TEST_P(ProtoTypeIntrospectorTest, FindEnumConstantUnknownEnum) {
  ProtoTypeIntrospector introspector;
  ASSERT_OK_AND_ASSIGN(
      auto enum_constant,
      introspector.FindEnumConstant(type_manager(), "NotARealEnum", "BAZ"));
  EXPECT_FALSE(enum_constant.has_value());
}

TEST_P(ProtoTypeIntrospectorTest, FindEnumConstantUnknownValue) {
  ProtoTypeIntrospector introspector;
  ASSERT_OK_AND_ASSIGN(
      auto enum_constant,
      introspector.FindEnumConstant(
          type_manager(),
          "google.api.expr.test.v1.proto2.TestAllTypes.NestedEnum", "QUX"));
  ASSERT_FALSE(enum_constant.has_value());
}

INSTANTIATE_TEST_SUITE_P(
    ProtoTypeIntrospectorTest, ProtoTypeIntrospectorTest,
    ::testing::Values(MemoryManagement::kPooling,
                      MemoryManagement::kReferenceCounting),
    ProtoTypeIntrospectorTest::ToString);

}
}
```

Code Url: https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/type_introspector.cc
Test Code Url: https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/protobuf/type_introspector_test.cc
Commit Hash: 4552db5798fb0853b131b783d8875794334fae7f
\"xla/hlo/ir/hlo_computation.h\"\n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/hlo/ir/hlo_instructions.h\"\n#include \"xla/hlo/ir/hlo_opcode.h\"\n#include \"xla/hlo/utils/hlo_query.h\"\n#include \"xla/service/gpu/ir_emission_utils.h\"\n#include \"xla/service/gpu/matmul_utils.h\"\n#include \"xla/service/gpu/model/symbolic_tile_analysis.h\"\n#include \"xla/service/gpu/model/symbolic_tiled_hlo_instruction.h\"\n#include \"xla/service/gpu/model/tiled_hlo_computation.h\"\n#include \"xla/service/hlo_dce.h\"\n#include \"xla/service/instruction_fusion.h\"\n#include \"tsl/platform/errors.h\"\n#include \"tsl/platform/statusor.h\"\nnamespace xla::gpu {\nnamespace {\nabsl::Status FuseInstructionsForConsumer(\n const std::vector& instructions,\n HloInstruction& consumer) {\n HloComputation::Builder builder(instructions.back()->name());\n absl::flat_hash_map\n old_to_new_mapping;\n std::vector parameters;\n auto add_parameter = [&](HloInstruction* instruction) -> void {\n int param_index = parameters.size();\n old_to_new_mapping[instruction] =\n builder.AddInstruction(HloInstruction::CreateParameter(\n param_index, instruction->shape(),\n absl::StrCat(\"parameter_\", param_index)));\n parameters.push_back(instruction);\n };\n for (HloInstruction* instruction : instructions) {\n if (old_to_new_mapping.contains(instruction)) {\n continue;\n }\n if (instruction->opcode() == HloOpcode::kParameter) {\n add_parameter(instruction);\n continue;\n }\n std::vector new_operands;\n for (HloInstruction* operand : instruction->mutable_operands()) {\n if (!old_to_new_mapping.contains(operand)) {\n add_parameter(operand);\n }\n new_operands.push_back(old_to_new_mapping[operand]);\n }\n old_to_new_mapping[instruction] = builder.AddInstruction(\n instruction->CloneWithNewOperands(instruction->shape(), new_operands));\n }\n HloInstruction* old_root = instructions.back();\n old_to_new_mapping[old_root]->MarkAsRoot();\n HloComputation* computation =\n old_root->GetModule()->AddComputationAndUnifyNamesAndIds(\n builder.Build(), false);\n HloInstruction* fusion =\n old_root->parent()->AddInstruction(HloInstruction::CreateFusion(\n old_root->shape(), HloInstruction::FusionKind::kCustom, parameters,\n computation));\n fusion->GetModule()->SetAndUniquifyInstrName(fusion, \"block_fusion\");\n TF_ASSIGN_OR_RETURN(auto gpu_config,\n fusion->backend_config());\n FusionBackendConfig& backend_config =\n *gpu_config.mutable_fusion_backend_config();\n backend_config.set_kind(std::string(kTritonFusionKind));\n TF_RETURN_IF_ERROR(fusion->set_backend_config(gpu_config));\n for (int64_t operand_index : consumer.OperandIndices(old_root)) {\n TF_RETURN_IF_ERROR(consumer.ReplaceOperandWith(operand_index, fusion));\n }\n return absl::OkStatus();\n}\nabsl::Status AnnotateDotOperandNestedFusionImpl(\n HloFusionInstruction& nested_fusion, const HloDotInstruction& dot,\n const TritonGemmConfig& config,\n absl::Span contracting_dimensions, \n absl::Span batch_dimensions, int64_t contracting_dim_size,\n int64_t non_contracting_dim_size) {\n if (contracting_dimensions.size() != 1) {\n return absl::InternalError(\n absl::StrCat(\"Expected a single lhs contracting dimension but got \",\n contracting_dimensions.size()));\n }\n TF_ASSIGN_OR_RETURN(\n std::vector non_contracting_dimensions,\n GetNonContractingDims(dot.operand(0)->shape(), batch_dimensions,\n contracting_dimensions));\n if (non_contracting_dimensions.size() != 1) {\n return absl::InternalError(\n absl::StrCat(\"Expected a single non-contracting dimension but got \",\n 
non_contracting_dimensions.size()));\n }\n std::vector output_tile_sizes(dot.operand(0)->shape().rank(), 1);\n output_tile_sizes[contracting_dimensions[0]] = contracting_dim_size;\n output_tile_sizes[non_contracting_dimensions[0]] = non_contracting_dim_size;\n BlockLevelParameters block_level_parameters;\n block_level_parameters.output_tile_sizes = std::move(output_tile_sizes);\n TF_ASSIGN_OR_RETURN(auto backend_config,\n nested_fusion.backend_config());\n *backend_config.mutable_fusion_backend_config()\n ->mutable_block_level_fusion_config() =\n block_level_parameters.ToBlockLevelFusionConfig();\n TF_RETURN_IF_ERROR(nested_fusion.set_backend_config(backend_config));\n return absl::OkStatus();\n}\nabsl::Status AnnotateDotLhsNestedFusion(HloFusionInstruction& nested_fusion,\n const HloDotInstruction& dot,\n const TritonGemmConfig& config) {\n const DotDimensionNumbers& dimension_numbers = dot.dot_dimension_numbers();\n return AnnotateDotOperandNestedFusionImpl(\n nested_fusion, dot, config,\n dimension_numbers.lhs_contracting_dimensions(),\n dimension_numbers.lhs_batch_dimensions(), config.block_k, config.block_m);\n}\nabsl::Status AnnotateDotRhsNestedFusion(HloFusionInstruction& nested_fusion,\n const HloDotInstruction& dot,\n const TritonGemmConfig& config) {\n const DotDimensionNumbers& dimension_numbers = dot.dot_dimension_numbers();\n return AnnotateDotOperandNestedFusionImpl(\n nested_fusion, dot, config,\n dimension_numbers.rhs_contracting_dimensions(),\n dimension_numbers.rhs_batch_dimensions(), config.block_k, config.block_n);\n}\nabsl::StatusOr> FindOutputTileSizesForEpilogue(\n const SymbolicTiledHloInstruction& tiled_dot,\n const SymbolicTileAnalysis& analysis, const TritonGemmConfig& config) {\n int64_t dot_rank = tiled_dot.symbolic_tile().tile_map().GetDimensionCount();\n llvm::SmallVector expected_dot_tile_sizes(dot_rank, 1);\n expected_dot_tile_sizes[dot_rank - 2] = config.block_m;\n expected_dot_tile_sizes[dot_rank - 1] = config.block_n;\n llvm::SmallVector output_tile_sizes = expected_dot_tile_sizes;\n std::sort(output_tile_sizes.begin(), output_tile_sizes.end());\n do {\n TF_ASSIGN_OR_RETURN(\n bool parameters_satisfy_constraints,\n analysis.ParametersSatisfyConstraints(output_tile_sizes));\n if (!parameters_satisfy_constraints) {\n continue;\n }\n auto mapped_dot_tile_sizes = tiled_dot.TileSizes(output_tile_sizes);\n if (mapped_dot_tile_sizes == expected_dot_tile_sizes) {\n return output_tile_sizes;\n }\n } while (std::next_permutation(output_tile_sizes.begin(),\n output_tile_sizes.end()));\n return absl::InternalError(absl::StrCat(\n \"Couldn't find output tile sizes that satisfy \", tiled_dot.ToString()));\n}\nabsl::StatusOr GetTritonGemmConfig(\n const HloFusionInstruction& fusion) {\n TF_ASSIGN_OR_RETURN(auto gpu_config,\n fusion.backend_config());\n const FusionBackendConfig& backend_config =\n gpu_config.fusion_backend_config();\n if (!backend_config.has_triton_gemm_config()) {\n return absl::InternalError(\n \"The fusion's backend config doesn't have a triton_gemm_config.\");\n }\n return TritonGemmConfig::FromProto(backend_config.triton_gemm_config());\n}\nabsl::Status MakeNestedFusionFromGemmFusion(\n HloFusionInstruction* fusion, const TritonGemmConfig& config,\n const SymbolicTileAnalysis& analysis,\n const SymbolicTiledHloInstruction& tiled_dot, HloDotInstruction* dot) {\n DCHECK(GetTritonGemmConfig(*fusion).value() == config);\n DCHECK_EQ(tiled_dot.hlo(), dot);\n HloComputation* computation = fusion->called_computation();\n 
TF_RETURN_IF_ERROR(FuseInstructionsForConsumer(\n computation->MakeInstructionPostOrderFrom(*dot->mutable_operand(0)),\n *dot));\n TF_RETURN_IF_ERROR(AnnotateDotLhsNestedFusion(\n *::xla::Cast(dot->mutable_operand(0)), *dot,\n config));\n TF_RETURN_IF_ERROR(FuseInstructionsForConsumer(\n computation->MakeInstructionPostOrderFrom(*dot->mutable_operand(1)),\n *dot));\n TF_RETURN_IF_ERROR(AnnotateDotRhsNestedFusion(\n *::xla::Cast(dot->mutable_operand(1)), *dot,\n config));\n TF_ASSIGN_OR_RETURN([[maybe_unused]] bool changed,\n HloDCE::RunOnComputation(\n computation,\n false));\n TF_ASSIGN_OR_RETURN(\n llvm::SmallVector output_tile_sizes,\n FindOutputTileSizesForEpilogue(tiled_dot, analysis, config));\n TF_ASSIGN_OR_RETURN(auto gpu_config,\n fusion->backend_config());\n FusionBackendConfig& backend_config =\n *gpu_config.mutable_fusion_backend_config();\n backend_config.set_kind(std::string(kTritonFusionKind));\n BlockLevelParameters block_level_parameters;\n block_level_parameters.output_tile_sizes.assign(output_tile_sizes.begin(),\n output_tile_sizes.end());\n *backend_config.mutable_block_level_fusion_config() =\n block_level_parameters.ToBlockLevelFusionConfig();\n TF_RETURN_IF_ERROR(fusion->set_backend_config(gpu_config));\n return absl::OkStatus();\n}\nsize_t GetDotCount(HloComputation* computation) {\n return absl::c_count_if(computation->instructions(), [](HloInstruction* hlo) {\n return hlo->opcode() == HloOpcode::kDot;\n });\n}\nclass NestGemmFusionVisitor : public DfsHloRewriteVisitor {\n public:\n explicit NestGemmFusionVisitor(mlir::MLIRContext* ctx) : ctx_(ctx) {}\n absl::Status HandleFusion(HloInstruction* instruction) override {\n HloFusionInstruction* fusion = Cast(instruction);\n absl::StatusOr config = GetTritonGemmConfig(*fusion);\n if (!config.ok()) {\n return absl::OkStatus(); \n }\n HloComputation* computation = fusion->called_computation();\n HloInstruction* dot =\n hlo_query::GetFirstInstructionWithOpcode(*computation, HloOpcode::kDot);\n if (dot == nullptr) {\n return absl::OkStatus(); \n }\n DCHECK_EQ(GetDotCount(computation), 1) << \"Fusion has more than one dot.\";\n SymbolicTileAnalysisOrError analysis_or =\n SymbolicTileAnalysis::AnalyzeComputation(\n *fusion->called_computations()[0], ctx_);\n if (std::holds_alternative(analysis_or)) {\n return absl::InternalError(\n absl::StrCat(\"Failed to analyze the computation (\",\n std::get(analysis_or).Explain(),\n \"): \", fusion->called_computation()->ToString()));\n }\n auto& analysis = std::get(analysis_or);\n auto tiled_dot_it = absl::c_find_if(\n analysis.GetSymbolicTiledHloComputation(),\n [&](const auto& tiled_hlo) { return tiled_hlo->hlo() == dot; });\n if (tiled_dot_it == analysis.GetSymbolicTiledHloComputation().end()) {\n return absl::InternalError(absl::StrCat(\n \"Couldn't find a symbolic tiled instruction for \", dot->ToString()));\n }\n TF_RETURN_IF_ERROR(MakeNestedFusionFromGemmFusion(\n fusion, config.value(), analysis, **tiled_dot_it,\n Cast(dot)));\n this->MarkAsChanged();\n return absl::OkStatus();\n }\n private:\n mlir::MLIRContext* ctx_;\n};\n} \nabsl::StatusOr NestGemmFusion::Run(\n HloModule* module,\n const absl::flat_hash_set& execution_threads) {\n bool changed = false;\n mlir::MLIRContext ctx;\n for (HloComputation* computation :\n module->MakeNonfusionComputations(execution_threads)) {\n NestGemmFusionVisitor visitor(&ctx);\n TF_RETURN_IF_ERROR(computation->Accept(&visitor));\n changed |= visitor.changed();\n }\n return changed;\n}\n} "},"Unit Test - (Ground 
Truth)":{"kind":"string","value":"#include \"xla/service/gpu/transforms/nest_gemm_fusion.h\"\n#include \n#include \n#include \n#include \"xla/hlo/ir/hlo_instruction.h\"\n#include \"xla/service/gpu/backend_configs.pb.h\"\n#include \"xla/service/pattern_matcher.h\"\n#include \"xla/service/pattern_matcher_gmock.h\"\n#include \"xla/tests/hlo_test_base.h\"\n#include \"xla/tsl/lib/core/status_test_util.h\"\n#include \"tsl/platform/statusor.h\"\nusing ::testing::ElementsAre;\nnamespace xla {\nstatic void PrintTo(const HloInstruction& hlo, std::ostream* os) {\n *os << hlo.ToString();\n}\nnamespace gpu {\nnamespace {\nMATCHER_P(OutputTileSizesIs, matcher, \"\") {\n auto backend_config = arg.template backend_config();\n if (!backend_config.ok()) {\n *result_listener << \"failed to get backend config: \"\n << backend_config.status();\n return false;\n }\n FusionBackendConfig fusion_backend_config =\n backend_config->fusion_backend_config();\n if (!fusion_backend_config.has_block_level_fusion_config()) {\n *result_listener << \"has no block level fusion config\";\n return false;\n }\n auto output_tile_sizes =\n fusion_backend_config.block_level_fusion_config().output_tile_sizes();\n return ExplainMatchResult(matcher, output_tile_sizes, result_listener);\n}\nclass NestGemmFusionTest : public HloTestBase {};\nTEST_F(NestGemmFusionTest, BasicTest) {\n TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R\"(\nHloModule module\ndot {\n lhs = bf16[8192,512] parameter(0)\n rhs = bf16[512,512] parameter(1)\n ROOT %dot = bf16[8192,512] dot(lhs, rhs),\n lhs_contracting_dims={1}, rhs_contracting_dims={0}\n}\nENTRY entry {\n p0 = bf16[8192,512] parameter(0)\n p1 = bf16[512,512] parameter(1)\n ROOT fusion = bf16[8192,512] fusion(p0, p1),\n kind=kCustom, calls=dot, backend_config={\n \"fusion_backend_config\": {\n \"kind\":\"__triton_gemm\", \"triton_gemm_config\": {\n \"block_m\":\"64\", \"block_n\":\"256\", \"block_k\":\"32\",\n \"split_k\":\"1\", \"num_stages\":\"1\", \"num_warps\":\"1\", \"num_ctas\":\"1\"\n }\n }\n }\n}\n)\"));\n TF_ASSERT_OK_AND_ASSIGN(bool changed, NestGemmFusion().Run(module.get()))\n EXPECT_TRUE(changed);\n TF_ASSERT_OK(verifier().Run(module.get()).status());\n const HloInstruction* fusion = nullptr;\n ASSERT_THAT(module->entry_computation()->root_instruction(),\n GmockMatch(match::Fusion(&fusion)));\n EXPECT_THAT(*fusion, OutputTileSizesIs(ElementsAre(64, 256)));\n const HloInstruction* lhs = nullptr;\n const HloInstruction* rhs = nullptr;\n EXPECT_THAT(fusion->fused_expression_root(),\n GmockMatch(match::Dot(match::Fusion(&lhs), match::Fusion(&rhs))));\n EXPECT_THAT(*lhs, OutputTileSizesIs(ElementsAre(64, 32)));\n EXPECT_THAT(*rhs, OutputTileSizesIs(ElementsAre(32, 256)));\n}\n} \n} \n} "},"Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/nest_gemm_fusion.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/nest_gemm_fusion_test.cc"},"Commit Hash":{"kind":"string","value":"4a29233a7b7c1a3a4294e4ccdd1772f9083944ea"}}},{"rowIdx":297,"cells":{"ID":{"kind":"string","value":"79b73ddc-d78b-4fc8-891a-abd1b5e2ac20"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"google/libaddressinput"},"File Name":{"kind":"string","value":"region_data_builder"},"File Path in 
Repository":{"kind":"string","value":"cpp/src/region_data_builder.cc"},"File Path for Unit Test":{"kind":"string","value":"cpp/test/region_data_builder_test.cc"},"Code":{"kind":"string","value":"#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"language.h\"\n#include \"lookup_key.h\"\n#include \"region_data_constants.h\"\n#include \"rule.h\"\n#include \"util/size.h\"\nnamespace i18n {\nnamespace addressinput {\nnamespace {\nconst size_t kLookupKeysMaxDepth = size(LookupKey::kHierarchy) - 1;\nvoid BuildRegionTreeRecursively(\n const std::map& rules,\n std::map::const_iterator hint,\n const LookupKey& parent_key,\n RegionData* parent_region,\n const std::vector& keys,\n bool prefer_latin_name,\n size_t region_max_depth) {\n assert(parent_region != nullptr);\n LookupKey lookup_key;\n for (const auto& key : keys) {\n lookup_key.FromLookupKey(parent_key, key);\n const std::string lookup_key_string =\n lookup_key.ToKeyString(kLookupKeysMaxDepth);\n ++hint;\n if (hint == rules.end() || hint->first != lookup_key_string) {\n hint = rules.find(lookup_key_string);\n if (hint == rules.end()) {\n return;\n }\n }\n const Rule* rule = hint->second;\n assert(rule != nullptr);\n const std::string& local_name = rule->GetName().empty()\n ? key : rule->GetName();\n const std::string& name =\n prefer_latin_name && !rule->GetLatinName().empty()\n ? rule->GetLatinName() : local_name;\n RegionData* region = parent_region->AddSubRegion(key, name);\n if (!rule->GetSubKeys().empty() &&\n region_max_depth > parent_key.GetDepth()) {\n BuildRegionTreeRecursively(rules,\n hint,\n lookup_key,\n region,\n rule->GetSubKeys(),\n prefer_latin_name,\n region_max_depth);\n }\n }\n}\nRegionData* BuildRegion(const std::map& rules,\n const std::string& region_code,\n const Language& language) {\n AddressData address;\n address.region_code = region_code;\n LookupKey lookup_key;\n lookup_key.FromAddress(address);\n auto hint = rules.find(lookup_key.ToKeyString(kLookupKeysMaxDepth));\n assert(hint != rules.end());\n const Rule* rule = hint->second;\n assert(rule != nullptr);\n auto* region = new RegionData(region_code);\n size_t region_max_depth =\n RegionDataConstants::GetMaxLookupKeyDepth(region_code);\n if (region_max_depth > 0) {\n BuildRegionTreeRecursively(rules,\n hint,\n lookup_key,\n region,\n rule->GetSubKeys(),\n language.has_latin_script,\n region_max_depth);\n }\n return region;\n}\n} \nRegionDataBuilder::RegionDataBuilder(PreloadSupplier* supplier)\n : supplier_(supplier),\n cache_() {\n assert(supplier_ != nullptr);\n}\nRegionDataBuilder::~RegionDataBuilder() {\n for (const auto& outer : cache_) {\n assert(outer.second != nullptr);\n for (const auto& inner : *outer.second) {\n delete inner.second;\n }\n delete outer.second;\n }\n}\nconst RegionData& RegionDataBuilder::Build(\n const std::string& region_code,\n const std::string& ui_language_tag,\n std::string* best_region_tree_language_tag) {\n assert(supplier_->IsLoaded(region_code));\n assert(best_region_tree_language_tag != nullptr);\n auto region_it = cache_.find(region_code);\n if (region_it == cache_.end()) {\n region_it = cache_.emplace(region_code, new LanguageRegionMap).first;\n }\n Rule rule;\n rule.ParseSerializedRule(RegionDataConstants::GetRegionData(region_code));\n static const Language kUndefinedLanguage(\"und\");\n const Language best_language =\n rule.GetLanguages().empty()\n ? 
kUndefinedLanguage\n : ChooseBestAddressLanguage(rule, Language(ui_language_tag));\n *best_region_tree_language_tag = best_language.tag;\n auto language_it = region_it->second->find(best_language.tag);\n if (language_it == region_it->second->end()) {\n const auto& rules = supplier_->GetRulesForRegion(region_code);\n language_it = region_it->second\n ->emplace(best_language.tag,\n BuildRegion(rules, region_code, best_language))\n .first;\n }\n return *language_it->second;\n}\n} \n} "},"Unit Test - (Ground Truth)":{"kind":"string","value":"#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \n#include \"testdata_source.h\"\nnamespace {\nusing i18n::addressinput::BuildCallback;\nusing i18n::addressinput::NullStorage;\nusing i18n::addressinput::PreloadSupplier;\nusing i18n::addressinput::RegionData;\nusing i18n::addressinput::RegionDataBuilder;\nusing i18n::addressinput::TestdataSource;\nclass RegionDataBuilderTest : public testing::Test {\n public:\n RegionDataBuilderTest(const RegionDataBuilderTest&) = delete;\n RegionDataBuilderTest& operator=(const RegionDataBuilderTest&) = delete;\n protected:\n RegionDataBuilderTest()\n : supplier_(new TestdataSource(true),\n new NullStorage),\n builder_(&supplier_),\n loaded_callback_(BuildCallback(this, &RegionDataBuilderTest::OnLoaded)),\n best_language_() {}\n PreloadSupplier supplier_;\n RegionDataBuilder builder_;\n const std::unique_ptr loaded_callback_;\n std::string best_language_;\n private:\n void OnLoaded(bool success, const std::string& region_code, int num_rules) {\n ASSERT_TRUE(success);\n ASSERT_FALSE(region_code.empty());\n ASSERT_LT(0, num_rules);\n ASSERT_TRUE(supplier_.IsLoaded(region_code));\n }\n};\nTEST_F(RegionDataBuilderTest, BuildUsRegionTree) {\n supplier_.LoadRules(\"US\", *loaded_callback_);\n const RegionData& tree = builder_.Build(\"US\", \"en-US\", &best_language_);\n EXPECT_FALSE(tree.sub_regions().empty());\n}\nTEST_F(RegionDataBuilderTest, BuildCnRegionTree) {\n supplier_.LoadRules(\"CN\", *loaded_callback_);\n const RegionData& tree = builder_.Build(\"CN\", \"zh-Hans\", &best_language_);\n ASSERT_FALSE(tree.sub_regions().empty());\n EXPECT_FALSE(tree.sub_regions().front()->sub_regions().empty());\n}\nTEST_F(RegionDataBuilderTest, BuildChRegionTree) {\n supplier_.LoadRules(\"CH\", *loaded_callback_);\n const RegionData& tree = builder_.Build(\"CH\", \"de-CH\", &best_language_);\n EXPECT_TRUE(tree.sub_regions().empty());\n}\nTEST_F(RegionDataBuilderTest, BuildZwRegionTree) {\n supplier_.LoadRules(\"ZW\", *loaded_callback_);\n const RegionData& tree = builder_.Build(\"ZW\", \"en-ZW\", &best_language_);\n EXPECT_TRUE(tree.sub_regions().empty());\n}\nTEST_F(RegionDataBuilderTest, UsTreeHasStateAbbreviationsAndNames) {\n supplier_.LoadRules(\"US\", *loaded_callback_);\n const RegionData& tree = builder_.Build(\"US\", \"en-US\", &best_language_);\n EXPECT_EQ(\"en\", best_language_);\n ASSERT_FALSE(tree.sub_regions().empty());\n EXPECT_EQ(\"AL\", tree.sub_regions().front()->key());\n EXPECT_EQ(\"Alabama\", tree.sub_regions().front()->name());\n}\nTEST_F(RegionDataBuilderTest,\n KrWithKoLatnLanguageHasKoreanKeysAndLatinScriptNames) {\n supplier_.LoadRules(\"KR\", *loaded_callback_);\n const RegionData& tree = builder_.Build(\"KR\", \"ko-Latn\", &best_language_);\n EXPECT_EQ(\"ko-Latn\", best_language_);\n ASSERT_FALSE(tree.sub_regions().empty());\n EXPECT_EQ(\"강원도\", tree.sub_regions().front()->key());\n EXPECT_EQ(\"Gangwon\", 
tree.sub_regions().front()->name());\n}\nTEST_F(RegionDataBuilderTest, KrWithKoKrLanguageHasKoreanKeysAndNames) {\n supplier_.LoadRules(\"KR\", *loaded_callback_);\n const RegionData& tree = builder_.Build(\"KR\", \"ko-KR\", &best_language_);\n EXPECT_EQ(\"ko\", best_language_);\n ASSERT_FALSE(tree.sub_regions().empty());\n EXPECT_EQ(\"강원도\", tree.sub_regions().front()->key());\n EXPECT_EQ(\"강원\", tree.sub_regions().front()->name());\n}\n} "},"Code Url":{"kind":"string","value":"https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/region_data_builder.cc"},"Test Code Url":{"kind":"string","value":"https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/region_data_builder_test.cc"},"Commit Hash":{"kind":"string","value":"2610f7b1043d6784ada41392fc9392d1ea09ea07"}}},{"rowIdx":298,"cells":{"ID":{"kind":"string","value":"31c1fe9d-b92e-40d4-9b3a-0cd0e17a79e6"},"Language":{"kind":"string","value":"cpp"},"Repository Name":{"kind":"string","value":"tensorflow/tensorflow"},"File Name":{"kind":"string","value":"dataset"},"File Path in Repository":{"kind":"string","value":"tensorflow/core/framework/dataset.cc"},"File Path for Unit Test":{"kind":"string","value":"tensorflow/core/framework/dataset_test.cc"},"Code":{"kind":"string","value":"#include \"tensorflow/core/framework/dataset.h\"\n#include \n#include \n#include \"tensorflow/core/activity_watcher/activity.h\"\n#include \"tensorflow/core/framework/dataset.pb.h\"\n#include \"tensorflow/core/framework/device_base.h\"\n#include \"tensorflow/core/framework/function.h\"\n#include \"tensorflow/core/framework/op_kernel.h\"\n#include \"tensorflow/core/framework/resource_mgr.h\"\n#include \"tensorflow/core/framework/variant_encode_decode.h\"\n#include \"tensorflow/core/framework/variant_op_registry.h\"\n#include \"tensorflow/core/framework/versions.pb.h\"\n#include \"tensorflow/core/graph/graph_def_builder.h\"\n#include \"tensorflow/core/graph/node_builder.h\"\n#include \"tensorflow/core/platform/errors.h\"\n#include \"tensorflow/core/platform/logging.h\"\n#include \"tensorflow/core/platform/mutex.h\"\n#include \"tensorflow/core/platform/refcount.h\"\n#include \"tensorflow/core/platform/resource.h\"\n#include \"tensorflow/core/platform/status.h\"\n#include \"tensorflow/core/platform/strcat.h\"\n#include \"tensorflow/core/profiler/lib/traceme.h\"\n#include \"tensorflow/core/public/version.h\"\n#if defined(PLATFORM_WINDOWS)\n#undef GetMessage\n#endif\nnamespace tensorflow {\nnamespace data {\nnamespace {\nstatic mutex* get_dataset_op_registry_lock() {\n static mutex dataset_op_registry_lock(LINKER_INITIALIZED);\n return &dataset_op_registry_lock;\n}\nstatic std::unordered_set* get_dataset_op_registry() {\n static std::unordered_set* names = new std::unordered_set;\n return names;\n}\nstd::string UniqueNodeName(const std::string& base) {\n static std::atomic counter(0);\n return strings::StrCat(base, \"/\", counter.fetch_add(1));\n}\nclass DatasetVariantWrapper {\n public:\n DatasetVariantWrapper() : dataset_(nullptr) {}\n explicit DatasetVariantWrapper(DatasetBase* dataset) : dataset_(dataset) {}\n DatasetVariantWrapper(const DatasetVariantWrapper& other)\n : dataset_(other.dataset_) {\n if (dataset_) dataset_->Ref();\n }\n DatasetVariantWrapper& operator=(DatasetVariantWrapper&& other) {\n if (&other == this) return *this;\n std::swap(dataset_, other.dataset_);\n return *this;\n }\n DatasetVariantWrapper& operator=(const DatasetVariantWrapper& other) = delete;\n 
~DatasetVariantWrapper() {\n if (dataset_) dataset_->Unref();\n }\n DatasetBase* get() const { return dataset_; }\n string TypeName() const { return \"tensorflow::DatasetVariantWrapper\"; }\n string DebugString() const {\n if (dataset_) {\n return dataset_->DebugString();\n } else {\n return \"\";\n }\n }\n void Encode(VariantTensorData* data) const {\n LOG(ERROR) << \"The Encode() method is not implemented for \"\n \"DatasetVariantWrapper objects.\";\n }\n bool Decode(const VariantTensorData& data) {\n LOG(ERROR) << \"The Decode() method is not implemented for \"\n \"DatasetVariantWrapper objects.\";\n return false;\n }\n private:\n DatasetBase* dataset_; \n};\nconst char kWrappedDatasetVariantTypeName[] =\n \"tensorflow::data::WrappedDatasetVariant\";\nclass WrappedDatasetVariantWrapper {\n public:\n WrappedDatasetVariantWrapper() {}\n explicit WrappedDatasetVariantWrapper(const Tensor& ds_tensor)\n : ds_tensor_(ds_tensor) {}\n Tensor get() const { return ds_tensor_; }\n string TypeName() const { return \"tensorflow::WrappedDatasetVariantWrapper\"; }\n string DebugString() const {\n return \"tensorflow::WrappedDatasetVariantWrapper::DebugString\";\n }\n void Encode(VariantTensorData* data) const {\n *(data->add_tensors()) = ds_tensor_;\n }\n bool Decode(const VariantTensorData& data) {\n ds_tensor_ = data.tensors(0);\n return true;\n }\n private:\n Tensor ds_tensor_;\n};\nclass WrapDatasetVariantOp : public OpKernel {\n public:\n explicit WrapDatasetVariantOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}\n void Compute(OpKernelContext* ctx) override {\n const Tensor& tensor = ctx->input(0);\n OP_REQUIRES(ctx,\n tensor.dtype() == DT_VARIANT &&\n TensorShapeUtils::IsScalar(tensor.shape()),\n errors::InvalidArgument(\n \"Dataset tensor must be a scalar of dtype DT_VARIANT.\"));\n DatasetBase* unused;\n OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(tensor, &unused));\n Tensor* output = nullptr;\n OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &output));\n output->scalar()() = WrappedDatasetVariantWrapper(tensor);\n }\n};\nREGISTER_KERNEL_BUILDER(Name(\"WrapDatasetVariant\").Device(DEVICE_CPU),\n WrapDatasetVariantOp);\nREGISTER_KERNEL_BUILDER(Name(\"WrapDatasetVariant\")\n .HostMemory(\"input_handle\")\n .HostMemory(\"output_handle\")\n .Device(DEVICE_GPU),\n WrapDatasetVariantOp);\nclass UnwrapDatasetVariantOp : public OpKernel {\n public:\n explicit UnwrapDatasetVariantOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}\n void Compute(OpKernelContext* ctx) override {\n const Tensor& tensor = ctx->input(0);\n OP_REQUIRES(ctx,\n tensor.dtype() == DT_VARIANT &&\n TensorShapeUtils::IsScalar(tensor.shape()),\n errors::InvalidArgument(\n \"Dataset tensor must be a scalar of dtype DT_VARIANT.\"));\n Variant variant = tensor.scalar()();\n const WrappedDatasetVariantWrapper* wrapper =\n variant.get();\n OP_REQUIRES(ctx, wrapper != nullptr,\n errors::InvalidArgument(\n \"Tensor must be a WrappedDataset variant object.\"));\n Tensor ds_tensor = wrapper->get();\n OP_REQUIRES_OK(ctx, ctx->set_output(\"output_handle\", ds_tensor));\n }\n};\nREGISTER_KERNEL_BUILDER(Name(\"UnwrapDatasetVariant\").Device(DEVICE_CPU),\n UnwrapDatasetVariantOp);\nREGISTER_KERNEL_BUILDER(Name(\"UnwrapDatasetVariant\")\n .HostMemory(\"input_handle\")\n .HostMemory(\"output_handle\")\n .Device(DEVICE_GPU),\n UnwrapDatasetVariantOp);\nstatic Status WrappedDatasetVariantDeviceCopy(\n const WrappedDatasetVariantWrapper& from, WrappedDatasetVariantWrapper* to,\n const UnaryVariantOpRegistry::AsyncTensorDeviceCopyFn& 
copy) {\n *to = WrappedDatasetVariantWrapper(from);\n return absl::OkStatus();\n}\n#define REGISTER_OPTIONAL_COPY(DIRECTION) \\\n INTERNAL_REGISTER_UNARY_VARIANT_DEVICE_COPY_FUNCTION( \\\n WrappedDatasetVariantWrapper, DIRECTION, \\\n WrappedDatasetVariantDeviceCopy)\nREGISTER_OPTIONAL_COPY(VariantDeviceCopyDirection::HOST_TO_DEVICE);\nREGISTER_OPTIONAL_COPY(VariantDeviceCopyDirection::DEVICE_TO_HOST);\nREGISTER_OPTIONAL_COPY(VariantDeviceCopyDirection::DEVICE_TO_DEVICE);\nREGISTER_UNARY_VARIANT_DECODE_FUNCTION(WrappedDatasetVariantWrapper,\n kWrappedDatasetVariantTypeName);\n} \nStatus GraphDefBuilderWrapper::AddDataset(const DatasetBase* dataset,\n const std::vector& inputs,\n Node** output) {\n return AddDataset(dataset, inputs, {}, output);\n}\nStatus GraphDefBuilderWrapper::AddDataset(\n const DatasetBase* dataset, const std::vector& inputs,\n const std::vector>& attrs,\n Node** output) {\n std::vector> enumerated_inputs(inputs.size());\n for (size_t i = 0; i < inputs.size(); i++) {\n enumerated_inputs[i] = std::make_pair(i, inputs[i]);\n }\n return AddDataset(dataset, enumerated_inputs, {}, attrs, output);\n}\nStatus GraphDefBuilderWrapper::AddDataset(\n const DatasetBase* dataset,\n const std::vector>& inputs,\n const std::vector>>& list_inputs,\n const std::vector>& attrs,\n Node** output) {\n return AddDataset(dataset, inputs, list_inputs, attrs,\n false, output);\n}\nStatus GraphDefBuilderWrapper::AddDataset(\n const DatasetBase* dataset,\n const std::vector>& inputs,\n const std::vector>>& list_inputs,\n const std::vector>& attrs,\n bool use_dataset_name, Node** output) {\n auto& type_string = dataset->type_string();\n auto opts = absl::make_unique(b_->opts());\n bool has_output_types_attr = HasAttr(type_string, \"output_types\");\n bool has_output_shapes_attr = HasAttr(type_string, \"output_shapes\");\n if (has_output_shapes_attr) {\n opts = absl::make_unique(\n opts->WithAttr(\"output_shapes\", dataset->output_shapes()));\n }\n if (has_output_types_attr) {\n opts = absl::make_unique(\n opts->WithAttr(\"output_types\", dataset->output_dtypes()));\n }\n bool has_metadata_attr = HasAttr(type_string, \"metadata\");\n if (has_metadata_attr) {\n std::string serialized_metadata;\n dataset->metadata().SerializeToString(&serialized_metadata);\n opts = absl::make_unique(\n opts->WithAttr(\"metadata\", serialized_metadata));\n }\n for (const auto& attr : attrs) {\n opts = absl::make_unique(\n opts->WithAttr(attr.first, attr.second));\n }\n if (opts->HaveError()) {\n return errors::Internal(\"AddDataset: Failed to build Options with error \",\n opts->StatusToString());\n }\n NodeBuilder node_builder(\n use_dataset_name ? 
dataset->node_name() : opts->GetNameForOp(type_string),\n type_string, opts->op_registry());\n {\n size_t total_size = inputs.size() + list_inputs.size();\n auto inputs_iter = inputs.begin();\n auto list_inputs_iter = list_inputs.begin();\n for (int i = 0; i < total_size; i++) {\n if (inputs_iter != inputs.end() && inputs_iter->first == i) {\n node_builder.Input(NodeBuilder::NodeOut(inputs_iter->second));\n inputs_iter++;\n } else if (list_inputs_iter != list_inputs.end() &&\n list_inputs_iter->first == i) {\n std::vector nodeout_inputs;\n nodeout_inputs.reserve(list_inputs_iter->second.size());\n for (Node* n : list_inputs_iter->second) {\n nodeout_inputs.emplace_back(n);\n }\n node_builder.Input(nodeout_inputs);\n list_inputs_iter++;\n } else {\n return errors::InvalidArgument(\"No input found for index \", i);\n }\n }\n }\n *output = opts->FinalizeBuilder(&node_builder);\n if (*output == nullptr) {\n return errors::Internal(\"AddDataset: Failed to build \", type_string,\n \" op with error \", opts->StatusToString());\n }\n return absl::OkStatus();\n}\nStatus GraphDefBuilderWrapper::AddFunction(\n SerializationContext* ctx, const string& function_name,\n const FunctionLibraryDefinition& lib_def) {\n if (b_->HasFunction(function_name)) {\n VLOG(1) << \"Function with name \" << function_name << \"already exists in\"\n << \" the graph. It will not be added again.\";\n return absl::OkStatus();\n }\n const FunctionDef* f_def = lib_def.Find(function_name);\n if (f_def == nullptr) {\n return errors::InvalidArgument(\"Unable to find FunctionDef for \",\n function_name, \" in the registry.\");\n }\n FunctionDefLibrary def;\n *def.add_function() = *f_def;\n const string gradient_func = lib_def.FindGradient(function_name);\n if (!gradient_func.empty()) {\n GradientDef* g_def = def.add_gradient();\n g_def->set_function_name(function_name);\n g_def->set_gradient_func(gradient_func);\n }\n TF_RETURN_IF_ERROR(b_->AddFunctionLibrary(def));\n for (const NodeDef& node_def : f_def->node_def()) {\n const OpRegistrationData* op_reg_data = nullptr;\n TF_RETURN_IF_ERROR(lib_def.LookUp(node_def.op(), &op_reg_data));\n if (op_reg_data->is_function_op) {\n TF_RETURN_IF_ERROR(AddFunction(ctx, op_reg_data->op_def.name(), lib_def));\n }\n for (const auto& pair : node_def.attr()) {\n TF_RETURN_IF_ERROR(AddAttrFunctions(ctx, pair.second, lib_def));\n }\n }\n for (auto iter = f_def->attr().begin(); iter != f_def->attr().end(); iter++) {\n TF_RETURN_IF_ERROR(AddAttrFunctions(ctx, iter->second, lib_def));\n }\n return absl::OkStatus();\n}\nvoid GraphDefBuilderWrapper::AddPlaceholderInternal(const Tensor& val,\n Node** output) {\n *output = ops::SourceOp(\n \"Placeholder\",\n b_->opts().WithAttr(\"dtype\", val.dtype()).WithAttr(\"shape\", val.shape()));\n}\nvoid GraphDefBuilderWrapper::AddTensorInternal(const Tensor& val,\n Node** output) {\n *output = ops::SourceOp(\n \"Const\",\n b_->opts().WithAttr(\"dtype\", val.dtype()).WithAttr(\"value\", val));\n}\nbool GraphDefBuilderWrapper::HasAttr(const string& name,\n const string& attr_name) const {\n const OpDef* op_def = nullptr;\n Status s = b_->opts().op_registry()->LookUpOpDef(name, &op_def);\n if (!s.ok() || op_def == nullptr) {\n return false;\n }\n return HasAttr(op_def, attr_name);\n}\nint32_t GetRunnerThreadpoolSizeFromOpKernelContext(OpKernelContext* ctx) {\n thread::ThreadPool* thread_pool =\n ctx->device()->tensorflow_device_thread_pool();\n if (thread_pool) {\n return thread_pool->NumThreads();\n } else {\n static const int32_t kDefaultRunnerThreadpoolSize = 
port::MaxParallelism();\n return kDefaultRunnerThreadpoolSize;\n }\n}\nint64_t MemoryCheckpoint::IdRegistry::Add(const std::string& prefix,\n const std::string& key) {\n mutex_lock l(mu_);\n auto pair = std::make_pair(prefix, key);\n if (string_to_int_.contains(pair)) {\n return string_to_int_[pair];\n }\n int64_t id = next_id_++;\n int_to_string_[id] = pair;\n string_to_int_[pair] = id;\n return id;\n}\nstd::vector MemoryCheckpoint::IdRegistry::GetMatchingIds(\n const std::string& prefix_to_match) {\n mutex_lock l(mu_);\n std::vector ids;\n for (const auto& [pair, id] : string_to_int_) {\n auto [prefix, key] = pair;\n if (prefix.compare(0, prefix_to_match.length(), prefix_to_match) == 0) {\n ids.push_back(id);\n }\n }\n return ids;\n}\nstd::pair MemoryCheckpoint::IdRegistry::Get(\n int64_t id) {\n mutex_lock l(mu_);\n auto result = int_to_string_.find(id);\n DCHECK(result != int_to_string_.end())\n << \"Failed find id \" << id << \" in IdRegistry. \"\n << \"Max id is: \" << next_id_ - 1;\n return result->second;\n}\nvoid MemoryCheckpoint::IdRegistry::RemoveIds(const std::vector& ids) {\n mutex_lock l(mu_);\n for (const auto& id : ids) {\n string_to_int_.erase(int_to_string_[id]);\n int_to_string_.erase(id);\n }\n}\nstd::string MemoryCheckpoint::DebugString() const {\n std::string result = absl::StrCat(\"status=\", status_.ToString(),\n \", \"\n \"root=\",\n (is_root_ ? \"true\" : \"false\"), \"\\n\");\n absl::StrAppend(&result, \"number of integers: \", int_values_.size(), \"\\n\");\n for (const auto& [k, v] : int_values_) {\n absl::StrAppend(&result, \" \", id_registry_->Get(k).first, \":\",\n id_registry_->Get(k).second, \": \", v, \"\\n\");\n }\n absl::StrAppend(&result, \"number of strings: \", str_values_.size(), \"\\n\");\n for (const auto& [k, v] : str_values_) {\n absl::StrAppend(&result, \" \", id_registry_->Get(k).first, \":\",\n id_registry_->Get(k).second, \": \", v, \"\\n\");\n }\n absl::StrAppend(&result, \"number of tensors: \", tensor_values_.size(), \"\\n\");\n absl::StrAppend(\n &result, \"number of expired prefixes: \", expired_prefixes_.size(), \"\\n\");\n return result;\n}\nvoid MemoryCheckpoint::Merge(MemoryCheckpoint* other) {\n if (!status_.ok()) {\n return;\n }\n if (!other->status_.ok()) {\n status_ = other->status_;\n int_values_.clear();\n str_values_.clear();\n tensor_values_.clear();\n }\n for (const auto& [k, v] : other->int_values_) {\n int_values_[k] = v;\n }\n for (const auto& [k, v] : other->str_values_) {\n str_values_[k] = v;\n }\n for (const auto& [k, v] : other->tensor_values_) {\n tensor_values_[k] = v;\n }\n for (const auto& prefix : other->expired_prefixes_) {\n Purge(prefix);\n }\n other->expired_prefixes_.clear();\n VLOG(5) << \"MemoryCheckpoint::Merge \" << DebugString();\n}\nvoid MemoryCheckpoint::Purge(const std::string& prefix) {\n std::vector ids = id_registry_->GetMatchingIds(prefix);\n for (const auto& id : ids) {\n int_values_.erase(id);\n str_values_.erase(id);\n tensor_values_.erase(id);\n }\n if (!is_root_) {\n expired_prefixes_.insert(prefix);\n } else {\n id_registry_->RemoveIds(ids);\n }\n}\nStatus MemoryCheckpoint::Save(IteratorStateWriter* writer) const {\n for (const auto& [id, value] : int_values_) {\n auto [prefix, key] = id_registry_->Get(id);\n TF_RETURN_IF_ERROR(writer->WriteScalar(prefix, key, value));\n }\n for (const auto& [id, value] : str_values_) {\n auto [prefix, key] = id_registry_->Get(id);\n TF_RETURN_IF_ERROR(writer->WriteScalar(prefix, key, value));\n }\n for (const auto& [id, value] : tensor_values_) {\n auto 
[prefix, key] = id_registry_->Get(id);\n TF_RETURN_IF_ERROR(writer->WriteTensor(prefix, key, value));\n }\n return absl::OkStatus();\n}\nStatus IteratorBase::InitializeBase(IteratorContext* ctx,\n const IteratorBase* parent) {\n parent_ = parent;\n id_ =\n Hash64CombineUnordered(Hash64(prefix()), reinterpret_cast(this));\n if (parent_) {\n parent_id_ = Hash64CombineUnordered(Hash64(parent_->prefix()),\n reinterpret_cast(parent_));\n if (const auto& model = ctx->model()) {\n auto factory = [ctx, this](model::Node::Args args) {\n return CreateNode(ctx, std::move(args));\n };\n model->AddNode(std::move(factory), prefix(), parent->model_node(),\n &node_);\n cleanup_fns_.push_back([this, model]() { model->RemoveNode(node_); });\n }\n }\n return absl::OkStatus();\n}\nStatus GetCompressedElementFromVariantTensor(\n const Tensor& tensor, const CompressedElement** out_compressed_element) {\n if (!(tensor.dtype() == DT_VARIANT &&\n TensorShapeUtils::IsScalar(tensor.shape()))) {\n return errors::InvalidArgument(\n \"`CompressedElement` tensor must be a scalar of dtype `DT_VARIANT`.\");\n }\n const Variant& variant = tensor.scalar()();\n const CompressedElement* compressed_element =\n variant.get();\n if (compressed_element == nullptr) {\n return errors::InvalidArgument(\n \"Tensor must be a `CompressedElement` object.\");\n }\n *out_compressed_element = compressed_element;\n return absl::OkStatus();\n}\nint64_t GetAllocatedBytes(const std::vector& element) {\n int64_t allocated_bytes = 0;\n for (auto& tensor : element) {\n if (tensor.dtype() == DT_VARIANT) {\n DatasetBase* dataset;\n if (GetDatasetFromVariantTensor(tensor, &dataset).ok()) {\n allocated_bytes += dataset->AllocatedBytes();\n continue;\n }\n const CompressedElement* compressed_element;\n if (GetCompressedElementFromVariantTensor(tensor, &compressed_element)\n .ok()) {\n allocated_bytes += compressed_element->ByteSizeLong();\n continue;\n }\n }\n allocated_bytes += tensor.AllocatedBytes();\n }\n return allocated_bytes;\n}\nint64_t GetTotalBytes(const std::vector& element) {\n int64_t total_bytes = 0;\n for (auto& tensor : element) {\n if (tensor.dtype() == DT_VARIANT) {\n DatasetBase* dataset;\n if (GetDatasetFromVariantTensor(tensor, &dataset).ok()) {\n total_bytes += dataset->TotalBytes();\n continue;\n }\n const CompressedElement* compressed_element;\n if (GetCompressedElementFromVariantTensor(tensor, &compressed_element)\n .ok()) {\n total_bytes += compressed_element->ByteSizeLong();\n continue;\n }\n }\n total_bytes += tensor.TotalBytes();\n }\n return total_bytes;\n}\nstd::string FullName(const std::string& prefix, const std::string& name) {\n if (absl::StrContains(name, kColon)) {\n LOG(ERROR) << name << \" should not contain \" << kColon;\n }\n return strings::StrCat(kFullNameRandomHex, kPipe, prefix, kColon, name);\n}\nStatus ExtractIteratorPrefix(StringPiece key, string* prefix) {\n if (!absl::StartsWith(key, data::kFullNameRandomHex)) {\n return errors::InvalidArgument(\"Key: \", key,\n \" was not generated using full_name.\");\n }\n std::vector split_keys = str_util::Split(key, data::kPipe);\n if (split_keys.size() != 2) {\n return errors::InvalidArgument(\"Key: \", key,\n \" was not generated using full_name.\");\n }\n string real_key = split_keys[1];\n const int pos = real_key.rfind(kColon);\n *prefix = real_key.substr(0, pos);\n return absl::OkStatus();\n}\nStatus GetDatasetFromVariantTensor(const Tensor& tensor,\n DatasetBase** out_dataset) {\n if (!(tensor.dtype() == DT_VARIANT &&\n 
TensorShapeUtils::IsScalar(tensor.shape()))) {\n return errors::InvalidArgument(\n \"Dataset tensor must be a scalar of dtype DT_VARIANT.\");\n }\n const Variant& variant = tensor.scalar()();\n const DatasetVariantWrapper* wrapper = variant.get();\n if (wrapper == nullptr) {\n return errors::InvalidArgument(\"Tensor must be a Dataset object.\");\n }\n *out_dataset = wrapper->get();\n if (*out_dataset == nullptr) {\n return errors::Internal(\"Read uninitialized Dataset variant.\");\n }\n return absl::OkStatus();\n}\nStatus StoreDatasetInVariantTensor(DatasetBase* dataset, Tensor* tensor) {\n if (!(tensor->dtype() == DT_VARIANT &&\n TensorShapeUtils::IsScalar(tensor->shape()))) {\n return errors::InvalidArgument(\n \"Dataset tensor must be a scalar of dtype DT_VARIANT.\");\n }\n tensor->scalar()() = DatasetVariantWrapper(dataset);\n return absl::OkStatus();\n}\nnamespace internal {\n#define WARN_PROTO_FIELD_CONFLICT(reflection, field, field_type, src, dst) \\\n { \\\n auto source_value = reflection->Get##field_type(src, field); \\\n auto destination_value = reflection->Get##field_type(*dst, field); \\\n if (source_value != destination_value) { \\\n LOG(WARNING) << \"Changing the value of option field \" << field->name() \\\n << \" from \" << destination_value << \" to \" << source_value; \\\n } \\\n }\n#define WARN_PROTO_ENUM_FIELD_CONFLICT(reflection, field, src, dst) \\\n { \\\n auto source_value = reflection->GetEnum(src, field); \\\n auto destination_value = reflection->GetEnum(*dst, field); \\\n if (source_value != destination_value) { \\\n LOG(WARNING) << \"Changing the value of option enum field \" \\\n << field->name() << \" from \" \\\n << destination_value->full_name() << \" to \" \\\n << source_value->full_name(); \\\n } \\\n }\nvoid WarnProtoConflicts(const protobuf::Message& src, protobuf::Message* dst) {\n std::vector set_src;\n std::vector set_dst;\n const protobuf::Reflection* reflection = src.GetReflection();\n reflection->ListFields(src, &set_src);\n reflection->ListFields(*dst, &set_dst);\n std::sort(set_src.begin(), set_src.end());\n std::sort(set_dst.begin(), set_dst.end());\n std::vector in_both;\n std::set_intersection(set_src.begin(), set_src.end(), set_dst.begin(),\n set_dst.end(), std::back_inserter(in_both));\n for (auto field : in_both) {\n if (field->name() == \"framework_type\") {\n continue;\n }\n if (field->type() == protobuf::FieldDescriptor::TYPE_MESSAGE) {\n WarnProtoConflicts(reflection->GetMessage(src, field),\n reflection->MutableMessage(dst, field));\n } else {\n switch (field->cpp_type()) {\n case protobuf::FieldDescriptor::CPPTYPE_INT32:\n WARN_PROTO_FIELD_CONFLICT(reflection, field, Int32, src, dst);\n break;\n case protobuf::FieldDescriptor::CPPTYPE_INT64:\n WARN_PROTO_FIELD_CONFLICT(reflection, field, Int64, src, dst);\n break;\n case protobuf::FieldDescriptor::CPPTYPE_UINT32:\n WARN_PROTO_FIELD_CONFLICT(reflection, field, UInt32, src, dst);\n break;\n case protobuf::FieldDescriptor::CPPTYPE_UINT64:\n WARN_PROTO_FIELD_CONFLICT(reflection, field, UInt64, src, dst);\n break;\n case protobuf::FieldDescriptor::CPPTYPE_DOUBLE:\n WARN_PROTO_FIELD_CONFLICT(reflection, field, Double, src, dst);\n break;\n case protobuf::FieldDescriptor::CPPTYPE_FLOAT:\n WARN_PROTO_FIELD_CONFLICT(reflection, field, Float, src, dst);\n break;\n case protobuf::FieldDescriptor::CPPTYPE_BOOL:\n WARN_PROTO_FIELD_CONFLICT(reflection, field, Bool, src, dst);\n break;\n case protobuf::FieldDescriptor::CPPTYPE_ENUM:\n WARN_PROTO_ENUM_FIELD_CONFLICT(reflection, field, src, dst);\n 
break;\n default: {\n LOG(ERROR) << \"Unrecognized proto type for field \"\n << field->full_name();\n }\n }\n }\n }\n}\n#undef WARN_PROTO_ENUM_FIELD_CONFLICT\n#undef WARN_PROTO_FIELD_CONFLICT\nvoid MergeOptions(const protobuf::Message& source,\n protobuf::Message* destination) {\n WarnProtoConflicts(source, destination);\n destination->MergeFrom(source);\n}\nvoid MergeOptions(const protobuf::MessageLite& source,\n protobuf::MessageLite* destination) {\n destination->CheckTypeAndMergeFrom(source);\n}\n} \nvoid DatasetBase::Initialize(const Metadata& metadata) {\n Status s = ComputeNumSources();\n if (!s.ok()) {\n LOG_EVERY_N_SEC(ERROR, 10) << s;\n }\n s = MergeOptionsFromInputs();\n if (!s.ok()) {\n LOG_EVERY_N_SEC(ERROR, 10) << s;\n }\n metadata_ = metadata;\n if (metadata_.name() == \"\") {\n static std::atomic id_counter(0);\n *metadata_.mutable_name() =\n strings::StrCat(type_string(), \":\", id_counter.fetch_add(1));\n }\n}\nStatus DatasetBase::ComputeNumSources() {\n std::vector inputs;\n Status s = InputDatasets(&inputs);\n if (errors::IsUnimplemented(s)) {\n return s;\n }\n if (num_sources_ >= 0) {\n return absl::OkStatus();\n }\n num_sources_ = 0;\n if (inputs.empty()) {\n num_sources_ = 1;\n return absl::OkStatus();\n }\n for (const auto& input : inputs) {\n if (input->num_sources() < 0) {\n return errors::FailedPrecondition(\n \"Cannot compute input sources for dataset of type \", type_string(),\n \", because sources could not be computed for input dataset of type \",\n input->type_string());\n }\n num_sources_ += input->num_sources();\n }\n return absl::OkStatus();\n}\nStatus DatasetBase::CheckRandomAccessCompatible(const int64 index) const {\n CardinalityOptions options;\n options.set_compute_level(CardinalityOptions::CARDINALITY_COMPUTE_MODERATE);\n int64 cardinality = Cardinality(options);\n if (cardinality == kInfiniteCardinality ||\n cardinality == kUnknownCardinality) {\n return tensorflow::errors::FailedPrecondition(\n \"Dataset of type \", this->DebugString(), \" has \",\n cardinality == kInfiniteCardinality ? 
\"infinite\" : \"unknown\",\n \" cardinality, which does not support random access.\");\n }\n if (index < 0 || index >= cardinality) {\n return errors::OutOfRange(\"Index out of range [0, \", cardinality,\n \"):\", index);\n }\n return absl::OkStatus();\n}\nStatus DatasetBase::Get(OpKernelContext* ctx, int64 index,\n std::vector* out_tensors) const {\n return errors::Unimplemented(\"Random access is not implemented for dataset \",\n DebugString());\n}\nStatus DatasetBase::Get(AnyContext ctx, int64 index,\n std::vector* out_tensors) const {\n return errors::Unimplemented(\"Random access is not implemented for dataset \",\n DebugString());\n}\nabsl::StatusOr DatasetBase::Finalize(\n OpKernelContext* ctx,\n std::function>()>\n make_finalized_dataset) const {\n mutex_lock l(mu_);\n if (!finalized_dataset_) {\n TF_ASSIGN_OR_RETURN(finalized_dataset_, make_finalized_dataset());\n }\n return finalized_dataset_.get();\n}\nStatus DatasetBase::MergeOptionsFromInputs() {\n std::vector inputs;\n Status s = InputDatasets(&inputs);\n if (errors::IsUnimplemented(s)) {\n return s;\n }\n if (inputs.empty()) {\n return absl::OkStatus();\n }\n Options merged_options = inputs[0]->options_;\n for (int i = 1; i < inputs.size(); ++i) {\n internal::MergeOptions(inputs[i]->options_, &merged_options);\n }\n internal::MergeOptions(options_, &merged_options);\n options_ = merged_options;\n return absl::OkStatus();\n}\nStatus DatasetBase::MakeIterator(\n IteratorContext* ctx, const IteratorBase* parent,\n const string& output_prefix,\n std::unique_ptr* iterator) const {\n if (type_string() == \"OptionsDataset\" || type_string() == \"FinalizeDataset\") {\n std::vector inputs;\n Status s = InputDatasets(&inputs);\n return inputs[0]->MakeIterator(ctx, parent, output_prefix, iterator);\n }\n tsl::profiler::TraceMe traceme(\n [&] {\n return tsl::profiler::TraceMeEncode(\n strings::StrCat(\"MakeIterator::\", type_string()), {});\n },\n tsl::profiler::TraceMeLevel::kInfo);\n *iterator = MakeIteratorInternal(output_prefix);\n Status s = (*iterator)->InitializeBase(ctx, parent);\n if (s.ok()) {\n s.Update((*iterator)->Initialize(ctx));\n ctx->SaveCheckpoint(iterator->get());\n }\n if (!s.ok()) {\n iterator->reset();\n }\n return s;\n}\nStatus DatasetBase::MakeSplitProviders(\n std::vector>* split_providers) const {\n std::vector inputs;\n Status s = InputDatasets(&inputs);\n if (errors::IsUnimplemented(s)) {\n return errors::Unimplemented(\n \"Cannot create split providers for dataset of type \", type_string(),\n \", because the dataset implements neither `InputDatasets` nor \"\n \"`MakeSplitProvider`.\");\n }\n if (inputs.size() != 1) {\n return errors::Unimplemented(\n \"Cannot create split providers for dataset of type \", type_string(),\n \", because the dataset is not unary (instead having arity \",\n inputs.size(),\n \"), and no custom implementation of `MakeSplitProvider` is defined.\");\n }\n return inputs[0]->MakeSplitProviders(split_providers);\n}\nstd::optional DatasetBase::GetEstimatedElementSize() const {\n const auto& shapes = output_shapes();\n const auto& dtypes = output_dtypes();\n if (shapes.size() != dtypes.size()) {\n LOG(ERROR) << \"This should not happen because the sizes of output_shapes() \"\n \"and output_dtypes() should always be \"\n \"the same.\";\n return std::nullopt;\n }\n size_t num_outputs = shapes.size();\n int64_t element_size = 0;\n for (int i = 0; i < num_outputs; ++i) {\n const auto& partial_shape = shapes[i];\n const auto& dtype = dtypes[i];\n auto num_elements = 
partial_shape.num_elements();\n if (num_elements == -1) {\n return std::nullopt;\n }\n element_size += num_elements * DataTypeSize(dtype);\n }\n return element_size;\n}\nint64_t DatasetBase::Cardinality() const {\n mutex_lock l(cardinality_mu_);\n if (cardinality_ == kUnknownCardinality) {\n CardinalityOptions options;\n cardinality_ = CardinalityInternal(options);\n }\n return cardinality_;\n}\nint64_t DatasetBase::Cardinality(CardinalityOptions options) const {\n mutex_lock l(cardinality_mu_);\n if (cardinality_ == kUnknownCardinality) {\n cardinality_ = CardinalityInternal(options);\n }\n return cardinality_;\n}\nStatus DatasetBase::InputDatasets(\n std::vector* inputs) const {\n return errors::Unimplemented(\n \"Cannot compute input sources for dataset of type \", type_string(),\n \", because the dataset does not implement `InputDatasets`. To fix this, \"\n \"your dataset should override the `InputDatasets` method. If it is a \"\n \"source dataset, it should return empty inputs.\");\n}\nStatus DatasetBase::DatasetGraphDefBuilder::AddInputDataset(\n SerializationContext* ctx, const DatasetBase* dataset, Node** output) {\n Status status = dataset->AsGraphDefInternal(ctx, this, output);\n if (ctx->is_graph_rewrite()) {\n if (status.ok()) {\n (*output)->AddAttr(kCardinalityAttrForRewrite, dataset->Cardinality());\n } else if (errors::IsUnimplemented(status)) {\n Tensor t(DT_VARIANT, TensorShape({}));\n dataset->Ref();\n TF_RETURN_IF_ERROR(\n StoreDatasetInVariantTensor(const_cast(dataset), &t));\n TF_RETURN_IF_ERROR(AddPlaceholder(t, output));\n DCHECK_NE(ctx->input_list(), nullptr);\n ctx->input_list()->emplace_back((*output)->name(), std::move(t));\n LOG_EVERY_N_SEC(WARNING, 30)\n << \"Input of \" << dataset->DebugString()\n << \" will not be optimized because the dataset does not implement \"\n \"the \"\n \"AsGraphDefInternal() method needed to apply optimizations.\";\n return absl::OkStatus();\n }\n }\n return status;\n}\nStatus DatasetBase::DatasetGraphDefBuilder::AddDatasetOrTensor(\n SerializationContext* ctx, const Tensor& t, Node** output) {\n if (t.dtype() == DT_VARIANT) {\n Status s = AddDatasetOrTensorHelper(ctx, t, output);\n if (s.ok()) {\n return s;\n }\n }\n if (t.dtype() == DT_RESOURCE && !ctx->is_graph_rewrite()) {\n Status s = AddResourceHelper(ctx, t, output);\n if (!errors::IsUnimplemented(s)) {\n return s;\n }\n }\n return AddTensor(t, output);\n}\nStatus DatasetBase::DatasetGraphDefBuilder::AddIdentity(\n SerializationContext* ctx, const std::string& name_prefix, Node** input,\n Node** output) {\n *output =\n ops::UnaryOp(\"Identity\", *input,\n builder()->opts().WithName(UniqueNodeName(name_prefix)));\n return absl::OkStatus();\n}\nStatus DatasetBase::DatasetGraphDefBuilder::AddDatasetOrTensorHelper(\n SerializationContext* ctx, const Tensor& t, Node** output) {\n if (t.dims() == 0) {\n DatasetBase* dataset;\n TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(t, &dataset));\n return AddInputDataset(ctx, dataset, output);\n }\n std::vector nodes;\n for (int i = 0; i < t.dim_size(0); ++i) {\n Node* node;\n TF_RETURN_IF_ERROR(AddDatasetOrTensorHelper(ctx, t.SubSlice(i), &node));\n nodes.emplace_back(node);\n }\n auto op_name = \"Pack\";\n auto opts = builder()->opts();\n NodeBuilder node_builder(opts.GetNameForOp(op_name), op_name,\n opts.op_registry());\n node_builder.Input(std::move(nodes));\n *output = opts.FinalizeBuilder(&node_builder);\n return absl::OkStatus();\n}\nStatus DatasetBase::DatasetGraphDefBuilder::AddResourceHelper(\n SerializationContext* ctx, const 
Tensor& t, Node** output) {\n if (t.NumElements() == 0) {\n return errors::InvalidArgument(\"Empty resouce handle\");\n }\n const ResourceHandle& handle = t.flat()(0);\n if (ctx->device_name() != handle.device()) {\n return errors::InvalidArgument(\"Trying to access resource \", handle.name(),\n \" located in device \", handle.device(),\n \" from device \", ctx->device_name());\n }\n ResourceBase* resource;\n TF_RETURN_IF_ERROR(ctx->resource_mgr()->Lookup(handle, &resource));\n core::ScopedUnref unref(resource);\n return resource->AsGraphDef(builder(), output);\n}\nDatasetBaseIterator::DatasetBaseIterator(const BaseParams& params)\n : params_(params) {\n params_.dataset->Ref();\n VLOG(2) << prefix() << \" constructor\";\n strings::StrAppend(&traceme_metadata_, \"name=\", dataset()->metadata().name());\n strings::StrAppend(&traceme_metadata_, \",shapes=\");\n auto& shapes = output_shapes();\n for (int i = 0; i < shapes.size(); ++i) {\n if (i > 0) {\n strings::StrAppend(&traceme_metadata_, \" \");\n }\n strings::StrAppend(&traceme_metadata_, shapes.at(i).DebugString());\n }\n strings::StrAppend(&traceme_metadata_, \",types=\");\n auto& types = output_dtypes();\n for (int i = 0; i < types.size(); ++i) {\n if (i > 0) {\n strings::StrAppend(&traceme_metadata_, \" \");\n }\n strings::StrAppend(&traceme_metadata_, DataTypeString(types.at(i)));\n }\n}\nDatasetBaseIterator::~DatasetBaseIterator() {\n VLOG(2) << prefix() << \" destructor\";\n params_.dataset->Unref();\n}\nstring DatasetBaseIterator::BuildTraceMeName() {\n string result =\n strings::StrCat(params_.prefix, \"#\", traceme_metadata_, \",id=\", id_);\n if (parent_) {\n strings::StrAppend(&result, \",parent_id=\", parent_id_);\n }\n TraceMeMetadata metadata = GetTraceMeMetadata();\n for (const auto& pair : metadata) {\n strings::StrAppend(&result, \",\", pair.first, \"=\", pair.second);\n }\n if (model_node() != nullptr) {\n if (model_node()->buffered_elements() > 0) {\n strings::StrAppend(\n &result, \",buffered_elements=\",\n static_cast(model_node()->buffered_elements()));\n strings::StrAppend(\n &result, \",buffered_bytes_MB=\",\n static_cast(\n static_cast(model_node()->buffered_bytes()) * 1e-6));\n }\n }\n strings::StrAppend(&result, \"#\");\n return result;\n}\nStatus DatasetBaseIterator::GetNext(IteratorContext* ctx,\n std::vector* out_tensors,\n bool* end_of_sequence) {\n activity_watcher::ActivityScope activity_scope([&]() {\n activity_watcher::Activity::Attributes attributes;\n attributes[\"iterator_prefix\"] = prefix();\n return std::make_unique(\n \"Iterator::GetNext\", activity_watcher::ActivityCategory::kDatasetOp,\n std::move(attributes));\n });\n tsl::profiler::TraceMe activity([&] { return BuildTraceMeName(); },\n tsl::profiler::TraceMeLevel::kInfo);\n DVLOG(3) << prefix() << \" GetNext enter\";\n auto model = ctx->model();\n bool output_was_recording =\n node_ && node_->output() && node_->output()->is_recording();\n if (collect_resource_usage(ctx)) {\n int64_t now_nanos = EnvTime::NowNanos();\n if (output_was_recording) {\n node_->output()->record_stop(now_nanos);\n }\n node_->record_start(now_nanos);\n }\n out_tensors->clear();\n Status s = GetNextInternal(ctx, out_tensors, end_of_sequence);\n ctx->SaveCheckpoint(this);\n if (!SymbolicCheckpointCompatible()) {\n ctx->UpdateCheckpointStatus([this]() {\n return errors::Unimplemented(dataset()->type_string(),\n \" does not support symbolic checkpointing.\");\n });\n }\n if (TF_PREDICT_TRUE(s.ok())) {\n if (TF_PREDICT_TRUE(!*end_of_sequence)) {\n if 
(TF_PREDICT_FALSE(out_tensors->size() != dataset()->output_dtypes().size())) { return errors::Internal("Expected ", dataset()->output_dtypes().size(), " components but got ", out_tensors->size(), "."); } RecordElement(ctx, out_tensors); } else { out_tensors->clear(); } } if (collect_resource_usage(ctx)) { int64_t now_nanos = EnvTime::NowNanos(); node_->record_stop(now_nanos); if (output_was_recording) { node_->output()->record_start(now_nanos); } } if (TF_PREDICT_FALSE(errors::IsOutOfRange(s))) { s = errors::Internal("Iterator \"", params_.prefix, "\" returned `OutOfRange`. This indicates an " "implementation error as `OutOfRange` errors are not " "expected to be returned here. Original message: ", s.message()); LOG(ERROR) << s; } DVLOG(3) << prefix() << " GetNext exit"; return s; } Status DatasetBaseIterator::Skip(IteratorContext* ctx, int num_to_skip, bool* end_of_sequence, int* num_skipped) { tsl::profiler::TraceMe activity([&] { return BuildTraceMeName(); }, tsl::profiler::TraceMeLevel::kInfo); DVLOG(3) << prefix() << " Skip enter"; auto model = ctx->model(); bool output_was_recording = node_ && node_->output() && node_->output()->is_recording(); if (collect_resource_usage(ctx)) { int64_t now_nanos = EnvTime::NowNanos(); auto output = node_->output(); if (output_was_recording) { output->record_stop(now_nanos); } node_->record_start(now_nanos); } Status s = SkipInternal(ctx, num_to_skip, end_of_sequence, num_skipped); if (collect_resource_usage(ctx)) { int64_t now_nanos = EnvTime::NowNanos(); node_->record_stop(now_nanos); auto output = node_->output(); if (output_was_recording) { output->record_start(now_nanos); } } if (TF_PREDICT_FALSE(errors::IsOutOfRange(s))) { s = errors::Internal("Iterator \"", params_.prefix, "\" returned `OutOfRange`. This indicates an " "implementation error as `OutOfRange` errors are not " "expected to be returned here. Original message: ", s.message()); LOG(ERROR) << s; } DVLOG(3) << prefix() << " Skip exit"; return s; } Status DatasetBaseIterator::SkipInternal(IteratorContext* ctx, int num_to_skip, bool* end_of_sequence, int* num_skipped) { *num_skipped = 0; for (int i = 0; i < num_to_skip; ++i) { std::vector<Tensor> out_tensors; TF_RETURN_IF_ERROR(GetNextInternal(ctx, &out_tensors, end_of_sequence)); if (*end_of_sequence) { return absl::OkStatus(); } RecordElement(ctx, &out_tensors); (*num_skipped)++; } return absl::OkStatus(); } void DatasetOpKernel::Compute(OpKernelContext* ctx) { DatasetBase* dataset = nullptr; MakeDataset(ctx, &dataset); if (ctx->status().ok()) { Tensor* output = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &output)); OP_REQUIRES_OK(ctx, StoreDatasetInVariantTensor(dataset, output)); if (ctx->stack_trace().has_value() && VLOG_IS_ON(4)) { VLOG(4) << "Dataset " << dataset->type_string() << " created using the following stack trace:"; for (const auto& stack_frame : ctx->stack_trace()->ToStackFrames( {}, {}, false, -1)) { VLOG(4) << stack_frame.file_name << ":" << stack_frame.line_number << " in " << stack_frame.function_name << "()"; } } dataset->Initialize(metadata_); } } string DatasetOpKernel::TraceString(const OpKernelContext& ctx, bool verbose) const { return tsl::profiler::TraceMeOp(name_view(), type_string_view()); } bool DatasetOpKernel::IsDatasetOp(const OpDef& op_def) { if (op_def.output_arg_size() != 1) return false; if (op_def.output_arg(0).type() != DT_VARIANT) return false; absl::string_view op_name = op_def.name(); std::vector<absl::string_view> v1, v2; if (absl::StartsWith(op_name, "__wrapped__")) { v1 = absl::StrSplit(op_name, "__wrapped__", absl::SkipEmpty()); if (v1.empty()) return false; v2 = absl::StrSplit(v1[0], "_", absl::SkipEmpty()); op_name = v2.empty() ? v1[0] : v2[0]; } if (op_name == "DatasetFromGraph") return true; if (absl::EndsWith(op_name, "Dataset")) return true; size_t index = op_name.length() - 1; while (index >= 0 && isdigit(op_name[index])) { index--; } constexpr absl::string_view kDatasetPrefix = "DatasetV"; constexpr absl::string_view::size_type kPrefixLength = kDatasetPrefix.size(); if (index < kPrefixLength - 1 || index == op_name.length() - 1) return false; return op_name.substr(index - kPrefixLength + 1, kPrefixLength) == kDatasetPrefix; } void UnaryDatasetOpKernel::MakeDataset(OpKernelContext* ctx, DatasetBase** output) { DatasetBase* input; OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(0), &input)); MakeDataset(ctx, input, output); } void BinaryDatasetOpKernel::MakeDataset(OpKernelContext* ctx, DatasetBase** output) { DatasetBase* input; OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(0), &input)); DatasetBase* another_input; OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(1), &another_input)); MakeDataset(ctx, input, another_input, output); } const char DatasetBase::kDatasetGraphKey[] = "_DATASET_GRAPH"; const char DatasetBase::kDatasetGraphOutputNodeKey[] = "_DATASET_GRAPH_OUTPUT_NODE"; BackgroundWorker::BackgroundWorker(Env* env, const char* name) : env_(env), name_(name) {} BackgroundWorker::~BackgroundWorker() { { mutex_lock l(mu_); cancelled_ = true; } cond_var_.notify_one(); thread_.reset(); } void BackgroundWorker::Schedule(std::function<void()> work_item) { { mutex_lock l(mu_); if (!thread_) { thread_ = absl::WrapUnique(env_->StartThread( {}, name_, [this]() { WorkerLoop(); })); } work_queue_.push_back(std::move(work_item)); } cond_var_.notify_one(); } void BackgroundWorker::WorkerLoop() { tensorflow::ResourceTagger tag(kTFDataResourceTag, "Background"); while (true) { std::function<void()> work_item = nullptr; { mutex_lock l(mu_); while (!cancelled_ && work_queue_.empty()) { cond_var_.wait(l); } if (cancelled_) { return; } DCHECK(!work_queue_.empty()); work_item = std::move(work_queue_.front()); work_queue_.pop_front(); } DCHECK(work_item != nullptr); work_item(); } } namespace { class RunnerImpl : public Runner { public: void Run(const std::function<void()>& f) override { tensorflow::ResourceTagger tag(kTFDataResourceTag, "Runner"); f(); PreventTailCall(); } private: virtual void PreventTailCall() {} }; } Runner* Runner::get() { static Runner* singleton = new RunnerImpl; return singleton; } } }
#include "tensorflow/core/framework/dataset.h" #include #include #include #include "absl/container/flat_hash_set.h" #include "absl/status/status.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace data { TEST(DatasetTest, FullName) { EXPECT_EQ(FullName("prefix", "name"), "60d899aa0d8ce4351e7c3b419e92d25b|prefix:name"); } enum DataTypeTest { _tf_int_32, _tf_int_64, _tf_float_, _tf_double_, _tf_string_ }; struct DatasetTestParam { const DataTypeTest type; std::function<std::vector<Tensor>()> tensor_factory; const int64_t expected_bytes; }; class DatasetTestTotalBytes : public ::testing::TestWithParam<DatasetTestParam> {}; TEST_P(DatasetTestTotalBytes, TestTotalBytes) { const DatasetTestParam& test_case = GetParam(); if (test_case.type == _tf_string_) { EXPECT_LE(GetTotalBytes(test_case.tensor_factory()), test_case.expected_bytes); } else { EXPECT_EQ(GetTotalBytes(test_case.tensor_factory()), test_case.expected_bytes); } } std::vector<Tensor> tensor_tf_int_32s() { return {test::AsTensor<int32>({1, 2, 3, 4, 5}), test::AsTensor<int32>({1, 2, 3, 4})}; } std::vector<Tensor> tensor_tf_int_64s() { return {test::AsTensor<int64_t>({1, 2, 3, 4, 5}), test::AsTensor<int64_t>({10, 12})}; } std::vector<Tensor> tensor_tf_float_s() { return {test::AsTensor<float>({1.0, 2.0, 3.0, 4.0})}; } std::vector<Tensor> tensor_tf_double_s() { return {test::AsTensor<double>({100.0}), test::AsTensor<double>({200.0}), test::AsTensor<double>({400.0}), test::AsTensor<double>({800.0})}; } const tstring str = "test string"; std::vector<Tensor> tensor_strs() { return {test::AsTensor<tstring>({str})}; } INSTANTIATE_TEST_SUITE_P( DatasetTestTotalBytes, DatasetTestTotalBytes, ::testing::ValuesIn(std::vector<DatasetTestParam>{ {_tf_int_32, tensor_tf_int_32s, 4 * 9 }, {_tf_int_64, tensor_tf_int_64s, 8 * 7 }, {_tf_float_, tensor_tf_float_s, 4 * 4 }, {_tf_double_, tensor_tf_double_s, 8 * 4 }, {_tf_string_, tensor_strs, static_cast<int64_t>(sizeof(str) + str.size()) }})); struct MergeOptionsTestParam { const std::string source; const std::string destination; const std::string expected; }; class MergeOptionsTest : public ::testing::TestWithParam<MergeOptionsTestParam> {}; TEST_P(MergeOptionsTest, MergeOptions) { const MergeOptionsTestParam& test_case = GetParam(); Options source; CHECK(tensorflow::protobuf::TextFormat::ParseFromString(test_case.source, &source)); Options destination; CHECK(tensorflow::protobuf::TextFormat::ParseFromString(test_case.destination, &destination)); Options expected; CHECK(tensorflow::protobuf::TextFormat::ParseFromString(test_case.expected, &expected)); internal::MergeOptions(source, &destination); EXPECT_EQ(expected.SerializeAsString(), destination.SerializeAsString()); } INSTANTIATE_TEST_SUITE_P( MergeOptionsTest, MergeOptionsTest, ::testing::ValuesIn(std::vector<MergeOptionsTestParam>{ {"deterministic: false", "", "deterministic: false"}, {"deterministic: false", "deterministic: false", "deterministic: false"}, {"deterministic: false", "deterministic: true", "deterministic: false"}, {"external_state_policy: POLICY_IGNORE", "external_state_policy: POLICY_FAIL", "external_state_policy: POLICY_IGNORE"}})); TEST(DatasetTest, IsDatasetOp) { OpDef op_def; EXPECT_FALSE(DatasetOpKernel::IsDatasetOp(op_def)); op_def.add_output_arg()->set_type(DT_STRING); EXPECT_FALSE(DatasetOpKernel::IsDatasetOp(op_def)); op_def.mutable_output_arg(0)->set_type(DT_VARIANT); op_def.set_name("Identity"); EXPECT_FALSE(DatasetOpKernel::IsDatasetOp(op_def)); for (const auto& name : {"Dataset", "RangeDataset", "MapDatasetV1", "ParallelInterleaveDatasetV42", "DataServiceDatasetV1000", "DatasetFromGraph"}) { op_def.set_name(name); EXPECT_TRUE(DatasetOpKernel::IsDatasetOp(op_def)); } } TEST(DatasetTest, IdRegistry) { MemoryCheckpoint::IdRegistry id_registry; auto id_1 = id_registry.Add("foo", "key_1"); auto id_2 = id_registry.Add("foo:bar", "key_2"); auto id_3 = id_registry.Add("foo:bar:baz", "key_3"); auto [prefix_1, key_1] = id_registry.Get(id_1); EXPECT_EQ(prefix_1, "foo"); EXPECT_EQ(key_1, "key_1"); auto [prefix_2, key_2] = id_registry.Get(id_2); EXPECT_EQ(prefix_2, "foo:bar"); EXPECT_EQ(key_2, "key_2"); auto [prefix_3, key_3] = id_registry.Get(id_3); EXPECT_EQ(prefix_3, "foo:bar:baz"); EXPECT_EQ(key_3, "key_3"); auto matching_ids = id_registry.GetMatchingIds("hello"); EXPECT_EQ(matching_ids.size(), 0); matching_ids = id_registry.GetMatchingIds("foo:bar:baz"); EXPECT_EQ(matching_ids.size(), 1); matching_ids = id_registry.GetMatchingIds("foo:bar"); EXPECT_EQ(matching_ids.size(), 2); matching_ids = id_registry.GetMatchingIds("foo"); EXPECT_EQ(matching_ids.size(), 3); matching_ids = id_registry.GetMatchingIds("f"); EXPECT_EQ(matching_ids.size(), 3); absl::flat_hash_set<int64_t> matching_ids_set(matching_ids.begin(), matching_ids.end()); EXPECT_TRUE(matching_ids_set.contains(id_1)); EXPECT_TRUE(matching_ids_set.contains(id_2)); EXPECT_TRUE(matching_ids_set.contains(id_3)); id_registry.RemoveIds(matching_ids); matching_ids = id_registry.GetMatchingIds("foo"); EXPECT_EQ(matching_ids.size(), 0); } TEST(DatasetTest, MemoryCheckpointWrites) { std::shared_ptr<MemoryCheckpoint::IdRegistry> id_registry = std::make_shared<MemoryCheckpoint::IdRegistry>(); MemoryCheckpoint memory_checkpoint(id_registry); Tensor input_tensor(DT_FLOAT, {1}); input_tensor.flat<float>()(0) = 2.0f; TF_EXPECT_OK(memory_checkpoint.WriteScalar("name_foo", "key_bar", 5)); TF_EXPECT_OK( memory_checkpoint.WriteTensor("name_corgi", "key_baz", input_tensor)); auto matching_ids = id_registry->GetMatchingIds("name_foo"); EXPECT_EQ(matching_ids.size(), 1); auto id = matching_ids.at(0); auto [_, key] = id_registry->Get(id); EXPECT_EQ(key, "key_bar"); matching_ids = id_registry->GetMatchingIds("name_corgi"); EXPECT_EQ(matching_ids.size(), 1); id = matching_ids.at(0); std::tie(_, key) = id_registry->Get(id); EXPECT_EQ(key, "key_baz"); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/dataset.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/dataset_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9f7a8b01-7a36-410b-8f64-3545915fd10c
cpp
google/tensorstore
downsample_array
tensorstore/driver/downsample/downsample_array.cc
tensorstore/driver/downsample/downsample_array_test.cc
#include "tensorstore/driver/downsample/downsample_array.h" #include "absl/status/status.h" #include "tensorstore/array.h" #include "tensorstore/downsample_method.h" #include "tensorstore/driver/downsample/downsample_nditerable.h" #include "tensorstore/driver/downsample/downsample_util.h" #include "tensorstore/index.h" #include "tensorstore/index_space/dim_expression.h" #include "tensorstore/index_space/transformed_array.h" #include "tensorstore/internal/arena.h" #include "tensorstore/internal/nditerable.h" #include "tensorstore/internal/nditerable_array.h" #include "tensorstore/internal/nditerable_copy.h" #include "tensorstore/internal/nditerable_transformed_array.h" #include "tensorstore/util/result.h" #include "tensorstore/util/span.h" #include "tensorstore/util/status.h" namespace tensorstore { namespace internal_downsample { namespace { absl::Status ValidateDownsampleDomain(BoxView<> base_domain, BoxView<> downsampled_domain, span<const Index> downsample_factors, DownsampleMethod method) { const DimensionIndex rank = base_domain.rank(); if (rank != downsampled_domain.rank()) { return absl::InvalidArgumentError(tensorstore::StrCat( "Cannot downsample domain ", base_domain, " to domain ", downsampled_domain, " with different rank")); } if (rank != downsample_factors.size()) { return absl::InvalidArgumentError(tensorstore::StrCat( "Cannot downsample domain ", base_domain, " with downsample factors ", downsample_factors, " of different rank")); } for (DimensionIndex i = 0; i < rank; ++i) { const auto expected_interval = DownsampleInterval(base_domain[i], downsample_factors[i], method); if (expected_interval != downsampled_domain[i]) { return absl::InvalidArgumentError(tensorstore::StrCat( "Cannot downsample array with domain ", base_domain, " by factors ", downsample_factors, " with method ", method, " to array with domain ", downsampled_domain, ": expected target dimension ", i, " to have domain ", expected_interval)); } } return absl::OkStatus(); } } absl::Status DownsampleArray(OffsetArrayView<const void> source, OffsetArrayView<void> target, span<const Index> downsample_factors, DownsampleMethod method) { if (source.dtype() != target.dtype()) { return absl::InvalidArgumentError(tensorstore::StrCat( "Source data type (", source.dtype(), ") does not match target data type (", target.dtype(), ")")); } TENSORSTORE_RETURN_IF_ERROR(ValidateDownsampleMethod(source.dtype(), method)); TENSORSTORE_RETURN_IF_ERROR(ValidateDownsampleDomain( source.domain(), target.domain(), downsample_factors, method)); if (method == DownsampleMethod::kStride) { return CopyTransformedArray( source | tensorstore::AllDims().Stride(downsample_factors), target); } internal::DefaultNDIterableArena arena; auto base_iterable = GetArrayNDIterable(UnownedToShared(source), arena); auto target_iterable = GetArrayNDIterable(UnownedToShared(target), arena); auto downsampled_iterable = DownsampleNDIterable( std::move(base_iterable), source.domain(), downsample_factors, method, downsample_factors.size(), arena); internal::NDIterableCopier copier(*downsampled_iterable, *target_iterable, target.shape(), skip_repeated_elements, arena); return copier.Copy(); } Result<SharedOffsetArray<void>> DownsampleArray( OffsetArrayView<const void> source, span<const Index> downsample_factors, DownsampleMethod method) { SharedOffsetArray<void> target; target.layout().set_rank(source.rank()); DownsampleBounds(source.domain(), MutableBoxView<>(target.origin(), target.shape()), downsample_factors, method); target.element_pointer() = AllocateArrayElementsLike( StridedLayoutView( target.rank(), target.origin().data(), target.shape().data(), source.byte_strides().data()), target.byte_strides().data(), skip_repeated_elements, default_init, source.dtype()); TENSORSTORE_RETURN_IF_ERROR( DownsampleArray(source, target, downsample_factors, method)); return target; } absl::Status DownsampleTransformedArray(TransformedArrayView<const void> source, TransformedArrayView<void> target, span<const Index> downsample_factors, DownsampleMethod method) { if (source.dtype() != target.dtype()) { return absl::InvalidArgumentError(tensorstore::StrCat( "Source data type (", source.dtype(), ") does not match target data type (", target.dtype(), ")")); } TENSORSTORE_RETURN_IF_ERROR(ValidateDownsampleMethod(source.dtype(), method)); TENSORSTORE_RETURN_IF_ERROR( ValidateDownsampleDomain(source.domain().box(), target.domain().box(), downsample_factors, method)); if (method == DownsampleMethod::kStride) { return CopyTransformedArray( std::move(source) | tensorstore::AllDims().Stride(downsample_factors), target); } internal::DefaultNDIterableArena arena; TENSORSTORE_ASSIGN_OR_RETURN( auto base_iterable, GetTransformedArrayNDIterable(UnownedToShared(source), arena)); TENSORSTORE_ASSIGN_OR_RETURN( auto target_iterable, GetTransformedArrayNDIterable(UnownedToShared(target), arena)); auto downsampled_iterable = DownsampleNDIterable( std::move(base_iterable), source.domain().box(), downsample_factors, method, downsample_factors.size(), arena); internal::NDIterableCopier copier(*downsampled_iterable, *target_iterable, target.shape(), skip_repeated_elements, arena); return copier.Copy(); } Result<SharedOffsetArray<void>> DownsampleTransformedArray( TransformedArrayView<const void> source, span<const Index> downsample_factors, DownsampleMethod method) { SharedOffsetArray<void> target; target.layout().set_rank(source.rank()); DownsampleBounds(source.domain().box(), MutableBoxView<>(target.origin(), target.shape()), downsample_factors, method); target = AllocateArray(target.domain(), c_order, default_init, source.dtype()); TENSORSTORE_RETURN_IF_ERROR(DownsampleTransformedArray( source, TransformedArray(target), downsample_factors, method)); return target; } } }
#include "tensorstore/driver/downsample/downsample_array.h" #include #include #include #include #include "tensorstore/array.h" #include "tensorstore/array_testutil.h" #include "tensorstore/data_type.h" #include "tensorstore/downsample_method.h" #include "tensorstore/index.h" #include "tensorstore/index_space/dim_expression.h" #include "tensorstore/index_space/transformed_array.h" #include "tensorstore/util/span.h" namespace { using ::tensorstore::Dims; using ::tensorstore::DownsampleMethod; using ::tensorstore::Index; using ::tensorstore::kImplicit; using ::tensorstore::MakeArray; using ::tensorstore::MakeOffsetArray; using ::tensorstore::span; using ::tensorstore::internal_downsample::DownsampleArray; using ::tensorstore::internal_downsample::DownsampleTransformedArray; using ::testing::Optional; TEST(DownsampleArrayTest, MeanRank0) { EXPECT_THAT(DownsampleArray(tensorstore::MakeScalarArray<float>(42.0), span<const Index>(), DownsampleMethod::kMean), Optional(tensorstore::MakeScalarArray<float>(42.0))); } TEST(DownsampleArrayTest, MeanRank1ExactMultiple) { EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 5, 7}), span<const Index>({2}), DownsampleMethod::kMean), Optional(MakeArray<float>({1.5, 6}))); EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 3, 5, 7, 12}), span<const Index>({3}), DownsampleMethod::kMean), Optional(MakeArray<float>({2, 8}))); } TEST(DownsampleArrayTest, MeanRoundingUint8) { EXPECT_THAT(DownsampleArray(MakeArray<uint8_t>({253, 254, 254}), span<const Index>({3}), DownsampleMethod::kMean), Optional(MakeArray<uint8_t>({254}))); } TEST(DownsampleArrayTest, MeanRoundingInt16) { EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({-253, -254, -254}), span<const Index>({3}), DownsampleMethod::kMean), Optional(MakeArray<int16_t>({-254}))); } TEST(DownsampleArrayTest, MeanRoundingToEvenInt16) { EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({3, 3, 2, 2}), span<const Index>({4}), DownsampleMethod::kMean), Optional(MakeArray<int16_t>({2}))); EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({3, 3, 4, 4}), span<const Index>({4}), DownsampleMethod::kMean), Optional(MakeArray<int16_t>({4}))); EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({-3, -3, -2, -2}), span<const Index>({4}), DownsampleMethod::kMean), Optional(MakeArray<int16_t>({-2}))); EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({-3, -3, -4, -4}), span<const Index>({4}), DownsampleMethod::kMean), Optional(MakeArray<int16_t>({-4}))); } TEST(DownsampleArrayTest, MeanRoundingUint64) { EXPECT_THAT(DownsampleArray(MakeArray<uint64_t>({253, 254, 254}), span<const Index>({3}), DownsampleMethod::kMean), Optional(MakeArray<uint64_t>({254}))); } TEST(DownsampleArrayTest, MeanRoundingBool) { EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1}), span<const Index>({3}), DownsampleMethod::kMean), Optional(MakeArray<bool>({0}))); EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 1, 1}), span<const Index>({3}), DownsampleMethod::kMean), Optional(MakeArray<bool>({1}))); EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 1, 1, 0}), span<const Index>({4}), DownsampleMethod::kMean), Optional(MakeArray<bool>({0}))); } TEST(DownsampleArrayTest, MeanRank1Offset) { EXPECT_THAT(DownsampleArray(MakeOffsetArray<float>({1}, {1, 2, 5, 9}), span<const Index>({2}), DownsampleMethod::kMean), Optional(MakeArray<float>({1, 3.5, 9}))); } TEST(DownsampleArrayTest, MeanRank1SingleDownsampledElement) { EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2}), span<const Index>({2}), DownsampleMethod::kMean), Optional(MakeArray<float>({1.5}))); } TEST(DownsampleArrayTest, MeanRank1NotExactMultiple) { EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 5, 7, 9}), span<const Index>({2}), DownsampleMethod::kMean), Optional(MakeArray<float>({1.5, 6, 9}))); EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 6, 7, 9}), span<const Index>({3}), DownsampleMethod::kMean), Optional(MakeArray<float>({3, 8}))); } TEST(DownsampleArrayTest, MeanRank1NoDownsampling) { EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 5, 7}), span<const Index>({1}), DownsampleMethod::kMean), Optional(MakeArray<float>({1, 2, 5, 7}))); } TEST(DownsampleArrayTest, MeanRank2SingleDownsampleDim1) { EXPECT_THAT( DownsampleArray(MakeArray<float>({ {1, 2, 5, 7}, {5, 6, 15, 25}, }), span<const Index>({1, 2}), DownsampleMethod::kMean), Optional(MakeArray<float>({{1.5, 6}, {5.5, 20}}))); } TEST(DownsampleArrayTest, MeanRank2SingleDownsampleDim0) { EXPECT_THAT( DownsampleArray(MakeArray<float>({ {1, 2, 5, 7}, {5, 6, 15, 25}, }), span<const Index>({2, 1}), DownsampleMethod::kMean), Optional(MakeArray<float>({{3, 4, 10, 16}}))); } TEST(DownsampleArrayTest, MeanRank2TwoDownsampleDims) { EXPECT_THAT( DownsampleArray(MakeArray<float>({ {1, 2, 5, 7}, {5, 6, 15, 25}, }), span<const Index>({2, 2}), DownsampleMethod::kMean), Optional(MakeArray<float>({{3.5, 13.0}}))); } TEST(DownsampleArrayTest, MeanRank2NotExactMultiple) { EXPECT_THAT( DownsampleArray(MakeArray<float>({ {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}, }), span<const Index>({2, 2}), DownsampleMethod::kMean), Optional(MakeArray<float>({ {4, 6, 7.5}, {11.5, 13.5, 15}, }))); } TEST(DownsampleArrayTest, MeanRank2PartialStartBlock) { EXPECT_THAT( DownsampleArray(MakeOffsetArray<float>({3, 8}, {{1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}}), span<const Index>({2, 3}), DownsampleMethod::kMean), Optional(MakeOffsetArray<float>({1, 2}, {{1, 3, 5}, {8.5, 10.5, 12.5}}))); } TEST(DownsampleArrayTest, MedianRank2PartialStartBlock) { EXPECT_THAT( DownsampleArray(MakeOffsetArray<float>({3, 8}, {{1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}}), span<const Index>({2, 3}), DownsampleMethod::kMedian), Optional(MakeOffsetArray<float>({1, 2}, {{1, 3, 5}, {6, 9, 10}}))); } TEST(DownsampleArrayTest, ModeRank2PartialStartBlock) { EXPECT_THAT( DownsampleArray(MakeOffsetArray<float>({3, 8}, { {1, 2, 3, 3, 5}, {6, 4, 5, 5, 10}, {11, 6, 6, 6, 15}, }), span<const Index>({2, 3}), DownsampleMethod::kMode), Optional(MakeOffsetArray<float>({1, 2}, {{1, 3, 5}, {6, 6, 10}}))); } TEST(DownsampleArrayTest, StrideRank2PartialEndBlock) { EXPECT_THAT( DownsampleArray(MakeOffsetArray<float>({2, 6}, { {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}, }), span<const Index>({2, 3}), DownsampleMethod::kStride), Optional(MakeOffsetArray<float>({1, 2}, { {1, 4}, {11, 14}, }))); } TEST(DownsampleArrayTest, StrideRank2PartialStartBlock) { EXPECT_THAT( DownsampleArray(MakeOffsetArray<float>({3, 8}, { {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}, }), span<const Index>({2, 3}), DownsampleMethod::kStride), Optional(MakeOffsetArray<float>({2, 3}, { {7, 10}, }))); } TEST(DownsampleArrayTest, MeanRank3ThreeDownsampleDims) { EXPECT_THAT( DownsampleArray(MakeArray<float>({{ {1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}, }, { {13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}, }, { {25, 26, 27, 28}, {29, 30, 31, 32}, {33, 34, 35, 36}, }}), span<const Index>({2, 2, 2}), DownsampleMethod::kMean), Optional(MakeArray<float>({{ {9.5, 11.5}, {15.5, 17.5}, }, { {27.5, 29.5}, {33.5, 35.5}, }}))); } TEST(DownsampleArrayTest, MeanRank1ReversedExactMultiple) { EXPECT_THAT(DownsampleTransformedArray( (MakeArray<float>({1, 2, 3, 4}) | Dims(0).TranslateSizedInterval(kImplicit, kImplicit, -1)) .value(), span<const Index>({2}), DownsampleMethod::kMean), Optional(MakeArray<float>({3.5, 1.5}))); } TEST(DownsampleArrayTest, MeanRank1ReversedNotExactMultiple) { EXPECT_THAT(DownsampleTransformedArray( (MakeArray<float>({1, 2, 3, 4, 5}) | Dims(0).TranslateSizedInterval(kImplicit, kImplicit, -1)) .value(), span<const Index>({2}), DownsampleMethod::kMean), Optional(MakeArray<float>({4.5, 2.5, 1}))); } TEST(DownsampleArrayTest, MeanRank2ReversedNotExactMultiple) { EXPECT_THAT(DownsampleTransformedArray( (MakeArray<float>({ {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}, }) | Dims(0, 1).TranslateSizedInterval(kImplicit, kImplicit, -1)) .value(), span<const Index>({2, 2}), DownsampleMethod::kMean), Optional(MakeArray<float>({ {12, 10, 8.5}, {4.5, 2.5, 1}, }))); } TEST(DownsampleArrayTest, MinRank1ExactMultiple) { EXPECT_THAT(DownsampleArray(MakeArray<float>({2, 3, 5, 1}), span<const Index>({2}), DownsampleMethod::kMin), Optional(MakeArray<float>({2, 1}))); EXPECT_THAT(DownsampleArray(MakeArray<float>({2, 3, 8, 7, 1, 5}), span<const Index>({3}), DownsampleMethod::kMin), Optional(MakeArray<float>({2, 1}))); } TEST(DownsampleArrayTest, MaxRank1ExactMultiple) { EXPECT_THAT(DownsampleArray(MakeArray<float>({2, 3, 5, 1}), span<const Index>({2}), DownsampleMethod::kMax), Optional(MakeArray<float>({3, 5}))); EXPECT_THAT(DownsampleArray(MakeArray<float>({2, 3, 8, 7, 1, 5}), span<const Index>({3}), DownsampleMethod::kMax), Optional(MakeArray<float>({8, 7}))); } TEST(DownsampleArrayTest, MedianRank1ExactMultiple) { EXPECT_THAT( DownsampleArray(MakeArray<float>({100, 3, 1, 2, 99, 98, 97, 5}), span<const Index>({4}), DownsampleMethod::kMedian), Optional(MakeArray<float>({2, 97}))); } TEST(DownsampleArrayTest, MedianRank1Partial) { EXPECT_THAT( DownsampleArray(MakeArray<float>({100, 3, 1, 2, 99, 97, 98}), span<const Index>({4}), DownsampleMethod::kMedian), Optional(MakeArray<float>({2, 98}))); } TEST(DownsampleArrayTest, ModeRank1ExactMultiple) { EXPECT_THAT(DownsampleArray(MakeArray<float>({100, 99, 99, 99, 3, 3, 2, 2}), span<const Index>({4}), DownsampleMethod::kMode), Optional(MakeArray<float>({99, 2}))); } TEST(DownsampleArrayTest, ModeRank1Partial) { EXPECT_THAT(DownsampleArray(MakeArray<float>({100, 99, 99, 99, 3, 3, 2}), span<const Index>({4}), DownsampleMethod::kMode), Optional(MakeArray<float>({99, 3}))); } TEST(DownsampleArrayTest, ModeBool) { EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1, 1}), span<const Index>({4}), DownsampleMethod::kMode), Optional(MakeArray<bool>({0}))); EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 1, 1, 1}), span<const Index>({4}), DownsampleMethod::kMode), Optional(MakeArray<bool>({1}))); EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1, 1, 1}), span<const Index>({5}), DownsampleMethod::kMode), Optional(MakeArray<bool>({1}))); } TEST(DownsampleArrayTest, MeanBool) { EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1, 1}), span<const Index>({4}), DownsampleMethod::kMean), Optional(MakeArray<bool>({0}))); EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 1, 1, 1}), span<const Index>({4}), DownsampleMethod::kMean), Optional(MakeArray<bool>({1}))); EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1, 1, 1}), span<const Index>({5}), DownsampleMethod::kMean), Optional(MakeArray<bool>({1}))); } TEST(DownsampleArrayTest, MedianBool) { EXPECT_THAT( DownsampleArray(MakeArray<bool>({0, 0, 1, 1}), span<const Index>({4}), DownsampleMethod::kMedian), Optional(MakeArray<bool>({0}))); EXPECT_THAT( DownsampleArray(MakeArray<bool>({0, 1, 1, 1}), span<const Index>({4}), DownsampleMethod::kMedian), Optional(MakeArray<bool>({1}))); EXPECT_THAT( DownsampleArray(MakeArray<bool>({0, 0, 1, 1, 1}), span<const Index>({5}), DownsampleMethod::kMedian), Optional(MakeArray<bool>({1}))); } TEST(DownsampleArrayTest, ModeJson) { using ::tensorstore::dtypes::json_t; EXPECT_THAT(DownsampleArray(MakeArray<::nlohmann::json>({"a", "a", 3.0, 3, 3u}), span<const Index>({5}), DownsampleMethod::kMode), Optional(MakeArray<::nlohmann::json>({json_t(3)}))); } TEST(DownsampleArrayTest, MultipleBlocks) { auto source_array = tensorstore::AllocateArray<float>({128, 128}); auto expected_downsampled = tensorstore::AllocateArray<float>({64, 64}); for (int i = 0; i < 128; ++i) { for (int j = 0; j < 128; ++j) { source_array(i, j) = static_cast<float>(i); } } for (int i = 0; i < 64; ++i) { for (int j = 0; j < 64; ++j) { expected_downsampled(i, j) = static_cast<float>(i * 2); } } EXPECT_THAT(DownsampleArray(source_array, {{2, 2}}, DownsampleMethod::kMean), Optional(tensorstore::MatchesArray(expected_downsampled))); } }
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/downsample/downsample_array.cc
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/downsample/downsample_array_test.cc
4f887a6430414cd6088e1743555015b10f116d50
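For reference, the kMean method exercised by the downsample_array row above averages a trailing partial block over only the elements that block actually contains. A minimal standalone sketch of that 1-D semantics (DownsampleMean1D below is a hypothetical helper written for illustration, not part of the TensorStore API):

#include <cstdio>
#include <vector>

// Mean-downsample a 1-D sequence by `factor`; a trailing partial block is
// averaged over however many elements it actually contains, matching the
// behavior the MeanRank1NotExactMultiple test above expects.
std::vector<double> DownsampleMean1D(const std::vector<double>& in, int factor) {
  std::vector<double> out;
  for (size_t i = 0; i < in.size(); i += factor) {
    double sum = 0.0;
    size_t n = 0;
    for (size_t j = i; j < in.size() && j < i + factor; ++j) {
      sum += in[j];
      ++n;
    }
    out.push_back(sum / n);  // n >= 1 whenever the outer loop runs
  }
  return out;
}

int main() {
  // {1, 2, 5, 7, 9} by factor 2 -> {1.5, 6, 9}, as in the test above.
  for (double v : DownsampleMean1D({1, 2, 5, 7, 9}, 2)) std::printf("%g ", v);
  std::printf("\n");
}

Dividing by the actual block size rather than the nominal factor is why the final element of {1, 2, 5, 7, 9} downsamples to 9 rather than 4.5.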
Dataset columns (name: type summary from the viewer):
ID: string, length 36
Language: string, 1 distinct value
Repository Name: string, 13 distinct values
File Name: string, length 2 to 44
File Path in Repository: string, length 11 to 111
File Path for Unit Test: string, length 16 to 116
Code: string, length 0 to 278k
Unit Test - (Ground Truth): string, length 127 to 663k
Code Url: string, length 91 to 198
Test Code Url: string, length 96 to 203
Commit Hash: string, 13 distinct values
e87f703b-9ab7-41f0-a6af-71cbc58cb452
cpp
google/libaddressinput
address_data
cpp/src/address_data.cc
cpp/test/address_data_test.cc
#include <libaddressinput/address_data.h> #include <libaddressinput/address_field.h> #include <algorithm> #include <cassert> #include <cstddef> #include <ostream> #include <string> #include <vector> #include <re2/re2.h> #include "util/size.h" namespace i18n { namespace addressinput { namespace { std::string AddressData::*kStringField[] = { &AddressData::region_code, &AddressData::administrative_area, &AddressData::locality, &AddressData::dependent_locality, &AddressData::sorting_code, &AddressData::postal_code, nullptr, &AddressData::organization, &AddressData::recipient, }; const std::vector<std::string> AddressData::*kVectorStringField[] = { nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, &AddressData::address_line, nullptr, nullptr, }; static_assert(size(kStringField) == size(kVectorStringField), "field_mapping_array_size_mismatch"); bool IsStringEmpty(const std::string& str) { static const RE2 kMatcher(R"(\S)"); return str.empty() || !RE2::PartialMatch(str, kMatcher); } } bool AddressData::IsFieldEmpty(AddressField field) const { assert(field >= 0); assert(static_cast<size_t>(field) < size(kStringField)); if (kStringField[field] != nullptr) { const auto& value = GetFieldValue(field); return IsStringEmpty(value); } else { const auto& value = GetRepeatedFieldValue(field); return std::find_if_not(value.begin(), value.end(), IsStringEmpty) == value.end(); } } const std::string& AddressData::GetFieldValue(AddressField field) const { assert(field >= 0); assert(static_cast<size_t>(field) < size(kStringField)); assert(kStringField[field] != nullptr); return this->*kStringField[field]; } void AddressData::SetFieldValue(AddressField field, const std::string& value) { assert(field >= 0); assert(static_cast<size_t>(field) < size(kStringField)); assert(kStringField[field] != nullptr); (this->*kStringField[field]).assign(value); } const std::vector<std::string>& AddressData::GetRepeatedFieldValue( AddressField field) const { assert(IsRepeatedFieldValue(field)); return this->*kVectorStringField[field]; } bool AddressData::operator==(const AddressData& other) const { return region_code == other.region_code && address_line == other.address_line && administrative_area == other.administrative_area && locality == other.locality && dependent_locality == other.dependent_locality && postal_code == other.postal_code && sorting_code == other.sorting_code && language_code == other.language_code && organization == other.organization && recipient == other.recipient; } bool AddressData::IsRepeatedFieldValue(AddressField field) { assert(field >= 0); assert(static_cast<size_t>(field) < size(kVectorStringField)); return kVectorStringField[field] != nullptr; } } } std::ostream& operator<<(std::ostream& o, const i18n::addressinput::AddressData& address) { o << "region_code: \"" << address.region_code << "\"\n" "administrative_area: \"" << address.administrative_area << "\"\n" "locality: \"" << address.locality << "\"\n" "dependent_locality: \"" << address.dependent_locality << "\"\n" "postal_code: \"" << address.postal_code << "\"\n" "sorting_code: \"" << address.sorting_code << "\"\n"; for (const auto& line : address.address_line) { o << "address_line: \"" << line << "\"\n"; } o << "language_code: \"" << address.language_code << "\"\n" "organization: \"" << address.organization << "\"\n" "recipient: \"" << address.recipient << "\"\n"; return o; }
#include <libaddressinput/address_data.h> #include <libaddressinput/address_field.h> #include <sstream> #include <gtest/gtest.h> namespace { using i18n::addressinput::AddressData; using i18n::addressinput::AddressField; using i18n::addressinput::COUNTRY; using i18n::addressinput::ADMIN_AREA; using i18n::addressinput::LOCALITY; using i18n::addressinput::DEPENDENT_LOCALITY; using i18n::addressinput::SORTING_CODE; using i18n::addressinput::POSTAL_CODE; using i18n::addressinput::STREET_ADDRESS; using i18n::addressinput::ORGANIZATION; using i18n::addressinput::RECIPIENT; TEST(AddressDataTest, GetFieldValue) { const AddressData address{ .region_code = "rrr", .administrative_area = "sss", .locality = "ccc", .dependent_locality = "ddd", .postal_code = "zzz", .sorting_code = "xxx", .organization = "ooo", .recipient = "nnn", }; EXPECT_EQ(address.region_code, address.GetFieldValue(COUNTRY)); EXPECT_EQ(address.administrative_area, address.GetFieldValue(ADMIN_AREA)); EXPECT_EQ(address.locality, address.GetFieldValue(LOCALITY)); EXPECT_EQ(address.dependent_locality, address.GetFieldValue(DEPENDENT_LOCALITY)); EXPECT_EQ(address.sorting_code, address.GetFieldValue(SORTING_CODE)); EXPECT_EQ(address.postal_code, address.GetFieldValue(POSTAL_CODE)); EXPECT_EQ(address.organization, address.GetFieldValue(ORGANIZATION)); EXPECT_EQ(address.recipient, address.GetFieldValue(RECIPIENT)); } TEST(AddressDataTest, GetRepeatedFieldValue) { const AddressData address{.address_line{ "aaa", "222", }}; EXPECT_EQ(address.address_line, address.GetRepeatedFieldValue(STREET_ADDRESS)); } TEST(AddressDataTest, IsFieldEmpty) { AddressData address; EXPECT_TRUE(address.IsFieldEmpty(COUNTRY)); EXPECT_TRUE(address.IsFieldEmpty(ADMIN_AREA)); EXPECT_TRUE(address.IsFieldEmpty(LOCALITY)); EXPECT_TRUE(address.IsFieldEmpty(DEPENDENT_LOCALITY)); EXPECT_TRUE(address.IsFieldEmpty(SORTING_CODE)); EXPECT_TRUE(address.IsFieldEmpty(POSTAL_CODE)); EXPECT_TRUE(address.IsFieldEmpty(STREET_ADDRESS)); EXPECT_TRUE(address.IsFieldEmpty(ORGANIZATION)); EXPECT_TRUE(address.IsFieldEmpty(RECIPIENT)); address = { .region_code = "rrr", .address_line{"aaa"}, .administrative_area = "sss", .locality = "ccc", .dependent_locality = "ddd", .postal_code = "zzz", .sorting_code = "xxx", .organization = "ooo", .recipient = "nnn", }; EXPECT_FALSE(address.IsFieldEmpty(COUNTRY)); EXPECT_FALSE(address.IsFieldEmpty(ADMIN_AREA)); EXPECT_FALSE(address.IsFieldEmpty(LOCALITY)); EXPECT_FALSE(address.IsFieldEmpty(DEPENDENT_LOCALITY)); EXPECT_FALSE(address.IsFieldEmpty(SORTING_CODE)); EXPECT_FALSE(address.IsFieldEmpty(POSTAL_CODE)); EXPECT_FALSE(address.IsFieldEmpty(STREET_ADDRESS)); EXPECT_FALSE(address.IsFieldEmpty(ORGANIZATION)); EXPECT_FALSE(address.IsFieldEmpty(RECIPIENT)); } TEST(AddressDataTest, IsFieldEmptyWhitespace) { AddressData address; address.recipient = " "; EXPECT_TRUE(address.IsFieldEmpty(RECIPIENT)); address.recipient = "abc"; EXPECT_FALSE(address.IsFieldEmpty(RECIPIENT)); address.recipient = " b "; EXPECT_FALSE(address.IsFieldEmpty(RECIPIENT)); } TEST(AddressDataTest, IsFieldEmptyVector) { AddressData address; EXPECT_TRUE(address.IsFieldEmpty(STREET_ADDRESS)); address.address_line.emplace_back(""); EXPECT_TRUE(address.IsFieldEmpty(STREET_ADDRESS)); address.address_line.emplace_back("aaa"); EXPECT_FALSE(address.IsFieldEmpty(STREET_ADDRESS)); address.address_line.emplace_back(""); EXPECT_FALSE(address.IsFieldEmpty(STREET_ADDRESS)); } TEST(AddressDataTest, IsFieldEmptyVectorWhitespace) { AddressData address{.address_line{ " ", " ", " ", }}; 
EXPECT_TRUE(address.IsFieldEmpty(STREET_ADDRESS)); address.address_line = { "abc", }; EXPECT_FALSE(address.IsFieldEmpty(STREET_ADDRESS)); address.address_line = { " ", " b ", " ", }; EXPECT_FALSE(address.IsFieldEmpty(STREET_ADDRESS)); } TEST(AddressDataTest, StreamFunction) { std::ostringstream oss; const AddressData address{ .region_code = "R", .address_line{ "Line 1", "Line 2", }, .administrative_area = "S", .locality = "C", .dependent_locality = "D", .postal_code = "Z", .sorting_code = "X", .language_code = "zh-Hant", .organization = "O", .recipient = "N", }; oss << address; EXPECT_EQ("region_code: \"R\"\n" "administrative_area: \"S\"\n" "locality: \"C\"\n" "dependent_locality: \"D\"\n" "postal_code: \"Z\"\n" "sorting_code: \"X\"\n" "address_line: \"Line 1\"\n" "address_line: \"Line 2\"\n" "language_code: \"zh-Hant\"\n" "organization: \"O\"\n" "recipient: \"N\"\n", oss.str()); } TEST(AddressDataTest, TestEquals) { const AddressData address{ .region_code = "R", .address_line{ "Line 1", "Line 2", }, .administrative_area = "S", .locality = "C", .dependent_locality = "D", .postal_code = "Z", .sorting_code = "X", .language_code = "zh-Hant", .organization = "O", .recipient = "N", }; AddressData clone = address; EXPECT_EQ(address, clone); clone.language_code.clear(); EXPECT_FALSE(address == clone); } #ifndef NDEBUG TEST(AddressDataTest, GetFieldValueInvalid) { const AddressData address; ASSERT_DEATH_IF_SUPPORTED(address.GetFieldValue(STREET_ADDRESS), "ssertion.*failed"); } TEST(AddressDataTest, GetVectorFieldValueInvalid) { const AddressData address; ASSERT_DEATH_IF_SUPPORTED(address.GetRepeatedFieldValue(COUNTRY), "ssertion.*failed"); } TEST(AddressDataTest, IsFieldEmptyInvalid) { static const auto invalid_field = static_cast<AddressField>(-1); AddressData address; ASSERT_DEATH_IF_SUPPORTED(address.IsFieldEmpty(invalid_field), "ssertion.*failed"); } #endif }
https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/address_data.cc
https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/address_data_test.cc
2610f7b1043d6784ada41392fc9392d1ea09ea07
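The address_data row above routes all field access through an array of pointer-to-data-members indexed by an enum. A minimal sketch of that technique under assumed illustrative names (Record, kFieldTable, and GetFieldValue here are hypothetical, not part of libaddressinput):

#include <cassert>
#include <cstdio>
#include <string>

// Illustrative struct: an array of pointer-to-member selects which field
// an enum index refers to, so one accessor serves every string field.
struct Record {
  std::string name;
  std::string city;
};

enum Field { kName, kCity };

// Table indexed by Field; each entry is a pointer-to-member of Record.
std::string Record::*kFieldTable[] = {
    &Record::name,
    &Record::city,
};

const std::string& GetFieldValue(const Record& r, Field f) {
  assert(f >= 0 && f < 2);
  return r.*kFieldTable[f];  // dereference the pointer-to-member
}

int main() {
  Record r{"Ada", "London"};
  std::printf("%s, %s\n", GetFieldValue(r, kName).c_str(),
              GetFieldValue(r, kCity).c_str());
}

The payoff of the table is that adding a field means adding one table entry instead of another branch in every accessor, which is how the library keeps GetFieldValue, SetFieldValue, and IsFieldEmpty in sync.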
372fea3a-ecdc-4712-95c9-436ca07dd10c
cpp
tensorflow/tensorflow
winograd_util
tensorflow/lite/delegates/gpu/common/winograd_util.cc
tensorflow/lite/delegates/gpu/common/winograd_util_test.cc
#include "tensorflow/lite/delegates/gpu/common/winograd_util.h" #include <cmath> #include <vector> #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" namespace tflite { namespace gpu { namespace { std::vector<float> GetTransposedMatrixForWinograd(int width, int height) { const float kDelta = std::sqrt(2.0f) / 2.0f; std::vector<float> px(width); px[0] = 0.0f; const int points_count = (width - 1) / 2; for (int i = 0; i < points_count; ++i) { px[i * 2 + 1] = kDelta * (i + 1.0f); px[i * 2 + 2] = -kDelta * (i + 1.0f); } px[width - 1] = 1.0f; std::vector<float> py(width, 1.0f); py[width - 1] = 0.0f; std::vector<float> result(height * width); for (int y = 0; y < width; ++y) { for (int x = 0; x < height; ++x) { result[x * width + y] = std::pow(px[y], 1.0f * x) * std::pow(py[y], (height - 1.0f) - x); } } return result; } std::vector<float> GetInversedMatrixForWinograd(int rank) { auto matrix = GetTransposedMatrixForWinograd(rank, rank); std::vector<float> inverted(rank * rank, 0.0f); for (int i = 0; i < rank; ++i) { inverted[i * rank + i] = 1.0f; } for (int i = 1; i < rank - 1; ++i) { float inv_t = 1.0f / matrix[i * rank + i]; for (int x = i; x < rank; ++x) { matrix[i * rank + x] *= inv_t; } for (int x = 0; x < rank; ++x) { inverted[i * rank + x] *= inv_t; } for (int y = 0; y < rank; ++y) { if (y == i) continue; float t = matrix[y * rank + i]; for (int x = i; x < rank; ++x) { matrix[y * rank + x] -= t * matrix[i * rank + x]; } for (int x = 0; x < rank; ++x) { inverted[y * rank + x] -= t * inverted[i * rank + x]; } } } return inverted; } std::vector<float> Multiply(const std::vector<float>& a_mat, const std::vector<float>& b_mat, int m, int n, int k) { std::vector<float> result(m * k); for (int y = 0; y < m; ++y) { for (int x = 0; x < k; ++x) { float sum = 0.0f; for (int i = 0; i < n; ++i) { sum += a_mat[y * n + i] * b_mat[i * k + x]; } result[y * k + x] = sum; } } return result; } } std::vector<float> AtMatrixForWinograd4x4To6x6() { return GetTransposedMatrixForWinograd(6, 4); } std::vector<float> BtMatrixForWinograd4x4To6x6() { return GetInversedMatrixForWinograd(6); } void RearrangeWeightsToWinograd4x4To6x6Weights( const Tensor<OHWI, DataType::FLOAT32>& src_weights, Tensor<OHWI, DataType::FLOAT32>* dst_weights) { OHWI dst_shape; dst_shape.o = src_weights.shape.o; dst_shape.h = 6; dst_shape.w = 6; dst_shape.i = src_weights.shape.i; dst_weights->shape = dst_shape; dst_weights->data.resize(dst_shape.DimensionsProduct()); auto gt_mat = GetTransposedMatrixForWinograd(6, 3); std::vector<float> g_mat(gt_mat.size()); for (int y = 0; y < 3; ++y) { for (int x = 0; x < 6; ++x) { g_mat[x * 3 + y] = gt_mat[y * 6 + x]; } } for (int d = 0; d < src_weights.shape.o; ++d) { for (int s = 0; s < src_weights.shape.i; ++s) { std::vector<float> in_vals(9); for (int y = 0; y < 3; ++y) { for (int x = 0; x < 3; ++x) { const int f_index = src_weights.shape.LinearIndex({d, y, x, s}); in_vals[y * 3 + x] = src_weights.data[f_index]; } } auto temp_vals = Multiply(g_mat, in_vals, 6, 3, 3); auto out_vals = Multiply(temp_vals, gt_mat, 6, 3, 6); for (int y = 0; y < 6; ++y) { for (int x = 0; x < 6; ++x) { const int f_index = dst_shape.LinearIndex({d, y, x, s}); dst_weights->data[f_index] = out_vals[y * 6 + x]; } } } } } bool IsSuitableForWinograd4x4To6x6(const Convolution2DAttributes& attr) { return attr.weights.shape.w == 3 && attr.weights.shape.h == 
3 && attr.dilations == HW(1, 1) && attr.strides == HW(1, 1) && attr.groups == 1; } } }
#include "tensorflow/lite/delegates/gpu/common/winograd_util.h" #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" namespace tflite { namespace gpu { TEST(Winograd, CorrectAttributesFor4x4To6x6) { Convolution2DAttributes attr; attr.padding.prepended = HW(1, 2); attr.padding.appended = HW(0, 1); attr.strides = HW(1, 1); attr.dilations = HW(1, 1); attr.weights.shape = OHWI(1, 3, 3, 1); EXPECT_TRUE(IsSuitableForWinograd4x4To6x6(attr)); } TEST(Winograd, IncorrectAttributesFor4x4To6x6) { Convolution2DAttributes attr; attr.padding.prepended = HW(1, 2); attr.padding.appended = HW(0, 1); attr.strides = HW(1, 1); attr.dilations = HW(1, 1); attr.weights.shape = OHWI(1, 2, 3, 1); EXPECT_FALSE(IsSuitableForWinograd4x4To6x6(attr)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/winograd_util.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/winograd_util_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
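The winograd_util row above builds its 6x6 filter transform from two row-major matrix multiplies (G times g, then the result times G transposed). A self-contained sketch of that multiply convention with small illustrative operands (this standalone Multiply mirrors the file's helper for exposition; it is not the TFLite code itself):

#include <cstdio>
#include <vector>

// Row-major (m x n) * (n x k) multiply, the same shape convention as the
// Multiply helper in winograd_util.cc.
std::vector<float> Multiply(const std::vector<float>& a,
                            const std::vector<float>& b, int m, int n, int k) {
  std::vector<float> result(m * k);
  for (int y = 0; y < m; ++y) {
    for (int x = 0; x < k; ++x) {
      float sum = 0.0f;
      for (int i = 0; i < n; ++i) sum += a[y * n + i] * b[i * k + x];
      result[y * k + x] = sum;
    }
  }
  return result;
}

int main() {
  // (2x3) * (3x2) -> (2x2); the filter transform chains two such calls:
  // temp = Multiply(g_mat, in_vals, 6, 3, 3); out = Multiply(temp, gt_mat, 6, 3, 6).
  std::vector<float> a{1, 2, 3, 4, 5, 6};     // 2x3
  std::vector<float> b{7, 8, 9, 10, 11, 12};  // 3x2
  auto c = Multiply(a, b, 2, 3, 2);
  std::printf("%g %g\n%g %g\n", c[0], c[1], c[2], c[3]);  // 58 64 / 139 154
}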
aa4eaeed-d89a-44ac-8a40-cbeca40aa15a
cpp
tensorflow/tensorflow
resource_variable_grad
tensorflow/cc/gradients/resource_variable_grad.cc
tensorflow/cc/gradients/resource_variable_grad_test.cc
#include <vector> #include "tensorflow/cc/framework/grad_op_registry.h" #include "tensorflow/cc/framework/gradients.h" #include "tensorflow/cc/ops/array_ops.h" namespace tensorflow { namespace ops { namespace { Status ReadVariableOpGrad(const Scope& scope, const Operation& op, const std::vector<Output>& grad_inputs, std::vector<Output>* grad_outputs) { grad_outputs->push_back(Identity(scope, grad_inputs[0])); return scope.status(); } REGISTER_GRADIENT_OP("ReadVariableOp", ReadVariableOpGrad); } } }
#include <iostream> #include "tensorflow/cc/client/client_session.h" #include "tensorflow/cc/framework/grad_op_registry.h" #include "tensorflow/cc/framework/gradient_checker.h" #include "tensorflow/cc/framework/gradients.h" #include "tensorflow/cc/framework/ops.h" #include "tensorflow/cc/framework/testutil.h" #include "tensorflow/cc/gradients/grad_testutil.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/resource_variable_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" namespace tensorflow { namespace ops { namespace { TEST(ResourceVariableGradTest, ReadVariableOpGrad) { TensorShape shape({}); auto scope = Scope::NewRootScope(); auto x = Placeholder(scope, DT_FLOAT, Placeholder::Shape(shape)); auto var = VarHandleOp(scope, DT_FLOAT, shape); auto init = AssignVariableOp(scope, var, Const(scope, 2.0f, shape)); auto temp = ReadVariableOp(scope, var, DT_FLOAT); auto y = Mul(scope, temp, x); auto dy = Placeholder(scope, DT_FLOAT, Placeholder::Shape(shape)); OutputList dxs; TF_ASSERT_OK(AddSymbolicGradients(scope, {y}, {var}, {dy}, &dxs)); ClientSession::FeedType feed_list; feed_list.insert({x, 5.0f}); feed_list.insert({dy, 1.0f}); std::vector<Tensor> dxout; ClientSession session(scope); TF_ASSERT_OK(session.Run(feed_list, dxs, &dxout)); auto grad = dxout[0].scalar<float>()(); EXPECT_EQ(grad, 5.0f); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/resource_variable_grad.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/resource_variable_grad_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
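The resource_variable_grad row above hooks its gradient in through REGISTER_GRADIENT_OP. A minimal sketch of the underlying name-to-function registry pattern, heavily simplified relative to TensorFlow's actual GradOpRegistry (Registry and Registrar below are illustrative names, and the double-to-double gradient type is a stand-in for the real Output-based signature):

#include <cstdio>
#include <functional>
#include <map>
#include <string>

// Simplified registry keyed by op name; the real macro expands to a static
// object whose constructor performs the registration before main() runs.
using GradFunc = std::function<double(double)>;

std::map<std::string, GradFunc>& Registry() {
  static std::map<std::string, GradFunc> registry;
  return registry;
}

struct Registrar {
  Registrar(const std::string& name, GradFunc f) { Registry()[name] = f; }
};

// An identity gradient just forwards the incoming gradient, which is the
// same shape of behavior ReadVariableOpGrad above implements via Identity.
static Registrar identity_grad("Identity", [](double dy) { return dy; });

int main() {
  std::printf("%g\n", Registry()["Identity"](5.0));  // prints 5
}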
c0e9b00c-577d-4692-b0de-b284f166c3d1
cpp
tensorflow/tensorflow
sampler
third_party/xla/xla/tsl/lib/monitoring/sampler.cc
tensorflow/core/lib/monitoring/sampler_test.cc
#include "xla/tsl/lib/monitoring/sampler.h" #include "absl/log/check.h" #ifdef IS_MOBILE_PLATFORM #else namespace tsl { namespace monitoring { namespace { class ExplicitBuckets : public Buckets { public: ~ExplicitBuckets() override = default; explicit ExplicitBuckets(std::vector<double> bucket_limits) : bucket_limits_(std::move(bucket_limits)) { CHECK_GT(bucket_limits_.size(), 0); for (size_t i = 1; i < bucket_limits_.size(); i++) { CHECK_GT(bucket_limits_[i], bucket_limits_[i - 1]); } if (bucket_limits_.back() != DBL_MAX) { bucket_limits_.push_back(DBL_MAX); } } const std::vector<double>& explicit_bounds() const override { return bucket_limits_; } private: std::vector<double> bucket_limits_; ExplicitBuckets(const ExplicitBuckets&) = delete; void operator=(const ExplicitBuckets&) = delete; }; class ExponentialBuckets : public Buckets { public: ~ExponentialBuckets() override = default; ExponentialBuckets(double scale, double growth_factor, int bucket_count) : explicit_buckets_( ComputeBucketLimits(scale, growth_factor, bucket_count)) {} const std::vector<double>& explicit_bounds() const override { return explicit_buckets_.explicit_bounds(); } private: static std::vector<double> ComputeBucketLimits(double scale, double growth_factor, int bucket_count) { CHECK_GT(bucket_count, 0); std::vector<double> bucket_limits; double bound = scale; for (int i = 0; i < bucket_count; i++) { bucket_limits.push_back(bound); bound *= growth_factor; } return bucket_limits; } ExplicitBuckets explicit_buckets_; ExponentialBuckets(const ExponentialBuckets&) = delete; void operator=(const ExponentialBuckets&) = delete; }; } std::unique_ptr<Buckets> Buckets::Explicit(std::vector<double> bucket_limits) { return std::unique_ptr<Buckets>( new ExplicitBuckets(std::move(bucket_limits))); } std::unique_ptr<Buckets> Buckets::Explicit( std::initializer_list<double> bucket_limits) { return std::unique_ptr<Buckets>(new ExplicitBuckets(bucket_limits)); } std::unique_ptr<Buckets> Buckets::Exponential(double scale, double growth_factor, int bucket_count) { return std::unique_ptr<Buckets>( new ExponentialBuckets(scale, growth_factor, bucket_count)); } } } #endif
#include "tensorflow/core/lib/monitoring/sampler.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace monitoring { namespace { using histogram::Histogram; void EqHistograms(const Histogram& expected, const HistogramProto& actual_proto) { Histogram actual; ASSERT_TRUE(actual.DecodeFromProto(actual_proto)); EXPECT_EQ(expected.ToString(), actual.ToString()); } auto* sampler_with_labels = Sampler<1>::New({"/tensorflow/test/sampler_with_labels", "Sampler with one label.", "MyLabel"}, Buckets::Explicit({10.0, 20.0})); TEST(LabeledSamplerTest, InitializedEmpty) { Histogram empty; EqHistograms(empty, sampler_with_labels->GetCell("Empty")->value()); } TEST(LabeledSamplerTest, ExplicitBucketBoundaries) { Histogram expected({10.0, 20.0, DBL_MAX}); auto* cell = sampler_with_labels->GetCell("BucketBoundaries"); sampler_with_labels->GetCell("AddedToCheckPreviousCellValidity"); cell->Add(-1.0); expected.Add(-1.0); cell->Add(10.0); expected.Add(10.0); cell->Add(20.0); expected.Add(20.0); cell->Add(31.0); expected.Add(31.0); EqHistograms(expected, cell->value()); } auto* init_sampler_without_labels = Sampler<0>::New({"/tensorflow/test/init_sampler_without_labels", "Sampler without labels initialized as empty."}, Buckets::Explicit(std::vector<double>{1.5, 2.8})); TEST(UnlabeledSamplerTest, InitializedEmpty) { Histogram empty; EqHistograms(empty, init_sampler_without_labels->GetCell()->value()); } auto* sampler_without_labels = Sampler<0>::New({"/tensorflow/test/sampler_without_labels", "Sampler without labels initialized as empty."}, Buckets::Explicit({1.5, 2.8})); TEST(UnlabeledSamplerTest, ExplicitBucketBoundaries) { Histogram expected({1.5, 2.8, DBL_MAX}); auto* cell = sampler_without_labels->GetCell(); cell->Add(-1.0); expected.Add(-1.0); cell->Add(2.0); expected.Add(2.0); cell->Add(31.0); expected.Add(31.0); EqHistograms(expected, cell->value()); } auto* sampler_with_exponential = Sampler<1>::New({"/tensorflow/test/sampler_with_exponential", "Sampler with exponential buckets.", "MyLabel"}, Buckets::Exponential(1, 2, 3)); TEST(ExponentialSamplerTest, ExponentialBucketBoundaries) { Histogram expected({1.0, 2.0, 4.0, DBL_MAX}); auto* cell = sampler_with_exponential->GetCell("BucketBoundaries"); sampler_with_exponential->GetCell("AddedToCheckPreviousCellValidity"); cell->Add(-1.0); expected.Add(-1.0); cell->Add(0.5); expected.Add(0.5); cell->Add(1.001); expected.Add(1.001); cell->Add(3.999); expected.Add(3.999); cell->Add(6.0); expected.Add(6.0); EqHistograms(expected, cell->value()); } TEST(ExplicitSamplerTest, SameName) { auto* same_sampler = Sampler<1>::New({"/tensorflow/test/sampler_with_labels", "Sampler with one label.", "MyLabel"}, Buckets::Explicit({10.0, 20.0})); EXPECT_TRUE(sampler_with_labels->GetStatus().ok()); EXPECT_TRUE(same_sampler->GetStatus().ok()); delete same_sampler; } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/monitoring/sampler.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/monitoring/sampler_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
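The sampler row above derives exponential bucket limits as scale, scale*growth_factor, scale*growth_factor^2, and so on, capped with DBL_MAX. A standalone sketch of that computation plus a bucket lookup (ExponentialLimits is a hypothetical stand-in for the private ComputeBucketLimits):

#include <algorithm>
#include <cfloat>
#include <cstdio>
#include <vector>

// Exponential bucket limits: scale, scale*growth, scale*growth^2, ...,
// then DBL_MAX as the final catch-all upper bound, as in sampler.cc.
std::vector<double> ExponentialLimits(double scale, double growth, int count) {
  std::vector<double> limits;
  double bound = scale;
  for (int i = 0; i < count; ++i) {
    limits.push_back(bound);
    bound *= growth;
  }
  limits.push_back(DBL_MAX);
  return limits;
}

int main() {
  // Buckets::Exponential(1, 2, 3), as used by the test above, yields
  // upper bounds {1, 2, 4, DBL_MAX}.
  auto limits = ExponentialLimits(1, 2, 3);
  // A sample lands in the first bucket whose upper limit exceeds it.
  double sample = 3.999;
  auto it = std::upper_bound(limits.begin(), limits.end(), sample);
  std::printf("sample %g -> bucket %td\n", sample, it - limits.begin());  // bucket 2
}

The trailing DBL_MAX bucket guarantees every finite sample has a home, which is why the test's expected histogram bounds always end in DBL_MAX.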
61928d7e-efb0-4f2c-8acc-2f59f7b04420
cpp
google/tensorstore
key
tensorstore/kvstore/zarr3_sharding_indexed/key.cc
tensorstore/kvstore/zarr3_sharding_indexed/key_test.cc
#include "tensorstore/kvstore/zarr3_sharding_indexed/key.h" #include <stddef.h> #include <stdint.h> #include <algorithm> #include <cassert> #include <cstring> #include <optional> #include <string> #include <string_view> #include <utility> #include "absl/base/internal/endian.h" #include "absl/status/status.h" #include "tensorstore/contiguous_layout.h" #include "tensorstore/index.h" #include "tensorstore/kvstore/key_range.h" #include "tensorstore/rank.h" #include "tensorstore/util/extents.h" #include "tensorstore/util/quote_string.h" #include "tensorstore/util/result.h" #include "tensorstore/util/span.h" #include "tensorstore/util/str_cat.h" namespace tensorstore { namespace zarr3_sharding_indexed { std::string IndicesToKey(span<const Index> grid_cell_indices) { std::string key; key.resize(grid_cell_indices.size() * 4); for (DimensionIndex i = 0; i < grid_cell_indices.size(); ++i) { absl::big_endian::Store32(key.data() + i * 4, grid_cell_indices[i]); } return key; } bool KeyToIndices(std::string_view key, span<Index> grid_cell_indices) { if (key.size() != grid_cell_indices.size() * 4) { return false; } for (DimensionIndex i = 0; i < grid_cell_indices.size(); ++i) { grid_cell_indices[i] = absl::big_endian::Load32(key.data() + i * 4); } return true; } std::optional<EntryId> KeyToEntryId(std::string_view key, span<const Index> grid_shape) { const DimensionIndex rank = grid_shape.size(); if (rank * sizeof(uint32_t) != key.size()) return {}; EntryId id = 0; for (DimensionIndex i = 0; i < rank; ++i) { auto index = absl::big_endian::Load32(key.data() + i * 4); if (index >= grid_shape[i]) return {}; id *= grid_shape[i]; id += index; } return id; } Result<EntryId> KeyToEntryIdOrError(std::string_view key, span<const Index> grid_shape) { if (auto entry_id = KeyToEntryId(key, grid_shape)) { return *entry_id; } return absl::InvalidArgumentError( tensorstore::StrCat("Invalid key (grid_shape=", grid_shape, "): ", tensorstore::QuoteString(key))); } std::string EntryIdToKey(EntryId entry_id, span<const Index> grid_shape) { std::string key; key.resize(grid_shape.size() * 4); for (DimensionIndex i = grid_shape.size(); i--;) { const Index size = grid_shape[i]; absl::big_endian::Store32(key.data() + i * 4, entry_id % size); entry_id /= size; } return key; } EntryId LowerBoundToEntryId(std::string_view key, span<const Index> grid_shape) { char key_padded[kMaxRank * 4]; const size_t full_key_size = grid_shape.size() * 4; const size_t key_bytes_to_copy = std::min(full_key_size, key.size()); std::memcpy(key_padded, key.data(), key_bytes_to_copy); std::memset(key_padded + key_bytes_to_copy, 0, full_key_size - key_bytes_to_copy); EntryId entry_id = 0; EntryId remaining_indices_mask = ~static_cast<EntryId>(0); EntryId max_entry_id = 1; for (DimensionIndex i = 0; i < grid_shape.size(); ++i) { const EntryId size = grid_shape[i]; max_entry_id *= size; EntryId index = absl::big_endian::Load32(&key_padded[i * 4]); entry_id *= size; if (index >= size) { entry_id += (size & remaining_indices_mask); remaining_indices_mask = 0; } else { entry_id += (index & remaining_indices_mask); } } assert(entry_id <= max_entry_id); if (key.size() > full_key_size) { if (entry_id < max_entry_id) { ++entry_id; } } return entry_id; } std::pair<EntryId, EntryId> KeyRangeToEntryRange(std::string_view inclusive_min, std::string_view exclusive_max, span<const Index> grid_shape) { EntryId lower_bound = LowerBoundToEntryId(inclusive_min, grid_shape); EntryId upper_bound; if (exclusive_max.empty()) { upper_bound = 
static_cast<EntryId>(ProductOfExtents(grid_shape)); } else { upper_bound = LowerBoundToEntryId(exclusive_max, grid_shape); } return {lower_bound, upper_bound}; } EntryId InternalKeyLowerBoundToEntryId(std::string_view key, int64_t num_entries_per_shard) { char key_bytes[4] = {}; std::memcpy(key_bytes, key.data(), std::min(static_cast<size_t>(4), key.size())); EntryId entry_id = absl::big_endian::Load32(key_bytes); if (entry_id > num_entries_per_shard) { entry_id = num_entries_per_shard; } if (key.size() > 4 && entry_id < num_entries_per_shard) { ++entry_id; } return entry_id; } std::pair<EntryId, EntryId> InternalKeyRangeToEntryRange( std::string_view inclusive_min, std::string_view exclusive_max, int64_t num_entries_per_shard) { return {InternalKeyLowerBoundToEntryId(inclusive_min, num_entries_per_shard), exclusive_max.empty() ? EntryId(num_entries_per_shard) : InternalKeyLowerBoundToEntryId( exclusive_max, num_entries_per_shard)}; } std::string EntryIdToInternalKey(EntryId entry_id) { std::string key; key.resize(4); absl::big_endian::Store32(key.data(), entry_id); return key; } EntryId InternalKeyToEntryId(std::string_view key) { assert(key.size() == 4); return static_cast<EntryId>(absl::big_endian::Load32(key.data())); } KeyRange KeyRangeToInternalKeyRange(const KeyRange& range, span<const Index> grid_shape) { auto [inclusive_min_entry, exclusive_max_entry] = KeyRangeToEntryRange( range.inclusive_min, range.exclusive_max, grid_shape); return KeyRange{EntryIdToInternalKey(inclusive_min_entry), EntryIdToInternalKey(exclusive_max_entry)}; } std::string DescribeEntryId(EntryId entry_id, span<const Index> grid_shape) { Index indices[kMaxRank]; span<Index> indices_span(&indices[0], grid_shape.size()); GetContiguousIndices<c_order, Index>(entry_id, grid_shape, indices_span); return tensorstore::StrCat("shard entry ", indices_span, "/", grid_shape); } std::string DescribeKey(std::string_view key, span<const Index> grid_shape) { if (auto entry_id = KeyToEntryId(key, grid_shape)) { return DescribeEntryId(*entry_id, grid_shape); } return tensorstore::StrCat("invalid shard entry ", tensorstore::QuoteString(key), "/", grid_shape); } std::string DescribeInternalKey(std::string_view key, span<const Index> grid_shape) { return DescribeEntryId(InternalKeyToEntryId(key), grid_shape); } } }
#include "tensorstore/kvstore/zarr3_sharding_indexed/key.h" #include <optional> #include <string> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorstore/index.h" #include "tensorstore/kvstore/key_range.h" namespace { using ::tensorstore::Index; using ::tensorstore::KeyRange; using ::tensorstore::zarr3_sharding_indexed::EntryId; using ::tensorstore::zarr3_sharding_indexed::EntryIdToInternalKey; using ::tensorstore::zarr3_sharding_indexed::EntryIdToKey; using ::tensorstore::zarr3_sharding_indexed::IndicesToKey; using ::tensorstore::zarr3_sharding_indexed::InternalKeyLowerBoundToEntryId; using ::tensorstore::zarr3_sharding_indexed::InternalKeyRangeToEntryRange; using ::tensorstore::zarr3_sharding_indexed::InternalKeyToEntryId; using ::tensorstore::zarr3_sharding_indexed::KeyRangeToEntryRange; using ::tensorstore::zarr3_sharding_indexed::KeyRangeToInternalKeyRange; using ::tensorstore::zarr3_sharding_indexed::KeyToEntryId; using ::tensorstore::zarr3_sharding_indexed::KeyToIndices; using ::tensorstore::zarr3_sharding_indexed::LowerBoundToEntryId; TEST(KeyToEntryIdTest, Basic) { EntryId entry_id = 1 * 5 * 6 + 2 * 6 + 3; std::string key{0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3}; Index grid_shape[] = {4, 5, 6}; EXPECT_THAT(KeyToEntryId(key, grid_shape), ::testing::Optional(entry_id)); EXPECT_THAT(EntryIdToKey(entry_id, grid_shape), ::testing::Eq(key)); } TEST(KeyToEntryIdTest, OutOfRange) { EXPECT_THAT(KeyToEntryId(std::string{0, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0, 3}, {{4, 5, 6}}), ::testing::Eq(std::nullopt)); } TEST(KeyToEntryIdTest, Invalid) { EXPECT_THAT( KeyToEntryId(std::string{0, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}, {{4, 5, 6}}), ::testing::Eq(std::nullopt)); } TEST(IndicesToKeyTest, Basic) { const Index indices[] = {1, 2, 3}; std::string key{0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3}; EXPECT_THAT(IndicesToKey(indices), ::testing::Eq(key)); Index decoded_indices[3]; EXPECT_TRUE(KeyToIndices(key, decoded_indices)); EXPECT_THAT(decoded_indices, ::testing::ElementsAreArray(indices)); EXPECT_FALSE(KeyToIndices(key.substr(1), decoded_indices)); } TEST(LowerBoundToEntryId, Exact) { Index grid_shape[] = {4, 5, 6}; EXPECT_THAT(LowerBoundToEntryId( std::string{0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3}, grid_shape), ::testing::Eq(1 * 5 * 6 + 2 * 6 + 3)); } TEST(LowerBoundToEntryId, Longer) { Index grid_shape[] = {4, 5, 6}; EXPECT_THAT( LowerBoundToEntryId(std::string{0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0}, grid_shape), ::testing::Eq(1 * 5 * 6 + 2 * 6 + 4)); } TEST(KeyRangeToEntryRange, Full) { Index grid_shape[] = {4, 5, 6}; EXPECT_THAT(KeyRangeToEntryRange("", "", grid_shape), ::testing::Pair(0, 4 * 5 * 6)); } TEST(KeyRangeToEntryRange, Partial) { Index grid_shape[] = {4, 5, 6}; EXPECT_THAT( KeyRangeToEntryRange( std::string{ 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, }, std::string{ 0, 0, 0, 2, 0, 0, 0, 4, 0, 0, 0, 5, }, grid_shape), ::testing::Pair(2 * (5 * 6) + 3 * 6 + 4, 2 * (5 * 6) + 4 * 6 + 5)); EXPECT_THAT(KeyRangeToInternalKeyRange(KeyRange{std::string{ 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, }, std::string{ 0, 0, 0, 2, 0, 0, 0, 4, 0, 0, 0, 5, }}, grid_shape), KeyRange(EntryIdToInternalKey(2 * (5 * 6) + 3 * 6 + 4), EntryIdToInternalKey(2 * (5 * 6) + 4 * 6 + 5))); } TEST(EntryIdToInternalKeyTest, Basic) { EntryId entry_id = 0x01020304; std::string internal_key{0x01, 0x02, 0x03, 0x04}; EXPECT_THAT(EntryIdToInternalKey(entry_id), ::testing::Eq(internal_key)); EXPECT_THAT(InternalKeyToEntryId(internal_key), ::testing::Eq(entry_id)); } TEST(InternalKeyLowerBoundToEntryIdTest, Basic) { 
EXPECT_THAT(InternalKeyLowerBoundToEntryId( std::string{0x01, 0x02, 0x03, 0x04}, 0x88888888), ::testing::Eq(0x01020304)); EXPECT_THAT(InternalKeyLowerBoundToEntryId( std::string{0x01, 0x02, 0x03, 0x04, 0x0}, 0x88888888), ::testing::Eq(0x01020304 + 1)); EXPECT_THAT( InternalKeyLowerBoundToEntryId(std::string{0x01, 0x02, 0x03}, 0x88888888), ::testing::Eq(0x01020300)); EXPECT_THAT(InternalKeyLowerBoundToEntryId( std::string{0x01, 0x02, 0x03, 0x04}, 0x01020302), ::testing::Eq(0x01020302)); } TEST(InternalKeyRangeToEntryRange, Basic) { EXPECT_THAT(InternalKeyRangeToEntryRange(std::string{0x01, 0x02, 0x03, 0x04}, std::string{0x01, 0x02, 0x03, 0x07}, 0x88888888), ::testing::Pair(0x01020304, 0x01020307)); EXPECT_THAT(InternalKeyRangeToEntryRange(std::string{0x01, 0x02, 0x03, 0x04}, {}, 0x88888888), ::testing::Pair(0x01020304, 0x88888888)); } }
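The tests above pin down a subtle rounding rule in InternalKeyLowerBoundToEntryId: a key longer than 4 bytes sorts strictly after its 4-byte prefix, so the lower bound advances to the next entry id (unless already clamped at the shard size). A compact standalone restatement of that rule, assuming the same semantics as the function in key.cc; LowerBound is an illustrative name, not the library symbol.

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <string>

// Pad/truncate the key to 4 bytes, read it big-endian, clamp, then round up
// if the key had a tail beyond the 4-byte prefix.
uint32_t LowerBound(const std::string& key, uint32_t num_entries_per_shard) {
  unsigned char bytes[4] = {0, 0, 0, 0};  // short keys are zero-padded
  std::memcpy(bytes, key.data(), std::min<size_t>(4, key.size()));
  uint32_t id = (static_cast<uint32_t>(bytes[0]) << 24) |
                (static_cast<uint32_t>(bytes[1]) << 16) |
                (static_cast<uint32_t>(bytes[2]) << 8) |
                static_cast<uint32_t>(bytes[3]);
  if (id > num_entries_per_shard) id = num_entries_per_shard;  // clamp to shard size
  if (key.size() > 4 && id < num_entries_per_shard) ++id;      // longer key => strictly after prefix
  return id;
}

This is why the test expects {0x01, 0x02, 0x03, 0x04, 0x0} to map to 0x01020304 + 1: the trailing byte pushes the key past every entry whose internal key is exactly the 4-byte prefix.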
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/zarr3_sharding_indexed/key.cc
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/zarr3_sharding_indexed/key_test.cc
4f887a6430414cd6088e1743555015b10f116d50
40874743-137f-4d51-be46-efd8d9dcd11c
cpp
tensorflow/tensorflow
unidirectional_sequence_lstm
tensorflow/lite/kernels/unidirectional_sequence_lstm.cc
tensorflow/lite/kernels/unidirectional_sequence_lstm_test.cc
#include <math.h> #include <algorithm> #include <cstddef> #include "tensorflow/lite/core/c/builtin_op_data.h" #include "tensorflow/lite/core/c/common.h" #include "tensorflow/lite/kernels/cpu_backend_context.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/kernel_utils.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/tensor_utils.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/lstm_eval.h" #include "tensorflow/lite/kernels/lstm_shared.h" namespace tflite { namespace ops { namespace builtin { namespace unidirectional_sequence_lstm { namespace { struct OpData { bool use_layer_norm; int scratch_tensor_index; bool compute_row_sums = false; bool recurrent_to_input_is_diag = false; bool recurrent_to_forget_is_diag = false; bool recurrent_to_cell_is_diag = false; bool recurrent_to_output_is_diag = false; lstm_eval::IntegerLstmParameter integer_lstm_param; }; TfLiteStatus PopulateQuantizedLstmParams8x8_16( TfLiteContext* context, TfLiteNode* node, lstm_eval::IntegerLstmParameter* integer_lstm_param) { const auto* params = static_cast<TfLiteUnidirectionalSequenceLSTMParams*>(node->builtin_data); const float cell_clip = params->cell_clip; const float proj_clip = params->proj_clip; const TfLiteTensor* cell_state = GetVariableInput(context, node, lstm::full::kCellStateTensor); TF_LITE_ENSURE(context, cell_state != nullptr); TfLiteTensor* output_tensor; TF_LITE_ENSURE_OK( context, GetOutputSafe(context, node, lstm::full::kOutputTensor, &output_tensor)); TF_LITE_ENSURE(context, cell_state->quantization.type != kTfLiteNoQuantization); auto* cell_state_params = static_cast<TfLiteAffineQuantization*>(cell_state->quantization.params); TF_LITE_ENSURE(context, output_tensor->quantization.type != kTfLiteNoQuantization); auto* proj_params = static_cast<TfLiteAffineQuantization*>( output_tensor->quantization.params); if (cell_clip > 0.0) { integer_lstm_param->quantized_cell_clip = static_cast<int16_t>(std::min( std::max(cell_clip / cell_state_params->scale->data[0], -32768.0f), 32767.0f)); } else { integer_lstm_param->quantized_cell_clip = 0; } if (proj_clip > 0.0) { integer_lstm_param->quantized_proj_clip = static_cast<int8_t>(std::min( std::max(proj_clip / proj_params->scale->data[0], -128.0f), 127.0f)); } else { integer_lstm_param->quantized_proj_clip = 0; } OpData* op_data = static_cast<OpData*>(node->user_data); const bool use_layer_norm = op_data->use_layer_norm; const TfLiteTensor* input; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kInputTensor, &input)); const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor( context, node, lstm::full::kInputToInputWeightsTensor); const TfLiteTensor* input_to_forget_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kInputToForgetWeightsTensor, &input_to_forget_weights)); const TfLiteTensor* input_to_cell_weights; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, lstm::full::kInputToCellWeightsTensor, &input_to_cell_weights)); const TfLiteTensor* input_to_output_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kInputToOutputWeightsTensor, &input_to_output_weights)); const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor( context, node, lstm::full::kRecurrentToInputWeightsTensor); const TfLiteTensor* recurrent_to_forget_weights; TF_LITE_ENSURE_OK( 
context, GetInputSafe(context, node, lstm::full::kRecurrentToForgetWeightsTensor, &recurrent_to_forget_weights)); const TfLiteTensor* recurrent_to_cell_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kRecurrentToCellWeightsTensor, &recurrent_to_cell_weights)); const TfLiteTensor* recurrent_to_output_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kRecurrentToOutputWeightsTensor, &recurrent_to_output_weights)); const TfLiteTensor* cell_to_input_weights = GetOptionalInputTensor( context, node, lstm::full::kCellToInputWeightsTensor); const TfLiteTensor* cell_to_forget_weights = GetOptionalInputTensor( context, node, lstm::full::kCellToForgetWeightsTensor); const TfLiteTensor* cell_to_output_weights = GetOptionalInputTensor( context, node, lstm::full::kCellToOutputWeightsTensor); const TfLiteTensor* input_layer_norm_coefficients = GetOptionalInputTensor( context, node, lstm::full::kInputLayerNormCoefficientsTensor); const TfLiteTensor* forget_layer_norm_coefficients = GetOptionalInputTensor( context, node, lstm::full::kForgetLayerNormCoefficientsTensor); const TfLiteTensor* cell_layer_norm_coefficients = GetOptionalInputTensor( context, node, lstm::full::kCellLayerNormCoefficientsTensor); const TfLiteTensor* output_layer_norm_coefficients = GetOptionalInputTensor( context, node, lstm::full::kOutputLayerNormCoefficientsTensor); const TfLiteTensor* projection_weights = GetOptionalInputTensor( context, node, lstm::full::kProjectionWeightsTensor); TfLiteTensor* output_state = GetVariableInput(context, node, lstm::full::kOutputStateTensor); TF_LITE_ENSURE(context, output_state != nullptr); const bool use_cifg = (input_to_input_weights == nullptr); const bool use_peephole = (cell_to_output_weights != nullptr); const bool use_projection = (projection_weights != nullptr); std::vector<float> intermediate_scale; std::vector<int32> intermediate_zp; for (int i = 0; i < 4; ++i) { if (use_layer_norm) { TfLiteTensor* intermediate; TF_LITE_ENSURE_OK(context, GetIntermediatesSafe(context, node, i, &intermediate)); TF_LITE_ENSURE(context, intermediate->quantization.type != kTfLiteNoQuantization); auto* params = static_cast<TfLiteAffineQuantization*>( intermediate->quantization.params); intermediate_scale.push_back(params->scale->data[0]); intermediate_zp.push_back(params->zero_point->data[0]); } else { intermediate_scale.push_back(std::pow(2, -12)); intermediate_zp.push_back(0); } } TfLiteTensor* hidden; TF_LITE_ENSURE_OK(context, GetIntermediatesSafe(context, node, 4, &hidden)); TF_LITE_ENSURE(context, hidden->quantization.type != kTfLiteNoQuantization); auto* hidden_params = static_cast<TfLiteAffineQuantization*>(hidden->quantization.params); intermediate_scale.push_back(hidden_params->scale->data[0]); intermediate_zp.push_back(hidden_params->zero_point->data[0]); const float default_scale = 1.0; float input_scale = default_scale; float input_to_input_weight_scale = default_scale; float recurrent_to_input_weight_scale = default_scale; float cell_to_input_weight_scale = default_scale; float input_to_forget_weight_scale = default_scale; float recurrent_to_forget_weight_scale = default_scale; float cell_to_forget_weight_scale = default_scale; float input_to_cell_weight_scale = default_scale; float recurrent_to_cell_weight_scale = default_scale; float input_to_output_weight_scale = default_scale; float recurrent_to_output_weight_scale = default_scale; float cell_to_output_weight_scale = default_scale; float projection_weight_scale = default_scale; float 
layer_norm_input_scale = default_scale; float layer_norm_forget_scale = default_scale; float layer_norm_cell_scale = default_scale; float layer_norm_output_scale = default_scale; float output_state_scale = default_scale; int cell_scale = 1; float effective_input_to_input_scale = default_scale; float effective_recurrent_to_input_scale = default_scale; float effective_cell_to_input_scale = default_scale; float effective_input_to_forget_scale = default_scale; float effective_recurrent_to_forget_scale = default_scale; float effective_cell_to_forget_scale = default_scale; float effective_input_to_cell_scale = default_scale; float effective_recurrent_to_cell_scale = default_scale; float effective_input_to_output_scale = default_scale; float effective_recurrent_to_output_scale = default_scale; float effective_cell_to_output_scale = default_scale; float effective_proj_scale = default_scale; float effective_hidden_scale = default_scale; if (!use_cifg) { input_to_input_weight_scale = input_to_input_weights->params.scale; recurrent_to_input_weight_scale = recurrent_to_input_weights->params.scale; } if (use_peephole) { if (!use_cifg) { cell_to_input_weight_scale = cell_to_input_weights->params.scale; } cell_to_forget_weight_scale = cell_to_forget_weights->params.scale; cell_to_output_weight_scale = cell_to_output_weights->params.scale; } if (use_layer_norm) { if (!use_cifg) { layer_norm_input_scale = input_layer_norm_coefficients->params.scale; } layer_norm_forget_scale = forget_layer_norm_coefficients->params.scale; layer_norm_cell_scale = cell_layer_norm_coefficients->params.scale; layer_norm_output_scale = output_layer_norm_coefficients->params.scale; } if (use_projection) { projection_weight_scale = projection_weights->params.scale; } output_state_scale = output_state->params.scale; input_to_forget_weight_scale = input_to_forget_weights->params.scale; input_to_cell_weight_scale = input_to_cell_weights->params.scale; input_to_output_weight_scale = input_to_output_weights->params.scale; recurrent_to_forget_weight_scale = recurrent_to_forget_weights->params.scale; recurrent_to_cell_weight_scale = recurrent_to_cell_weights->params.scale; recurrent_to_output_weight_scale = recurrent_to_output_weights->params.scale; TF_LITE_ENSURE(context, CheckedLog2(cell_state->params.scale, &cell_scale)); integer_lstm_param->cell_scale = cell_scale; input_scale = input->params.scale; if (!use_cifg) { effective_input_to_input_scale = input_to_input_weight_scale * input_scale / intermediate_scale[0]; effective_recurrent_to_input_scale = recurrent_to_input_weight_scale * output_state_scale / intermediate_scale[0]; } effective_input_to_forget_scale = input_to_forget_weight_scale * input_scale / intermediate_scale[1]; effective_recurrent_to_forget_scale = recurrent_to_forget_weight_scale * output_state_scale / intermediate_scale[1]; effective_input_to_cell_scale = input_to_cell_weight_scale * input_scale / intermediate_scale[2]; effective_recurrent_to_cell_scale = recurrent_to_cell_weight_scale * output_state_scale / intermediate_scale[2]; effective_input_to_output_scale = input_to_output_weight_scale * input_scale / intermediate_scale[3]; effective_recurrent_to_output_scale = recurrent_to_output_weight_scale * output_state_scale / intermediate_scale[3]; effective_hidden_scale = std::pow(2, -15) / intermediate_scale[4] * std::pow(2, -15); effective_proj_scale = projection_weight_scale * intermediate_scale[4] / output_state_scale; if (use_peephole) { if (!use_cifg) { effective_cell_to_input_scale = std::pow(2, cell_scale) 
* cell_to_input_weight_scale / intermediate_scale[0]; } effective_cell_to_forget_scale = std::pow(2, cell_scale) * cell_to_forget_weight_scale / intermediate_scale[1]; effective_cell_to_output_scale = std::pow(2, cell_scale) * cell_to_output_weight_scale / intermediate_scale[3]; } QuantizeMultiplier(effective_input_to_input_scale, &integer_lstm_param->effective_input_to_input_scale_a, &integer_lstm_param->effective_input_to_input_scale_b); QuantizeMultiplier(effective_recurrent_to_input_scale, &integer_lstm_param->effective_recurrent_to_input_scale_a, &integer_lstm_param->effective_recurrent_to_input_scale_b); QuantizeMultiplier(effective_cell_to_input_scale, &integer_lstm_param->effective_cell_to_input_scale_a, &integer_lstm_param->effective_cell_to_input_scale_b); QuantizeMultiplier(effective_input_to_forget_scale, &integer_lstm_param->effective_input_to_forget_scale_a, &integer_lstm_param->effective_input_to_forget_scale_b); QuantizeMultiplier( effective_recurrent_to_forget_scale, &integer_lstm_param->effective_recurrent_to_forget_scale_a, &integer_lstm_param->effective_recurrent_to_forget_scale_b); QuantizeMultiplier(effective_cell_to_forget_scale, &integer_lstm_param->effective_cell_to_forget_scale_a, &integer_lstm_param->effective_cell_to_forget_scale_b); QuantizeMultiplier(effective_input_to_cell_scale, &integer_lstm_param->effective_input_to_cell_scale_a, &integer_lstm_param->effective_input_to_cell_scale_b); QuantizeMultiplier(effective_recurrent_to_cell_scale, &integer_lstm_param->effective_recurrent_to_cell_scale_a, &integer_lstm_param->effective_recurrent_to_cell_scale_b); QuantizeMultiplier(effective_input_to_output_scale, &integer_lstm_param->effective_input_to_output_scale_a, &integer_lstm_param->effective_input_to_output_scale_b); QuantizeMultiplier( effective_recurrent_to_output_scale, &integer_lstm_param->effective_recurrent_to_output_scale_a, &integer_lstm_param->effective_recurrent_to_output_scale_b); QuantizeMultiplier(effective_cell_to_output_scale, &integer_lstm_param->effective_cell_to_output_scale_a, &integer_lstm_param->effective_cell_to_output_scale_b); QuantizeMultiplier(effective_proj_scale, &integer_lstm_param->effective_proj_scale_a, &integer_lstm_param->effective_proj_scale_b); QuantizeMultiplier(effective_hidden_scale, &integer_lstm_param->effective_hidden_scale_a, &integer_lstm_param->effective_hidden_scale_b); QuantizeMultiplier(layer_norm_input_scale, &integer_lstm_param->layer_norm_input_scale_a, &integer_lstm_param->layer_norm_input_scale_b); QuantizeMultiplier(layer_norm_forget_scale, &integer_lstm_param->layer_norm_forget_scale_a, &integer_lstm_param->layer_norm_forget_scale_b); QuantizeMultiplier(layer_norm_cell_scale, &integer_lstm_param->layer_norm_cell_scale_a, &integer_lstm_param->layer_norm_cell_scale_b); QuantizeMultiplier(layer_norm_output_scale, &integer_lstm_param->layer_norm_output_scale_a, &integer_lstm_param->layer_norm_output_scale_b); integer_lstm_param->hidden_zp = intermediate_zp[4]; if (!use_cifg) { integer_lstm_param->input_variance_guard = std::max(1, static_cast<int32_t>(10000 * layer_norm_input_scale)); } integer_lstm_param->forget_variance_guard = std::max(1, static_cast<int32_t>(10000 * layer_norm_forget_scale)); integer_lstm_param->cell_variance_guard = std::max(1, static_cast<int32_t>(10000 * layer_norm_cell_scale)); integer_lstm_param->output_variance_guard = std::max(1, static_cast<int32_t>(10000 * layer_norm_output_scale)); return kTfLiteOk; } } enum TemporaryTensor { kScratchBuffer = 0, kInputQuantized = 1, 
kOutputStateQuantized = 2, kCellStateQuantized = 3, kInputScalingFactors = 4, kOutputStateScalingFactors = 5, kProductScalingFactors = 6, kRecoveredCellWeights = 7, kAccumScratch = 8, kInputZeroPoints = 9, kOutputStateZeroPoints = 10, kRowSums = 11, kNumTemporaryTensors = 12, }; void* Init(TfLiteContext* context, const char* buffer, size_t length) { auto* op_data = new OpData(); context->AddTensors(context, kNumTemporaryTensors, &op_data->scratch_tensor_index); return op_data; } void Free(TfLiteContext* context, void* buffer) { delete reinterpret_cast<OpData*>(buffer); } TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context, TfLiteNode* node, int n_input, int n_output, int n_cell, bool use_layer_norm, bool is_integer) { const auto* params = reinterpret_cast<TfLiteLSTMParams*>(node->builtin_data); TF_LITE_ENSURE(context, params->cell_clip >= 0); TF_LITE_ENSURE(context, params->proj_clip >= 0); const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor( context, node, lstm::full::kInputToInputWeightsTensor); if (input_to_input_weights != nullptr) { TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->size, 2); TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->data[0], n_cell); TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->data[1], n_input); } const TfLiteTensor* input_to_forget_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kInputToForgetWeightsTensor, &input_to_forget_weights)); TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->size, 2); TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->data[0], n_cell); TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->data[1], n_input); const TfLiteTensor* input_to_cell_weights; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, lstm::full::kInputToCellWeightsTensor, &input_to_cell_weights)); TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->size, 2); TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->data[0], n_cell); TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->data[1], n_input); const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor( context, node, lstm::full::kRecurrentToInputWeightsTensor); if (recurrent_to_input_weights != nullptr) { bool recurrent_to_input_is_diag = recurrent_to_input_weights->dims->size == 1; if (recurrent_to_input_is_diag) { TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->size, 1); } else { TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->size, 2); TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->data[1], n_output); TF_LITE_ENSURE_TYPES_EQ(context, recurrent_to_input_weights->type, input_to_forget_weights->type); } TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->data[0], n_cell); } const TfLiteTensor* recurrent_to_forget_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kRecurrentToForgetWeightsTensor, &recurrent_to_forget_weights)); bool recurrent_to_forget_is_diag = recurrent_to_forget_weights->dims->size == 1; if (recurrent_to_forget_is_diag) { TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->size, 1); } else { TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->size, 2); TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->data[1], n_output); TF_LITE_ENSURE_TYPES_EQ(context, recurrent_to_forget_weights->type, input_to_forget_weights->type); } const TfLiteTensor* recurrent_to_cell_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, 
lstm::full::kRecurrentToCellWeightsTensor, &recurrent_to_cell_weights)); bool recurrent_to_cell_is_diag = recurrent_to_cell_weights->dims->size == 1; if (recurrent_to_cell_is_diag) { TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->size, 1); } else { TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->size, 2); TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->data[1], n_output); TF_LITE_ENSURE_TYPES_EQ(context, recurrent_to_cell_weights->type, input_to_forget_weights->type); } const bool cifg_weights_all_or_none = ((input_to_input_weights != nullptr) && (recurrent_to_input_weights != nullptr)) || ((input_to_input_weights == nullptr) && (recurrent_to_input_weights == nullptr)); TF_LITE_ENSURE(context, cifg_weights_all_or_none == true); const TfLiteTensor* cell_to_input_weights = GetOptionalInputTensor( context, node, lstm::full::kCellToInputWeightsTensor); if (cell_to_input_weights != nullptr) { TF_LITE_ENSURE_EQ(context, cell_to_input_weights->dims->size, 1); TF_LITE_ENSURE_EQ(context, cell_to_input_weights->dims->data[0], n_cell); TF_LITE_ENSURE_TYPES_EQ( context, cell_to_input_weights->type, is_integer ? kTfLiteInt16 : input_to_forget_weights->type); } const TfLiteTensor* cell_to_forget_weights = GetOptionalInputTensor( context, node, lstm::full::kCellToForgetWeightsTensor); if (cell_to_forget_weights != nullptr) { TF_LITE_ENSURE_EQ(context, cell_to_forget_weights->dims->size, 1); TF_LITE_ENSURE_EQ(context, cell_to_forget_weights->dims->data[0], n_cell); TF_LITE_ENSURE_TYPES_EQ( context, cell_to_forget_weights->type, is_integer ? kTfLiteInt16 : input_to_forget_weights->type); } const TfLiteTensor* cell_to_output_weights = GetOptionalInputTensor( context, node, lstm::full::kCellToOutputWeightsTensor); if (cell_to_output_weights != nullptr) { TF_LITE_ENSURE_EQ(context, cell_to_output_weights->dims->size, 1); TF_LITE_ENSURE_EQ(context, cell_to_output_weights->dims->data[0], n_cell); TF_LITE_ENSURE_TYPES_EQ( context, cell_to_output_weights->type, is_integer ? 
kTfLiteInt16 : input_to_forget_weights->type); } const bool use_cifg = (input_to_input_weights == nullptr); const bool peephole_weights_all_or_none = ((cell_to_input_weights != nullptr || use_cifg) && (cell_to_forget_weights != nullptr) && (cell_to_output_weights != nullptr)) || ((cell_to_input_weights == nullptr) && (cell_to_forget_weights == nullptr) && (cell_to_output_weights == nullptr)); TF_LITE_ENSURE(context, peephole_weights_all_or_none == true); const TfLiteTensor* input_gate_bias = GetOptionalInputTensor(context, node, lstm::full::kInputGateBiasTensor); if (use_cifg) { TF_LITE_ENSURE_EQ(context, input_gate_bias, nullptr); } else { TF_LITE_ENSURE_EQ(context, input_gate_bias->dims->size, 1); TF_LITE_ENSURE_EQ(context, input_gate_bias->dims->data[0], n_cell); if (is_integer) { TF_LITE_ENSURE_TYPES_EQ(context, input_gate_bias->type, kTfLiteInt32); } else { TF_LITE_ENSURE_TYPES_EQ(context, input_gate_bias->type, kTfLiteFloat32); } } const TfLiteTensor* forget_gate_bias; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kForgetGateBiasTensor, &forget_gate_bias)); TF_LITE_ENSURE_EQ(context, forget_gate_bias->dims->size, 1); TF_LITE_ENSURE_EQ(context, forget_gate_bias->dims->data[0], n_cell); if (is_integer) { TF_LITE_ENSURE_TYPES_EQ(context, forget_gate_bias->type, kTfLiteInt32); } else { TF_LITE_ENSURE_TYPES_EQ(context, forget_gate_bias->type, kTfLiteFloat32); } const TfLiteTensor* cell_gate_bias; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, lstm::full::kCellGateBiasTensor, &cell_gate_bias)); TF_LITE_ENSURE_EQ(context, cell_gate_bias->dims->size, 1); TF_LITE_ENSURE_EQ(context, cell_gate_bias->dims->data[0], n_cell); if (is_integer) { TF_LITE_ENSURE_TYPES_EQ(context, cell_gate_bias->type, kTfLiteInt32); } else { TF_LITE_ENSURE_TYPES_EQ(context, cell_gate_bias->type, kTfLiteFloat32); } const TfLiteTensor* output_gate_bias; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kOutputGateBiasTensor, &output_gate_bias)); TF_LITE_ENSURE_EQ(context, output_gate_bias->dims->size, 1); TF_LITE_ENSURE_EQ(context, output_gate_bias->dims->data[0], n_cell); if (is_integer) { TF_LITE_ENSURE_TYPES_EQ(context, output_gate_bias->type, kTfLiteInt32); } else { TF_LITE_ENSURE_TYPES_EQ(context, output_gate_bias->type, kTfLiteFloat32); } const TfLiteTensor* projection_weights = GetOptionalInputTensor( context, node, lstm::full::kProjectionWeightsTensor); if (projection_weights != nullptr) { TF_LITE_ENSURE_EQ(context, projection_weights->dims->size, 2); TF_LITE_ENSURE_EQ(context, projection_weights->dims->data[0], n_output); TF_LITE_ENSURE_EQ(context, projection_weights->dims->data[1], n_cell); } const TfLiteTensor* projection_bias = GetOptionalInputTensor(context, node, lstm::full::kProjectionBiasTensor); if (projection_bias != nullptr) { TF_LITE_ENSURE_EQ(context, projection_bias->dims->size, 1); TF_LITE_ENSURE_EQ(context, projection_bias->dims->data[0], n_output); if (is_integer) { TF_LITE_ENSURE_TYPES_EQ(context, projection_bias->type, kTfLiteInt32); } else { TF_LITE_ENSURE_TYPES_EQ(context, projection_bias->type, kTfLiteFloat32); } } const bool projecton_tensors_consistent = ((projection_weights != nullptr) || (projection_bias == nullptr)); TF_LITE_ENSURE(context, projecton_tensors_consistent == true); if (use_layer_norm) { const TfLiteTensor* input_layer_norm_coefficients = GetOptionalInputTensor( context, node, lstm::full::kInputLayerNormCoefficientsTensor); if (use_cifg) { TF_LITE_ENSURE_EQ(context, input_layer_norm_coefficients, nullptr); } else { 
TF_LITE_ENSURE(context, input_layer_norm_coefficients != nullptr); TF_LITE_ENSURE_EQ(context, input_layer_norm_coefficients->dims->size, 1); TF_LITE_ENSURE_EQ(context, input_layer_norm_coefficients->dims->data[0], n_cell); if (is_integer) { TF_LITE_ENSURE_TYPES_EQ(context, input_layer_norm_coefficients->type, kTfLiteInt16); } else { TF_LITE_ENSURE_TYPES_EQ(context, input_layer_norm_coefficients->type, kTfLiteFloat32); } } const TfLiteTensor* forget_layer_norm_coefficients; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kForgetLayerNormCoefficientsTensor, &forget_layer_norm_coefficients)); TF_LITE_ENSURE_EQ(context, forget_layer_norm_coefficients->dims->size, 1); TF_LITE_ENSURE_EQ(context, forget_layer_norm_coefficients->dims->data[0], n_cell); if (is_integer) { TF_LITE_ENSURE_TYPES_EQ(context, forget_layer_norm_coefficients->type, kTfLiteInt16); } else { TF_LITE_ENSURE_TYPES_EQ(context, forget_layer_norm_coefficients->type, kTfLiteFloat32); } const TfLiteTensor* cell_layer_norm_coefficients; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, lstm::full::kCellLayerNormCoefficientsTensor, &cell_layer_norm_coefficients)); TF_LITE_ENSURE_EQ(context, cell_layer_norm_coefficients->dims->size, 1); TF_LITE_ENSURE_EQ(context, cell_layer_norm_coefficients->dims->data[0], n_cell); if (is_integer) { TF_LITE_ENSURE_TYPES_EQ(context, cell_layer_norm_coefficients->type, kTfLiteInt16); } else { TF_LITE_ENSURE_TYPES_EQ(context, cell_layer_norm_coefficients->type, kTfLiteFloat32); } const TfLiteTensor* output_layer_norm_coefficients; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kOutputLayerNormCoefficientsTensor, &output_layer_norm_coefficients)); TF_LITE_ENSURE_EQ(context, output_layer_norm_coefficients->dims->size, 1); TF_LITE_ENSURE_EQ(context, output_layer_norm_coefficients->dims->data[0], n_cell); if (is_integer) { TF_LITE_ENSURE_TYPES_EQ(context, output_layer_norm_coefficients->type, kTfLiteInt16); } else { TF_LITE_ENSURE_TYPES_EQ(context, output_layer_norm_coefficients->type, kTfLiteFloat32); } } return kTfLiteOk; } TfLiteStatus PrecomputeZeroPointTimesWeightWithBias( TfLiteContext* context, int32_t zero_point, const TfLiteTensor* weight_tensor, const TfLiteTensor* bias_tensor, std::unique_ptr<int32_t[]>* output) { if (weight_tensor == nullptr) { return kTfLiteOk; } const RuntimeShape& weight_shape = GetTensorShape(weight_tensor); TF_LITE_ENSURE_EQ(context, weight_shape.DimensionsCount(), 2); const int row = weight_shape.Dims(0); const int col = weight_shape.Dims(1); output->reset(new int32_t[row]); if (bias_tensor == nullptr) { memset(output->get(), 0, row * sizeof(int32_t)); } else { const int32_t* bias = GetTensorData<int32_t>(bias_tensor); memcpy(output->get(), bias, row * sizeof(int32_t)); } if (zero_point != 0) { const int8_t* weight = GetTensorData<int8_t>(weight_tensor); tensor_utils::MatrixScalarMultiplyAccumulate(weight, zero_point, row, col, output->get()); } return kTfLiteOk; } TfLiteStatus PopulatePrecomputedZPTimesWeightsWithBias(TfLiteContext* context, OpData* op_data, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kInputTensor, &input)); const TfLiteTensor* output_state = GetVariableInput(context, node, lstm::full::kOutputStateTensor); TF_LITE_ENSURE(context, output_state != nullptr); const int32_t input_zero_point = -input->params.zero_point; const int32_t output_state_zero_point = -output_state->params.zero_point; const TfLiteTensor* input_to_input_weights = 
GetOptionalInputTensor( context, node, lstm::full::kInputToInputWeightsTensor); const TfLiteTensor* input_to_forget_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kInputToForgetWeightsTensor, &input_to_forget_weights)); const TfLiteTensor* input_to_cell_weights; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, lstm::full::kInputToCellWeightsTensor, &input_to_cell_weights)); const TfLiteTensor* input_to_output_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kInputToOutputWeightsTensor, &input_to_output_weights)); const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor( context, node, lstm::full::kRecurrentToInputWeightsTensor); const TfLiteTensor* recurrent_to_forget_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kRecurrentToForgetWeightsTensor, &recurrent_to_forget_weights)); const TfLiteTensor* recurrent_to_cell_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kRecurrentToCellWeightsTensor, &recurrent_to_cell_weights)); const TfLiteTensor* recurrent_to_output_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kRecurrentToOutputWeightsTensor, &recurrent_to_output_weights)); const TfLiteTensor* projection_weights = GetOptionalInputTensor( context, node, lstm::full::kProjectionWeightsTensor); const TfLiteTensor* projection_bias = GetOptionalInputTensor(context, node, lstm::full::kProjectionBiasTensor); lstm_eval::IntegerLstmParameter* integer_lstm_params = &op_data->integer_lstm_param; const TfLiteTensor* intermediate = &context->tensors[node->intermediates->data[4]]; TF_LITE_ENSURE(context, intermediate->quantization.type != kTfLiteNoQuantization); const auto* params = static_cast<TfLiteAffineQuantization*>(intermediate->quantization.params); const int32_t hidden_zp = params->zero_point->data[0]; const bool is_layer_norm = op_data->use_layer_norm; const TfLiteTensor* forget_gate_bias = is_layer_norm ? nullptr : GetInput(context, node, lstm::full::kForgetGateBiasTensor); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, input_zero_point, input_to_forget_weights, forget_gate_bias, &(integer_lstm_params->input_to_forget_effective_bias))); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, output_state_zero_point, recurrent_to_forget_weights, nullptr, &(integer_lstm_params->recurrent_to_forget_effective_bias))); const TfLiteTensor* cell_gate_bias = is_layer_norm ? nullptr : GetInput(context, node, lstm::full::kCellGateBiasTensor); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, input_zero_point, input_to_cell_weights, cell_gate_bias, &(integer_lstm_params->input_to_cell_effective_bias))); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, output_state_zero_point, recurrent_to_cell_weights, nullptr, &(integer_lstm_params->recurrent_to_cell_effective_bias))); const TfLiteTensor* output_gate_bias = is_layer_norm ? nullptr : GetInput(context, node, lstm::full::kOutputGateBiasTensor); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, input_zero_point, input_to_output_weights, output_gate_bias, &(integer_lstm_params->input_to_output_effective_bias))); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, output_state_zero_point, recurrent_to_output_weights, nullptr, &(integer_lstm_params->recurrent_to_output_effective_bias))); const TfLiteTensor* input_gate_bias = is_layer_norm ? 
nullptr : GetInput(context, node, lstm::full::kInputGateBiasTensor); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, input_zero_point, input_to_input_weights, input_gate_bias, &(integer_lstm_params->input_to_input_effective_bias))); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, output_state_zero_point, recurrent_to_input_weights, nullptr, &(integer_lstm_params->recurrent_to_input_effective_bias))); TF_LITE_ENSURE_OK(context, PrecomputeZeroPointTimesWeightWithBias( context, hidden_zp, projection_weights, projection_bias, &(integer_lstm_params->projection_effective_bias))); return kTfLiteOk; } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { OpData* op_data = reinterpret_cast<OpData*>(node->user_data); const int scratch_tensor_index = op_data->scratch_tensor_index; bool use_layer_norm = false; if (node->inputs->size == 24) { const TfLiteTensor* forget_layer_norm_coefficients = GetOptionalInputTensor( context, node, lstm::full::kForgetLayerNormCoefficientsTensor); if (forget_layer_norm_coefficients == nullptr) { use_layer_norm = false; } else { use_layer_norm = true; } } else if (node->inputs->size == 20) { use_layer_norm = false; } else { TF_LITE_KERNEL_LOG( context, "The LSTM Full kernel expects 20 or 24 inputs. Got %d inputs", node->inputs->size); return kTfLiteError; } TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); op_data->use_layer_norm = use_layer_norm; const TfLiteTensor* input; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kInputTensor, &input)); const bool is_integer = input->type == kTfLiteInt8; TF_LITE_ENSURE(context, input->dims->size > 1); const auto* params = reinterpret_cast<TfLiteUnidirectionalSequenceLSTMParams*>( node->builtin_data); const bool time_major = params->time_major; const int n_batch = time_major ? input->dims->data[1] : input->dims->data[0]; const int n_input = input->dims->data[2]; const TfLiteTensor* input_to_output_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kInputToOutputWeightsTensor, &input_to_output_weights)); const int n_cell = input_to_output_weights->dims->data[0]; TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->size, 2); TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->data[1], n_input); const TfLiteTensor* recurrent_to_output_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kRecurrentToOutputWeightsTensor, &recurrent_to_output_weights)); bool recurrent_to_output_is_diag = recurrent_to_output_weights->dims->size == 1 ? true : false; if (recurrent_to_output_is_diag) { TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->size, 1); } else { TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->size, 2); TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->type, input_to_output_weights->type); } TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->data[0], n_cell); const int n_output = recurrent_to_output_is_diag ? 
recurrent_to_output_weights->dims->data[0] : recurrent_to_output_weights->dims->data[1]; TF_LITE_ENSURE_OK( context, CheckInputTensorDimensions(context, node, n_input, n_output, n_cell, use_layer_norm, is_integer)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, lstm::full::kOutputTensor, &output)); TfLiteTensor* output_state = GetVariableInput(context, node, lstm::full::kOutputStateTensor); TF_LITE_ENSURE(context, output_state != nullptr); TfLiteTensor* cell_state = GetVariableInput(context, node, lstm::full::kCellStateTensor); TF_LITE_ENSURE(context, cell_state != nullptr); TF_LITE_ENSURE_EQ(context, NumElements(output_state), n_batch * n_output); TF_LITE_ENSURE_EQ(context, NumElements(cell_state), n_batch * n_cell); TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims); output_size->data[input->dims->size - 1] = n_output; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_size)); if (is_integer) { const int num_intermediate_tensors = node->intermediates->size; TF_LITE_ENSURE(context, num_intermediate_tensors == 5); } TfLiteIntArrayFree(node->temporaries); if (IsHybridOp(input, input_to_output_weights)) { node->temporaries = TfLiteIntArrayCreate(kNumTemporaryTensors); } else if (is_integer) { node->temporaries = TfLiteIntArrayCreate(6); } else { node->temporaries = TfLiteIntArrayCreate(1); } node->temporaries->data[kScratchBuffer] = scratch_tensor_index + kScratchBuffer; TfLiteTensor* scratch_buffer; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kScratchBuffer, &scratch_buffer)); scratch_buffer->type = input->type; scratch_buffer->allocation_type = kTfLiteArenaRw; const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor( context, node, lstm::full::kInputToInputWeightsTensor); const bool use_cifg = (input_to_input_weights == nullptr); TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(2); scratch_buffer_size->data[0] = n_batch; if (use_cifg) { scratch_buffer_size->data[1] = n_cell * 4 + 16; } else { scratch_buffer_size->data[1] = n_cell * 5 + 16; } TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer, scratch_buffer_size)); if (IsHybridOp(input, input_to_output_weights)) { op_data->compute_row_sums = true; node->temporaries->data[kInputQuantized] = scratch_tensor_index + kInputQuantized; TfLiteTensor* input_quantized; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kInputQuantized, &input_quantized)); input_quantized->type = input_to_output_weights->type; input_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) { TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, input_quantized_size)); } node->temporaries->data[kOutputStateQuantized] = scratch_tensor_index + kOutputStateQuantized; TfLiteTensor* output_state_quantized; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kOutputStateQuantized, &output_state_quantized)); output_state_quantized->type = input_to_output_weights->type; output_state_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(output_state_quantized->dims, output_state->dims)) { TfLiteIntArray* output_state_quantized_size = TfLiteIntArrayCopy(output_state->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_state_quantized, output_state_quantized_size)); } node->temporaries->data[kCellStateQuantized] = scratch_tensor_index + kCellStateQuantized; 
TfLiteTensor* cell_state_quantized; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kCellStateQuantized, &cell_state_quantized)); cell_state_quantized->type = input_to_output_weights->type; cell_state_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(cell_state_quantized->dims, cell_state->dims)) { TfLiteIntArray* cell_state_quantized_size = TfLiteIntArrayCopy(cell_state->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, cell_state_quantized, cell_state_quantized_size)); } node->temporaries->data[kInputScalingFactors] = op_data->scratch_tensor_index + kInputScalingFactors; TfLiteTensor* input_sf; TF_LITE_ENSURE_OK( context, GetTemporarySafe(context, node, kInputScalingFactors, &input_sf)); input_sf->type = kTfLiteFloat32; input_sf->allocation_type = kTfLiteArenaRw; int scaling_dims[1] = {n_batch}; if (!TfLiteIntArrayEqualsArray(input_sf->dims, 1, scaling_dims)) { TfLiteIntArray* input_sf_size = TfLiteIntArrayCreate(1); input_sf_size->data[0] = n_batch; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, input_sf, input_sf_size)); } node->temporaries->data[kOutputStateScalingFactors] = op_data->scratch_tensor_index + kOutputStateScalingFactors; TfLiteTensor* output_state_sf; TF_LITE_ENSURE_OK( context, GetTemporarySafe(context, node, kOutputStateScalingFactors, &output_state_sf)); output_state_sf->type = kTfLiteFloat32; output_state_sf->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqualsArray(output_state_sf->dims, 1, scaling_dims)) { TfLiteIntArray* output_state_sf_size = TfLiteIntArrayCreate(1); output_state_sf_size->data[0] = n_batch; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_state_sf, output_state_sf_size)); } node->temporaries->data[kProductScalingFactors] = scratch_tensor_index + kProductScalingFactors; TfLiteTensor* prod_scaling_factors; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kProductScalingFactors, &prod_scaling_factors)); prod_scaling_factors->type = kTfLiteFloat32; prod_scaling_factors->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqualsArray(prod_scaling_factors->dims, 1, scaling_dims)) { TfLiteIntArray* prod_scaling_factors_size = TfLiteIntArrayCreate(1); prod_scaling_factors_size->data[0] = n_batch; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, prod_scaling_factors, prod_scaling_factors_size)); } node->temporaries->data[kRecoveredCellWeights] = scratch_tensor_index + kRecoveredCellWeights; TfLiteTensor* recovered_cell_weights; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kRecoveredCellWeights, &recovered_cell_weights)); recovered_cell_weights->type = kTfLiteFloat32; recovered_cell_weights->allocation_type = kTfLiteArenaRw; int recovered_cell_dims[1] = {n_cell}; if (!TfLiteIntArrayEqualsArray(recovered_cell_weights->dims, 1, recovered_cell_dims)) { TfLiteIntArray* recovered_cell_weights_size = TfLiteIntArrayCreate(1); recovered_cell_weights_size->data[0] = n_cell; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, recovered_cell_weights, recovered_cell_weights_size)); } node->temporaries->data[kAccumScratch] = scratch_tensor_index + kAccumScratch; TfLiteTensor* accum_scratch; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kAccumScratch, &accum_scratch)); accum_scratch->type = kTfLiteInt32; accum_scratch->allocation_type = kTfLiteArenaRw; int accum_scratch_dims[2] = {n_cell, n_batch}; if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2, accum_scratch_dims)) { TfLiteIntArray* accum_size = TfLiteIntArrayCreate(2); 
accum_size->data[0] = n_cell; accum_size->data[1] = n_batch; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, accum_scratch, accum_size)); } node->temporaries->data[kInputZeroPoints] = op_data->scratch_tensor_index + kInputZeroPoints; TfLiteTensor* input_zp; TF_LITE_ENSURE_OK( context, GetTemporarySafe(context, node, kInputZeroPoints, &input_zp)); input_zp->type = kTfLiteFloat32; input_zp->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqualsArray(input_zp->dims, 1, scaling_dims)) { TfLiteIntArray* input_zp_size = TfLiteIntArrayCreate(1); input_zp_size->data[0] = n_batch; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, input_zp, input_zp_size)); } node->temporaries->data[kOutputStateZeroPoints] = op_data->scratch_tensor_index + kOutputStateZeroPoints; TfLiteTensor* output_state_zp; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kOutputStateZeroPoints, &output_state_zp)); output_state_zp->type = kTfLiteFloat32; output_state_zp->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqualsArray(output_state_zp->dims, 1, scaling_dims)) { TfLiteIntArray* output_state_zp_size = TfLiteIntArrayCreate(1); output_state_zp_size->data[0] = n_batch; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_state_zp, output_state_zp_size)); } node->temporaries->data[kRowSums] = scratch_tensor_index + kRowSums; TfLiteTensor* row_sums; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kRowSums, &row_sums)); row_sums->type = kTfLiteInt32; row_sums->name = "Lstm_row_sums"; row_sums->allocation_type = kTfLiteArenaRwPersistent; int row_sums_rows = use_cifg ? 6 : 8; const TfLiteTensor* projection_weights = GetOptionalInputTensor( context, node, lstm::full::kProjectionWeightsTensor); if (projection_weights != nullptr) { row_sums_rows += ceil(static_cast<float>(n_output) / n_cell); } int row_sums_dims[2] = {row_sums_rows, n_cell}; if (!TfLiteIntArrayEqualsArray(row_sums->dims, 2, row_sums_dims)) { TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(2); row_sums_size->data[0] = row_sums_dims[0]; row_sums_size->data[1] = row_sums_dims[1]; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, row_sums, row_sums_size)); } } if (is_integer) { PopulateQuantizedLstmParams8x8_16(context, node, &op_data->integer_lstm_param); for (int scratch_index = 0; scratch_index < 6; ++scratch_index) { node->temporaries->data[scratch_index] = op_data->scratch_tensor_index + scratch_index; TfLiteTensor* scratch_tensor; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, scratch_index, &scratch_tensor)); scratch_tensor->type = kTfLiteInt16; if (scratch_index == 4) { scratch_tensor->type = kTfLiteInt8; } else if (scratch_index == 5) { scratch_tensor->type = kTfLiteInt32; } scratch_tensor->allocation_type = kTfLiteArenaRw; const int scratch_dimension[2] = {n_batch, n_cell}; if (!TfLiteIntArrayEqualsArray(scratch_tensor->dims, 2, scratch_dimension)) { TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(2); scratch_buffer_size->data[0] = n_batch; scratch_buffer_size->data[1] = n_cell; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor, scratch_buffer_size)); } } TF_LITE_ENSURE_OK(context, PopulatePrecomputedZPTimesWeightsWithBias( context, op_data, node)); } return kTfLiteOk; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const auto* params = reinterpret_cast<TfLiteUnidirectionalSequenceLSTMParams*>( node->builtin_data); OpData* op_data = reinterpret_cast<OpData*>(node->user_data); const bool use_layer_norm = 
op_data->use_layer_norm; const bool time_major = params->time_major; const TfLiteTensor* input; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kInputTensor, &input)); const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor( context, node, lstm::full::kInputToInputWeightsTensor); const TfLiteTensor* input_to_forget_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kInputToForgetWeightsTensor, &input_to_forget_weights)); const TfLiteTensor* input_to_cell_weights; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, lstm::full::kInputToCellWeightsTensor, &input_to_cell_weights)); const TfLiteTensor* input_to_output_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kInputToOutputWeightsTensor, &input_to_output_weights)); const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor( context, node, lstm::full::kRecurrentToInputWeightsTensor); const TfLiteTensor* recurrent_to_forget_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kRecurrentToForgetWeightsTensor, &recurrent_to_forget_weights)); const TfLiteTensor* recurrent_to_cell_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kRecurrentToCellWeightsTensor, &recurrent_to_cell_weights)); const TfLiteTensor* recurrent_to_output_weights; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kRecurrentToOutputWeightsTensor, &recurrent_to_output_weights)); const TfLiteTensor* cell_to_input_weights = GetOptionalInputTensor( context, node, lstm::full::kCellToInputWeightsTensor); const TfLiteTensor* cell_to_forget_weights = GetOptionalInputTensor( context, node, lstm::full::kCellToForgetWeightsTensor); const TfLiteTensor* cell_to_output_weights = GetOptionalInputTensor( context, node, lstm::full::kCellToOutputWeightsTensor); const TfLiteTensor* input_gate_bias = GetOptionalInputTensor(context, node, lstm::full::kInputGateBiasTensor); const TfLiteTensor* forget_gate_bias; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kForgetGateBiasTensor, &forget_gate_bias)); const TfLiteTensor* cell_gate_bias; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, lstm::full::kCellGateBiasTensor, &cell_gate_bias)); const TfLiteTensor* output_gate_bias; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, lstm::full::kOutputGateBiasTensor, &output_gate_bias)); const TfLiteTensor* projection_weights = GetOptionalInputTensor( context, node, lstm::full::kProjectionWeightsTensor); const TfLiteTensor* projection_bias = GetOptionalInputTensor(context, node, lstm::full::kProjectionBiasTensor); TfLiteTensor* output_state = GetVariableInput(context, node, lstm::full::kOutputStateTensor); TFLITE_DCHECK(output_state != nullptr); TfLiteTensor* cell_state = GetVariableInput(context, node, lstm::full::kCellStateTensor); TFLITE_DCHECK(cell_state != nullptr); const TfLiteTensor* input_layer_norm_coefficients = use_layer_norm ? GetOptionalInputTensor( context, node, lstm::full::kInputLayerNormCoefficientsTensor) : nullptr; const TfLiteTensor* forget_layer_norm_coefficients = use_layer_norm ? GetInput(context, node, lstm::full::kForgetLayerNormCoefficientsTensor) : nullptr; const TfLiteTensor* cell_layer_norm_coefficients = use_layer_norm ? GetInput(context, node, lstm::full::kCellLayerNormCoefficientsTensor) : nullptr; const TfLiteTensor* output_layer_norm_coefficients = use_layer_norm ? 
GetInput(context, node, lstm::full::kOutputLayerNormCoefficientsTensor) : nullptr; TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, lstm::full::kOutputTensor, &output)); TfLiteLSTMParams lstm_params; lstm_params.activation = params->activation; lstm_params.cell_clip = params->cell_clip; lstm_params.proj_clip = params->proj_clip; lstm_params.asymmetric_quantize_inputs = params->asymmetric_quantize_inputs; switch (input_to_output_weights->type) { case kTfLiteFloat32: { TfLiteTensor* scratch_buffer; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kScratchBuffer, &scratch_buffer)); return lstm_eval::EvalFloat( input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_input_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_layer_norm_coefficients, forget_layer_norm_coefficients, cell_layer_norm_coefficients, output_layer_norm_coefficients, nullptr, nullptr, nullptr, nullptr, nullptr, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, &lstm_params, true, time_major, 0, scratch_buffer, output_state, cell_state, output, (recurrent_to_input_weights == nullptr || recurrent_to_input_weights->dims->size == 1), (recurrent_to_forget_weights->dims->size == 1), (recurrent_to_cell_weights->dims->size == 1), (recurrent_to_output_weights->dims->size == 1), CpuBackendContext::GetFromContext(context)); } case kTfLiteUInt8: case kTfLiteInt8: { const bool is_hybrid = input->type == kTfLiteFloat32; if (is_hybrid) { TfLiteTensor* scratch_buffer; TF_LITE_ENSURE_OK( context, GetTemporarySafe(context, node, kScratchBuffer, &scratch_buffer)); TfLiteTensor* row_sums; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kRowSums, &row_sums)); const int row_sums_size = row_sums->dims->data[0]; return lstm_eval::EvalHybrid( input, input_to_input_weights, nullptr, input_to_forget_weights, nullptr, input_to_cell_weights, nullptr, input_to_output_weights, nullptr, recurrent_to_input_weights, nullptr, recurrent_to_forget_weights, nullptr, recurrent_to_cell_weights, nullptr, recurrent_to_output_weights, nullptr, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_layer_norm_coefficients, forget_layer_norm_coefficients, cell_layer_norm_coefficients, output_layer_norm_coefficients, nullptr, nullptr, nullptr, nullptr, nullptr, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, nullptr, projection_bias, &lstm_params, true, time_major, 0, scratch_buffer, GetTemporary(context, node, kInputScalingFactors), nullptr, GetTemporary(context, node, kOutputStateScalingFactors), GetTemporary(context, node, kProductScalingFactors), GetTemporary(context, node, kRecoveredCellWeights), GetTemporary(context, node, kInputQuantized), nullptr, GetTemporary(context, node, kOutputStateQuantized), GetTemporary(context, node, kCellStateQuantized), output_state, cell_state, GetTemporary(context, node, kAccumScratch), output, GetTemporary(context, node, kInputZeroPoints), nullptr, GetTemporary(context, node, kOutputStateZeroPoints), row_sums, row_sums_size, &op_data->compute_row_sums, (recurrent_to_input_weights == nullptr || recurrent_to_input_weights->dims->size == 1), (recurrent_to_forget_weights->dims->size == 1), (recurrent_to_cell_weights->dims->size == 1), (recurrent_to_output_weights->dims->size == 1), 
CpuBackendContext::GetFromContext(context)); } else { TfLiteTensor* scratch0; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 0, &scratch0)); TfLiteTensor* scratch1; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 1, &scratch1)); TfLiteTensor* scratch2; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 2, &scratch2)); TfLiteTensor* scratch3; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 3, &scratch3)); TfLiteTensor* scratch4; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 4, &scratch4)); TfLiteTensor* scratch5; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 5, &scratch5)); return lstm_eval::EvalInteger8x8_16( input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_input_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_layer_norm_coefficients, forget_layer_norm_coefficients, cell_layer_norm_coefficients, output_layer_norm_coefficients, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, &lstm_params, true, time_major, &op_data->integer_lstm_param, output_state, cell_state, output, scratch0, scratch1, scratch2, scratch3, scratch4, scratch5, CpuBackendContext::GetFromContext(context)); } } default: TF_LITE_KERNEL_LOG(context, "Type %s is not currently supported.", TfLiteTypeGetName(input_to_output_weights->type)); return kTfLiteError; } } } TfLiteRegistration* Register_UNIDIRECTIONAL_SEQUENCE_LSTM() { static TfLiteRegistration r = {unidirectional_sequence_lstm::Init, unidirectional_sequence_lstm::Free, unidirectional_sequence_lstm::Prepare, unidirectional_sequence_lstm::Eval}; return &r; } } } }
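Each effective scale computed in PopulateQuantizedLstmParams8x8_16 above (for example, input_to_forget_weight_scale * input_scale / intermediate_scale[1]) is handed to QuantizeMultiplier, which splits the real-valued scale into a 32-bit fixed-point multiplier and a power-of-two shift so the integer kernel can rescale with an integer multiply and shift instead of floating-point math; the _a/_b pairs stored in IntegerLstmParameter are that multiplier and shift. A rough standalone sketch of the decomposition, assuming a positive scale; DecomposeScale is an illustrative name, not the TFLite helper from quantization_util.h.

#include <cmath>
#include <cstdint>
#include <cstdio>

// Split scale into (multiplier, shift) with scale ~= multiplier * 2^(shift - 31),
// where multiplier is a Q31 value. Illustrative sketch only.
void DecomposeScale(double scale, int32_t* multiplier, int* shift) {
  if (scale == 0.0) {
    *multiplier = 0;
    *shift = 0;
    return;
  }
  const double q = std::frexp(scale, shift);  // scale = q * 2^shift, q in [0.5, 1)
  int64_t q31 = static_cast<int64_t>(std::round(q * (1ll << 31)));
  if (q31 == (1ll << 31)) {  // rounding can spill past Q31; renormalize
    q31 /= 2;
    ++*shift;
  }
  *multiplier = static_cast<int32_t>(q31);
}

int main() {
  // Hypothetical values standing in for weight/input/intermediate scales.
  const double effective_scale = 0.003 * 0.05 / std::pow(2, -12);
  int32_t multiplier = 0;
  int shift = 0;
  DecomposeScale(effective_scale, &multiplier, &shift);
  std::printf("scale=%.6f -> multiplier=%d, shift=%d\n", effective_scale,
              static_cast<int>(multiplier), shift);
  return 0;
}

At runtime the rescale is then a widening multiply by the Q31 multiplier followed by a rounding right-shift, which is the form the stored multiplier/shift pairs feed into during Eval.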
#include <tuple>
#include <vector>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "benchmark/benchmark.h"
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/kernels/unidirectional_sequence_lstm_test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {
namespace {

using ::testing::ElementsAreArray;

// Base fixture for the black-box tests below. It holds the float weight and
// bias buffers of one LSTM configuration, the input sequences, and the
// expected ("golden") outputs. The bool test parameter is forwarded to the
// hybrid op models; it presumably toggles asymmetric input quantization.
class BaseUnidirectionalLstmTest : public ::testing::TestWithParam<bool> {
 protected:
  // Weights of the LSTM model. Buffers that a configuration does not use
  // (CIFG, peephole, projection) are simply left empty.
  std::vector<float> input_to_input_weights_;
  std::vector<float> input_to_cell_weights_;
  std::vector<float> input_to_forget_weights_;
  std::vector<float> input_to_output_weights_;
  std::vector<float> input_gate_bias_;
  std::vector<float> cell_gate_bias_;
  std::vector<float> forget_gate_bias_;
  std::vector<float> output_gate_bias_;
  std::vector<float> recurrent_to_input_weights_;
  std::vector<float> recurrent_to_cell_weights_;
  std::vector<float> recurrent_to_forget_weights_;
  std::vector<float> recurrent_to_output_weights_;
  std::vector<float> cell_to_input_weights_;
  std::vector<float> cell_to_forget_weights_;
  std::vector<float> cell_to_output_weights_;
  std::vector<float> projection_weights_;
  std::vector<float> projection_bias_;

  // One entry per batch; each entry is that batch's flattened input or
  // golden output sequence.
  std::vector<std::vector<float>> lstm_input_;
  std::vector<std::vector<float>> lstm_golden_output_;

  // Feeds `input` to `lstm` in time-major or batch-major layout, runs the
  // op, and compares the result against `output` within `tolerance`.
  void VerifyGoldens(const std::vector<std::vector<float>>& input,
                     const std::vector<std::vector<float>>& output,
                     UnidirectionalLSTMOpModel* lstm, float tolerance = 1e-5,
                     bool time_major = true) {
    const int num_batches = input.size();
    EXPECT_GT(num_batches, 0);
    const int num_inputs = lstm->num_inputs();
    EXPECT_GT(num_inputs, 0);
    const int input_sequence_size = input[0].size() / num_inputs;
    EXPECT_GT(input_sequence_size, 0);
    if (time_major) {
      // Time-major layout: for each time step, write one slice per batch.
      for (int i = 0; i < input_sequence_size; ++i) {
        for (int b = 0; b < num_batches; ++b) {
          const float* batch_start = input[b].data() + i * num_inputs;
          const float* batch_end = batch_start + num_inputs;
          lstm->SetInput(((i * num_batches) + b) * num_inputs, batch_start,
                         batch_end);
        }
      }
    } else {
      // Batch-major layout: each batch's whole sequence is contiguous.
      for (int b = 0; b < num_batches; ++b) {
        const float* batch_start = input[b].data();
        const float* batch_end =
            batch_start + input_sequence_size * num_inputs;
        lstm->SetInput(b * input_sequence_size * num_inputs, batch_start,
                       batch_end);
      }
    }

    ASSERT_EQ(lstm->Invoke(), kTfLiteOk);

    const int num_outputs = lstm->num_outputs();
    EXPECT_GT(num_outputs, 0);
    // Assemble the expected values in the same layout as the op's output.
    std::vector<float> expected;
    if (time_major) {
      for (int i = 0; i < input_sequence_size; ++i) {
        for (int b = 0; b < num_batches; ++b) {
          const float* golden_start_batch = output[b].data() + i * num_outputs;
          const float* golden_end_batch = golden_start_batch + num_outputs;
          expected.insert(expected.end(), golden_start_batch,
                          golden_end_batch);
        }
      }
    } else {
      for (int b = 0; b < num_batches; ++b) {
        const float* golden_batch_start = output[b].data();
        const float* golden_batch_end =
            golden_batch_start + input_sequence_size * num_outputs;
        expected.insert(expected.end(), golden_batch_start, golden_batch_end);
      }
    }
    EXPECT_THAT(lstm->GetOutput(),
                ElementsAreArray(ArrayFloatNear(expected, tolerance)));
  }
};

// Full LSTM cell: separate input gate, no peephole connections, no
// projection layer.
class NoCifgNoPeepholeNoProjectionNoClippingUnidirectionalLstmTest
    : public BaseUnidirectionalLstmTest {
  void SetUp() override {
    input_to_input_weights_ = {-0.45018822, -0.02338299, -0.0870589,
                               -0.34550029, 0.04266912, -0.15680569,
                               -0.34856534, 0.43890524};
    input_to_cell_weights_ = {-0.50013041, 0.1370284, 0.11810488, 0.2013163,
                              -0.20583314, 0.44344562, 0.22077113,
                              -0.29909778};
    input_to_forget_weights_ = {0.09701663, 0.20334584, -0.50592935,
                                -0.31343272, -0.40032279, 0.44781327,
                                0.01387155, -0.35593212};
    input_to_output_weights_ = {-0.25065863, -0.28290087, 0.04613829,
                                0.40525138, 0.44272184, 0.03897077,
                                -0.1556896, 0.19487578};
    input_gate_bias_ = {0., 0., 0., 0.};
    cell_gate_bias_ = {0., 0., 0., 0.};
    forget_gate_bias_ = {1., 1., 1., 1.};
    output_gate_bias_ = {0., 0., 0., 0.};

    recurrent_to_input_weights_ = {
        -0.0063535, -0.2042388, 0.31454784, -0.35746509,
        0.28902304, 0.08183324, -0.16555229, 0.02286911,
        -0.13566875, 0.03034258, 0.48091322, -0.12528998,
        0.24077177, -0.51332325, -0.33502164, 0.10629296};

    recurrent_to_cell_weights_ = {
        -0.3407414, 0.24443203, -0.2078532, 0.26320225,
        0.05695659, -0.00123841, -0.4744786, -0.35869038,
        -0.06418842, -0.13502428, -0.501764, 0.22830659,
        -0.46367589, 0.26016325, -0.03894562, -0.16368064};

    recurrent_to_forget_weights_ = {
        -0.48684245, -0.06655136, 0.42224967, 0.2112639,
        0.27654213, 0.20864892, -0.07646349, 0.45877004,
        0.00141793, -0.14609534, 0.36447752, 0.09196436,
        0.28053468, 0.01560611, -0.20127171, -0.01140004};

    recurrent_to_output_weights_ = {
        0.43385774, -0.17194885, 0.2718237, 0.09215671,
        0.24107647, -0.39835793, 0.18212086, 0.01301402,
        0.48572797, -0.50656658, 0.20047462, -0.20607421,
        -0.51818722, -0.15390486, 0.0468148, 0.39922136};

    lstm_input_ = {{2., 3., 3., 4., 1., 1.}};
    lstm_golden_output_ = {{-0.02973187, 0.1229473, 0.20885126, -0.15358765,
                            -0.03716109, 0.12507336, 0.41193449, -0.20860538,
                            -0.15053082, 0.09120187, 0.24278517,
                            -0.12222792}};
  }
};

TEST_F(NoCifgNoPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
       LstmBlackBoxTest) {
  const int n_batch = 1;
  const int n_input = 2;
  // n_cell and n_output have the same size when there is no projection.
  const int n_cell = 4;
  const int n_output = 4;
  const int sequence_length = 3;

  UnidirectionalLSTMOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/false,
      /*use_projection_weights=*/false, /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
      {
          {sequence_length, n_batch, n_input},  // input tensor
          // input_to_{input, forget, cell, output} weights
          {n_cell, n_input}, {n_cell, n_input}, {n_cell, n_input},
          {n_cell, n_input},
          // recurrent_to_{input, forget, cell, output} weights
          {n_cell, n_output}, {n_cell, n_output}, {n_cell, n_output},
          {n_cell, n_output},
          // cell_to_{input, forget, output} weights (empty: no peephole)
          {0}, {0}, {0},
          // input/forget/cell/output gate biases
          {n_cell}, {n_cell}, {n_cell}, {n_cell},
          // projection weights and bias (empty: no projection)
          {0, 0}, {0},
          // output state and cell state
          {n_batch, n_output}, {n_batch, n_cell},
      });

  lstm.SetInputToInputWeights(input_to_input_weights_);
  lstm.SetInputToCellWeights(input_to_cell_weights_);
  lstm.SetInputToForgetWeights(input_to_forget_weights_);
  lstm.SetInputToOutputWeights(input_to_output_weights_);

  lstm.SetInputGateBias(input_gate_bias_);
  lstm.SetCellBias(cell_gate_bias_);
  lstm.SetForgetGateBias(forget_gate_bias_);
  lstm.SetOutputGateBias(output_gate_bias_);

  lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
  lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
  lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
  lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);

  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}

TEST_F(NoCifgNoPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
       LstmBlackBoxTestBatchMajor) {
  const int n_batch = 1;
  const int n_input = 2;
  const int n_cell = 4;
  const int n_output = 4;
  const int sequence_length = 3;

  UnidirectionalLSTMOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/false,
      /*use_projection_weights=*/false, /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
      {
          {sequence_length, n_batch, n_input},  // input tensor
          // input_to_{input, forget, cell, output} weights
          {n_cell, n_input}, {n_cell, n_input}, {n_cell, n_input},
          {n_cell, n_input},
          // recurrent_to_{input, forget, cell, output} weights
          {n_cell, n_output}, {n_cell, n_output}, {n_cell, n_output},
          {n_cell, n_output},
          // cell_to_{input, forget, output} weights (empty: no peephole)
          {0}, {0}, {0},
          // input/forget/cell/output gate biases
          {n_cell}, {n_cell}, {n_cell}, {n_cell},
          // projection weights and bias (empty: no projection)
          {0, 0}, {0},
          // output state and cell state
          {n_batch, n_output}, {n_batch, n_cell},
      });

  lstm.SetInputToInputWeights(input_to_input_weights_);
  lstm.SetInputToCellWeights(input_to_cell_weights_);
  lstm.SetInputToForgetWeights(input_to_forget_weights_);
  lstm.SetInputToOutputWeights(input_to_output_weights_);

  lstm.SetInputGateBias(input_gate_bias_);
  lstm.SetCellBias(cell_gate_bias_);
  lstm.SetForgetGateBias(forget_gate_bias_);
  lstm.SetOutputGateBias(output_gate_bias_);

  lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
  lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
  lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
  lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);

  // Reshuffle input and output to batch-major format. With n_batch == 1 the
  // time-major and batch-major layouts are bit-identical, so the original
  // golden data can be fed unchanged and these buffers stay unused.
  std::vector<std::vector<float>> input;
  std::vector<std::vector<float>> output;

  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm, /*tolerance=*/1e-5,
                /*time_major=*/false);
}

TEST_P(NoCifgNoPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
       HybridLstmBlackBoxTestUint8) {
  const int n_batch = 1;
  const int n_input = 2;
  const int n_cell = 4;
  const int n_output = 4;
  const int sequence_length = 3;

  HybridUnidirectionalLSTMOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/false,
      /*use_projection_weights=*/false, /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
      {
          {sequence_length, n_batch, n_input},  // input tensor
          // input_to_{input, forget, cell, output} weights
          {n_cell, n_input}, {n_cell, n_input}, {n_cell, n_input},
          {n_cell, n_input},
          // recurrent_to_{input, forget, cell, output} weights
          {n_cell, n_output}, {n_cell, n_output}, {n_cell, n_output},
          {n_cell, n_output},
          // cell_to_{input, forget, output} weights (empty: no peephole)
          {0}, {0}, {0},
          // input/forget/cell/output gate biases
          {n_cell}, {n_cell}, {n_cell}, {n_cell},
          // projection weights and bias (empty: no projection)
          {0, 0}, {0},
          // output state and cell state
          {n_batch, n_output}, {n_batch, n_cell},
      },
      TensorType_UINT8, GetParam());

  lstm.SetInputToInputWeights(input_to_input_weights_);
  lstm.SetInputToCellWeights(input_to_cell_weights_);
  lstm.SetInputToForgetWeights(input_to_forget_weights_);
  lstm.SetInputToOutputWeights(input_to_output_weights_);

  lstm.SetInputGateBias(input_gate_bias_);
  lstm.SetCellBias(cell_gate_bias_);
  lstm.SetForgetGateBias(forget_gate_bias_);
  lstm.SetOutputGateBias(output_gate_bias_);

  lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
  lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
  lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
  lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);

  // The quantized weights cost some accuracy, hence the looser tolerance.
  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm,
                /*tolerance=*/0.0157651);
}

TEST_P(NoCifgNoPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
       HybridLstmBlackBoxTestInt8) {
  const int n_batch = 1;
  const int n_input = 2;
  const int n_cell = 4;
  const int n_output = 4;
  const int sequence_length = 3;

  HybridUnidirectionalLSTMOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/false,
      /*use_projection_weights=*/false, /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
      {
          {sequence_length, n_batch, n_input},  // input tensor
          // input_to_{input, forget, cell, output} weights
          {n_cell, n_input}, {n_cell, n_input}, {n_cell, n_input},
          {n_cell, n_input},
          // recurrent_to_{input, forget, cell, output} weights
          {n_cell, n_output}, {n_cell, n_output}, {n_cell, n_output},
          {n_cell, n_output},
          // cell_to_{input, forget, output} weights (empty: no peephole)
          {0}, {0}, {0},
          // input/forget/cell/output gate biases
          {n_cell}, {n_cell}, {n_cell}, {n_cell},
          // projection weights and bias (empty: no projection)
          {0, 0}, {0},
          // output state and cell state
          {n_batch, n_output}, {n_batch, n_cell},
      },
      TensorType_INT8, GetParam());

  lstm.SetInputToInputWeights(input_to_input_weights_);
  lstm.SetInputToCellWeights(input_to_cell_weights_);
  lstm.SetInputToForgetWeights(input_to_forget_weights_);
  lstm.SetInputToOutputWeights(input_to_output_weights_);

  lstm.SetInputGateBias(input_gate_bias_);
  lstm.SetCellBias(cell_gate_bias_);
  lstm.SetForgetGateBias(forget_gate_bias_);
  lstm.SetOutputGateBias(output_gate_bias_);

  lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
  lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
  lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
  lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);

  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm,
                /*tolerance=*/0.0157651);
}
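// CIFG ("coupled input and forget gate") variant: the input gate is derived
// from the forget gate, so every input-gate buffer (input_to_input,
// recurrent_to_input, cell_to_input, input_gate_bias) is omitted. This
// fixture also enables peephole connections from the cell state.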
class CifgPeepholeNoProjectionNoClippingUnidirectionalLstmTest
    : public BaseUnidirectionalLstmTest {
  void SetUp() override {
    input_to_cell_weights_ = {-0.49770179, -0.27711356, -0.09624726,
                              0.05100781, 0.04717243, 0.48944736,
                              -0.38535351, -0.17212132};
    input_to_forget_weights_ = {-0.55291498, -0.42866567, 0.13056988,
                                -0.3633365, -0.22755712, 0.28253698,
                                0.24407166, 0.33826375};
    input_to_output_weights_ = {0.10725588, -0.02335852, -0.55932593,
                                -0.09426838, -0.44257352, 0.54939759,
                                0.01533556, 0.42751634};
    cell_gate_bias_ = {0., 0., 0., 0.};
    forget_gate_bias_ = {1., 1., 1., 1.};
    output_gate_bias_ = {0., 0., 0., 0.};

    recurrent_to_cell_weights_ = {
        0.54066205, -0.32668582, -0.43562764, -0.56094903,
        0.42957711, 0.01841056, -0.32764608, -0.33027974,
        -0.10826075, 0.20675004, 0.19069612, -0.03026325,
        -0.54532051, 0.33003211, 0.44901288, 0.21193194};

    recurrent_to_forget_weights_ = {
        -0.13832897, -0.0515101, -0.2359007, -0.16661474,
        -0.14340827, 0.36986142, 0.23414481, 0.55899,
        0.10798943, -0.41174671, 0.17751795, -0.34484994,
        -0.35874045, -0.11352962, 0.27268326, 0.54058349};

    recurrent_to_output_weights_ = {
        0.41613156, 0.42610586, -0.16495961, -0.5663873,
        0.30579174, -0.05115908, -0.33941799, 0.23364776,
        0.11178309, 0.09481031, -0.26424935, 0.46261835,
        0.50248802, 0.26114327, -0.43736315, 0.33149987};

    cell_to_forget_weights_ = {0.47485286, -0.51955009, -0.24458408,
                               0.31544167};
    cell_to_output_weights_ = {-0.17135078, 0.82760304, 0.85573703,
                               -0.77109635};

    lstm_input_ = {{2., 3., 3., 4., 1., 1.}};
    lstm_golden_output_ = {{-0.36444446, -0.00352185, 0.12886585, -0.05163646,
                            -0.42312205, -0.01218222, 0.24201041, -0.08124574,
                            -0.358325, -0.04621704, 0.21641694,
                            -0.06471302}};
  }
};

TEST_F(CifgPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
       LstmBlackBoxTest) {
  const int n_batch = 1;
  const int n_input = 2;
  const int n_cell = 4;
  const int n_output = 4;
  const int sequence_length = 3;

  UnidirectionalLSTMOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/true, /*use_peephole=*/true,
      /*use_projection_weights=*/false, /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
      {
          {sequence_length, n_batch, n_input},  // input tensor
          // input_to_{input, forget, cell, output} weights
          // (input_to_input empty under CIFG)
          {0, 0}, {n_cell, n_input}, {n_cell, n_input}, {n_cell, n_input},
          // recurrent_to_{input, forget, cell, output} weights
          // (recurrent_to_input empty under CIFG)
          {0, 0}, {n_cell, n_output}, {n_cell, n_output}, {n_cell, n_output},
          // cell_to_{input, forget, output} peephole weights
          // (cell_to_input empty under CIFG)
          {0}, {n_cell}, {n_cell},
          // input/forget/cell/output gate biases (input gate bias empty)
          {0}, {n_cell}, {n_cell}, {n_cell},
          // projection weights and bias (empty: no projection)
          {0, 0}, {0},
          // output state and cell state
          {n_batch, n_output}, {n_batch, n_cell},
      });

  lstm.SetInputToCellWeights(input_to_cell_weights_);
  lstm.SetInputToForgetWeights(input_to_forget_weights_);
  lstm.SetInputToOutputWeights(input_to_output_weights_);

  lstm.SetCellBias(cell_gate_bias_);
  lstm.SetForgetGateBias(forget_gate_bias_);
  lstm.SetOutputGateBias(output_gate_bias_);

  lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
  lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
  lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);

  lstm.SetCellToForgetWeights(cell_to_forget_weights_);
  lstm.SetCellToOutputWeights(cell_to_output_weights_);

  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}

TEST_P(CifgPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
       HybridLstmBlackBoxTestUint8) {
  const int n_batch = 1;
  const int n_input = 2;
  const int n_cell = 4;
  const int n_output = 4;
  const int sequence_length = 3;

  HybridUnidirectionalLSTMOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/true, /*use_peephole=*/true,
      /*use_projection_weights=*/false, /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
      {
          {sequence_length, n_batch, n_input},  // input tensor
          // input_to_{input, forget, cell, output} weights
          // (input_to_input empty under CIFG)
          {0, 0}, {n_cell, n_input}, {n_cell, n_input}, {n_cell, n_input},
          // recurrent_to_{input, forget, cell, output} weights
          // (recurrent_to_input empty under CIFG)
          {0, 0}, {n_cell, n_output}, {n_cell, n_output}, {n_cell, n_output},
          // cell_to_{input, forget, output} peephole weights
          // (cell_to_input empty under CIFG)
          {0}, {n_cell}, {n_cell},
          // input/forget/cell/output gate biases (input gate bias empty)
          {0}, {n_cell}, {n_cell}, {n_cell},
          // projection weights and bias (empty: no projection)
          {0, 0}, {0},
          // output state and cell state
          {n_batch, n_output}, {n_batch, n_cell},
      },
      TensorType_UINT8, GetParam());

  lstm.SetInputToCellWeights(input_to_cell_weights_);
  lstm.SetInputToForgetWeights(input_to_forget_weights_);
  lstm.SetInputToOutputWeights(input_to_output_weights_);

  lstm.SetCellBias(cell_gate_bias_);
  lstm.SetForgetGateBias(forget_gate_bias_);
  lstm.SetOutputGateBias(output_gate_bias_);

  lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
  lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
  lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);

  lstm.SetCellToForgetWeights(cell_to_forget_weights_);
  lstm.SetCellToOutputWeights(cell_to_output_weights_);

  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm,
                /*tolerance=*/0.03573);
}

TEST_P(CifgPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
       HybridLstmBlackBoxTestInt8) {
  const int n_batch = 1;
  const int n_input = 2;
  const int n_cell = 4;
  const int n_output = 4;
  const int sequence_length = 3;

  HybridUnidirectionalLSTMOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/true, /*use_peephole=*/true,
      /*use_projection_weights=*/false, /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
      {
          {sequence_length, n_batch, n_input},  // input tensor
          // input_to_{input, forget, cell, output} weights
          // (input_to_input empty under CIFG)
          {0, 0}, {n_cell, n_input}, {n_cell, n_input}, {n_cell, n_input},
          // recurrent_to_{input, forget, cell, output} weights
          // (recurrent_to_input empty under CIFG)
          {0, 0}, {n_cell, n_output}, {n_cell, n_output}, {n_cell, n_output},
          // cell_to_{input, forget, output} peephole weights
          // (cell_to_input empty under CIFG)
          {0}, {n_cell}, {n_cell},
          // input/forget/cell/output gate biases (input gate bias empty)
          {0}, {n_cell}, {n_cell}, {n_cell},
          // projection weights and bias (empty: no projection)
          {0, 0}, {0},
          // output state and cell state
          {n_batch, n_output}, {n_batch, n_cell},
      },
      TensorType_INT8, GetParam());

  lstm.SetInputToCellWeights(input_to_cell_weights_);
  lstm.SetInputToForgetWeights(input_to_forget_weights_);
  lstm.SetInputToOutputWeights(input_to_output_weights_);

  lstm.SetCellBias(cell_gate_bias_);
  lstm.SetForgetGateBias(forget_gate_bias_);
  lstm.SetOutputGateBias(output_gate_bias_);

  lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
  lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
  lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);

  lstm.SetCellToForgetWeights(cell_to_forget_weights_);
  lstm.SetCellToOutputWeights(cell_to_output_weights_);

  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm,
                /*tolerance=*/0.03573);
}

// Larger model: full peephole connections plus a projection layer that maps
// the 20-unit cell state down to 16 outputs.
class NoCifgPeepholeProjectionClippingUnidirectionalLstmTest
    : public BaseUnidirectionalLstmTest {
  void SetUp() override {
    input_to_input_weights_ = {
        0.021393683, 0.06124551, 0.046905167, -0.014657677, -0.03149463,
        0.09171803, 0.14647801, 0.10797193, -0.0057968358, 0.0019193048,
        -0.2726754, 0.10154029, -0.018539885, 0.080349885, -0.10262385,
        -0.022599787, -0.09121155, -0.008675967, -0.045206103, -0.0821282,
        -0.008045952, 0.015478081, 0.055217247, 0.038719587, 0.044153627,
        -0.06453243, 0.05031825, -0.046935108, -0.008164439, 0.014574226,
        -0.1671009, -0.15519552, -0.16819797, -0.13971269, -0.11953059,
        0.25005487, -0.22790983, 0.009855087, -0.028140958, -0.11200698,
        0.11295408, -0.0035217577, 0.054485075, 0.05184695, 0.064711206,
        0.10989193, 0.11674786, 0.03490607, 0.07727357, 0.11390585,
        -0.1863375, -0.1034451, -0.13945189, -0.049401227, -0.18767063,
        0.042483903, 0.14233552, 0.13832581, 0.18350165, 0.14545603,
        -0.028545704, 0.024939531, 0.050929718, 0.0076203286, -0.0029723682,
        -0.042484224, -0.11827596, -0.09171104, -0.10808628, -0.16327988,
        -0.2273378, -0.0993647, -0.017155107, 0.0023917493, 0.049272764,
        0.0038534778, 0.054764505, 0.089753784, 0.06947234, 0.08014476,
        -0.04544234, -0.0497073, -0.07135631, -0.048929106, -0.004042012,
        -0.009284026, 0.018042054, 0.0036860977, -0.07427302, -0.11434604,
        -0.018995456, 0.031487543, 0.012834908, 0.019977754, 0.044256654,
        -0.39292613, -0.18519334, -0.11651281, -0.06809892, 0.011373677};
    input_to_forget_weights_ = {
        -0.0018401089, -0.004852237,
0.03698424, 0.014181704, 0.028273236, -0.016726194, -0.05249759, -0.10204261, 0.00861066, -0.040979505, -0.009899187, 0.01923892, -0.028177269, -0.08535103, -0.14585495, 0.10662567, -0.01909731, -0.017883534, -0.0047269356, -0.045103323, 0.0030784295, 0.076784775, 0.07463696, 0.094531395, 0.0814421, -0.12257899, -0.033945758, -0.031303465, 0.045630626, 0.06843887, -0.13492945, -0.012480007, -0.0811829, -0.07224499, -0.09628791, 0.045100946, 0.0012300825, 0.013964662, 0.099372394, 0.02543059, 0.06958324, 0.034257296, 0.0482646, 0.06267997, 0.052625068, 0.12784666, 0.07077897, 0.025725935, 0.04165009, 0.07241905, 0.018668644, -0.037377294, -0.06277783, -0.08833636, -0.040120605, -0.011405586, -0.007808335, -0.010301386, -0.005102167, 0.027717464, 0.05483423, 0.11449111, 0.11289652, 0.10939839, 0.13396506, -0.08402166, -0.01901462, -0.044678304, -0.07720565, 0.014350063, -0.11757958, -0.0652038, -0.08185733, -0.076754324, -0.092614375, 0.10405491, 0.052960336, 0.035755895, 0.035839386, -0.012540553, 0.036881298, 0.02913376, 0.03420159, 0.05448447, -0.054523353, 0.02582715, 0.02327355, -0.011857179, -0.0011980024, -0.034641717, -0.026125094, -0.17582615, -0.15923657, -0.27486774, -0.0006143371, 0.0001771948, -8.470171e-05, 0.02651807, 0.045790765, 0.06956496}; input_to_cell_weights_ = { -0.04580283, -0.09549462, -0.032418985, -0.06454633, -0.043528453, 0.043018587, -0.049152344, -0.12418144, -0.078985475, -0.07596889, 0.019484362, -0.11434962, -0.0074034138, -0.06314844, -0.092981495, 0.0062155537, -0.025034338, -0.0028890965, 0.048929527, 0.06235075, 0.10665918, -0.032036792, -0.08505916, -0.10843358, -0.13002433, -0.036816437, -0.02130134, -0.016518239, 0.0047691227, -0.0025825808, 0.066017866, 0.029991534, -0.10652836, -0.1037554, -0.13056071, -0.03266643, -0.033702414, -0.006473424, -0.04611692, 0.014419339, -0.025174323, 0.0396852, 0.081777506, 0.06157468, 0.10210095, -0.009658194, 0.046511717, 0.03603906, 0.0069369148, 0.015960095, -0.06507666, 0.09551598, 0.053568836, 0.06408714, 0.12835667, -0.008714329, -0.20211966, -0.12093674, 0.029450472, 0.2849013, -0.029227901, 0.1164364, -0.08560263, 0.09941786, -0.036999565, -0.028842626, -0.0033637602, -0.017012902, -0.09720865, -0.11193351, -0.029155117, -0.017936034, -0.009768936, -0.04223324, -0.036159635, 0.06505112, -0.021742892, -0.023377212, -0.07221364, -0.06430552, 0.05453865, 0.091149814, 0.06387331, 0.007518393, 0.055960953, 0.069779344, 0.046411168, 0.10509911, 0.07463894, 0.0075130584, 0.012850982, 0.04555431, 0.056955688, 0.06555285, 0.050801456, -0.009862683, 0.00826772, -0.026555609, -0.0073611983, -0.0014897042}; input_to_output_weights_ = { -0.0998932, -0.07201956, -0.052803773, -0.15629593, -0.15001918, -0.07650751, 0.02359855, -0.075155355, -0.08037709, -0.15093534, 0.029517552, -0.04751393, 0.010350531, -0.02664851, -0.016839722, -0.023121163, 0.0077019283, 0.012851257, -0.05040649, -0.0129761, -0.021737747, -0.038305793, -0.06870586, -0.01481247, -0.001285394, 0.10124236, 0.083122835, 0.053313006, -0.062235646, -0.075637154, -0.027833903, 0.029774971, 0.1130802, 0.09218906, 0.09506135, -0.086665764, -0.037162706, -0.038880914, -0.035832845, -0.014481564, -0.09825003, -0.12048569, -0.097665586, -0.05287633, -0.0964047, -0.11366429, 0.035777505, 0.13568819, 0.052451383, 0.050649304, 0.05798951, -0.021852335, -0.099848844, 0.014740475, -0.078897946, 0.04974699, 0.014160473, 0.06973932, 0.04964942, 0.033364646, 0.08190124, 0.025535367, 0.050893165, 0.048514254, 0.06945813, -0.078907564, -0.06707616, -0.11844508, -0.09986688, 
-0.07509403, 0.06263226, 0.14925587, 0.20188436, 0.12098451, 0.14639415, 0.0015017595, -0.014267382, -0.03417257, 0.012711468, 0.0028300495, -0.024758482, -0.05098548, -0.0821182, 0.014225672, 0.021544158, 0.08949725, 0.07505268, -0.0020780868, 0.04908258, 0.06476295, -0.022907063, 0.027562456, 0.040185735, 0.019567577, -0.015598739, -0.049097303, -0.017121866, -0.083368234, -0.02332002, -0.0840956}; input_gate_bias_ = {0.02234832, 0.14757581, 0.18176508, 0.10380666, 0.053110216, -0.06928846, -0.13942584, -0.11816189, 0.19483899, 0.03652339, -0.10250295, 0.036714908, -0.18426876, 0.036065217, 0.21810818, 0.02383196, -0.043370757, 0.08690144, -0.04444982, 0.00030581196}; forget_gate_bias_ = {0.035185695, -0.042891346, -0.03032477, 0.23027696, 0.11098921, 0.15378423, 0.09263801, 0.09790885, 0.09508917, 0.061199076, 0.07665568, -0.015443159, -0.03499149, 0.046190713, 0.08895977, 0.10899629, 0.40694186, 0.06030037, 0.012413437, -0.06108739}; cell_gate_bias_ = {-0.024379363, 0.0055531194, 0.23377132, 0.033463873, -0.1483596, -0.10639995, -0.091433935, 0.058573797, -0.06809782, -0.07889636, -0.043246906, -0.09829136, -0.4279842, 0.034901652, 0.18797937, 0.0075234566, 0.016178843, 0.1749513, 0.13975595, 0.92058027}; output_gate_bias_ = {0.046159424, -0.0012809046, 0.03563469, 0.12648113, 0.027195795, 0.35373217, -0.018957434, 0.008907322, -0.0762701, 0.12018895, 0.04216877, 0.0022856654, 0.040952638, 0.3147856, 0.08225149, -0.057416286, -0.14995944, -0.008040261, 0.13208859, 0.029760877}; recurrent_to_input_weights_ = { -0.001374326, -0.078856036, 0.10672688, 0.029162422, -0.11585556, 0.02557986, -0.13446963, -0.035785314, -0.01244275, 0.025961924, -0.02337298, -0.044228926, -0.055839065, -0.046598054, -0.010546039, -0.06900766, 0.027239809, 0.022582639, -0.013296484, -0.05459212, 0.08981, -0.045407712, 0.08682226, -0.06867011, -0.14390695, -0.02916037, 0.000996957, 0.091420636, 0.14283475, -0.07390571, -0.06402044, 0.062524505, -0.093129106, 0.04860203, -0.08364217, -0.08119002, 0.009352075, 0.22920375, 0.0016303885, 0.11583097, -0.13732095, 0.012405723, -0.07551853, 0.06343048, 0.12162708, -0.031923793, -0.014335606, 0.01790974, -0.10650317, -0.0724401, 0.08554849, -0.05727212, 0.06556731, -0.042729504, -0.043227166, 0.011683251, -0.013082158, -0.029302018, -0.010899579, -0.062036745, -0.022509435, -0.00964907, -0.01567329, 0.04260106, -0.07787477, -0.11576462, 0.017356863, 0.048673786, -0.017577527, -0.05527947, -0.082487635, -0.040137455, -0.10820036, -0.04666372, 0.022746278, -0.07851417, 0.01068115, 0.032956902, 0.022433773, 0.0026891115, 0.08944216, -0.0685835, 0.010513544, 0.07228705, 0.02032331, -0.059686817, -0.0005566496, -0.086984694, 0.040414046, -0.1380399, 0.094208956, -0.05722982, 0.012092817, -0.04989123, -0.086576, -0.003399834, -0.04696032, -0.045747425, 0.10091314, 0.048676282, -0.029037097, 0.031399418, -0.0040285117, 0.047237843, 0.09504992, 0.041799378, -0.049185462, -0.031518843, -0.10516937, 0.026374253, 0.10058866, -0.0033195973, -0.041975245, 0.0073591834, 0.0033782164, -0.004325073, -0.10167381, 0.042500053, -0.01447153, 0.06464186, -0.017142897, 0.03312627, 0.009205989, 0.024138335, -0.011337001, 0.035530265, -0.010912711, 0.0706555, -0.005894094, 0.051841937, -0.1401738, -0.02351249, 0.0365468, 0.07590991, 0.08838724, 0.021681072, -0.10086113, 0.019608743, -0.06195883, 0.077335775, 0.023646897, -0.095322326, 0.02233014, 0.09756986, -0.048691444, -0.009579111, 0.07595467, 0.11480546, -0.09801813, 0.019894179, 0.08502348, 0.004032281, 0.037211012, 0.068537936, 
-0.048005626, -0.091520436, -0.028379958, -0.01556313, 0.06554592, -0.045599163, -0.01672207, -0.020169014, -0.011877351, -0.20212261, 0.010889619, 0.0047078193, 0.038385306, 0.08540671, -0.017140968, -0.0035865551, 0.016678626, 0.005633034, 0.015963363, 0.00871737, 0.060130805, 0.028611384, 0.10109069, -0.015060172, -0.07894427, 0.06401885, 0.011584063, -0.024466386, 0.0047652307, -0.09041358, 0.030737216, -0.0046374933, 0.14215417, -0.11823516, 0.019899689, 0.006106124, -0.027092824, 0.0786356, 0.05052217, -0.058925, -0.011402121, -0.024987547, -0.0013661642, -0.06832946, -0.015667673, -0.1083353, -0.00096863037, -0.06988685, -0.053350925, -0.027275559, -0.033664223, -0.07978348, -0.025200296, -0.017207067, -0.058403496, -0.055697463, 0.005798788, 0.12965427, -0.062582195, 0.0013350133, -0.10482091, 0.0379771, 0.072521195, -0.0029455067, -0.13797039, -0.03628521, 0.013806405, -0.017858358, -0.01008298, -0.07700066, -0.017081132, 0.019358726, 0.0027079724, 0.004635139, 0.062634714, -0.02338735, -0.039547626, -0.02050681, 0.03385117, -0.083611414, 0.002862572, -0.09421313, 0.058618143, -0.08598433, 0.00972939, 0.023867095, -0.053934585, -0.023203006, 0.07452513, -0.048767887, -0.07314807, -0.056307215, -0.10433547, -0.06440842, 0.04328182, 0.04389765, -0.020006588, -0.09076438, -0.11652589, -0.021705797, 0.03345259, -0.010329105, -0.025767034, 0.013057034, -0.07316461, -0.10145612, 0.06358255, 0.18531723, 0.07759293, 0.12006465, 0.1305557, 0.058638252, -0.03393652, 0.09622831, -0.16253184, -2.4580743e-06, 0.079869635, -0.070196845, -0.005644518, 0.06857898, -0.12598175, -0.035084512, 0.03156317, -0.12794146, -0.031963028, 0.04692781, 0.030070418, 0.0071660685, -0.095516115, -0.004643372, 0.040170413, -0.062104587, -0.0037324072, 0.0554317, 0.08184801, -0.019164372, 0.06791302, 0.034257166, -0.10307039, 0.021943003, 0.046745934, 0.0790918, -0.0265588, -0.007824208, 0.042546265, -0.00977924, -0.0002440307, -0.017384544, -0.017990116, 0.12252321, -0.014512694, -0.08251313, 0.08861942, 0.13589665, 0.026351685, 0.012641483, 0.07466548, 0.044301085, -0.045414884, -0.051112458, 0.03444247, -0.08502782, -0.04106223, -0.028126027, 0.028473156, 0.10467447}; recurrent_to_cell_weights_ = { -0.037322544, 0.018592842, 0.0056175636, -0.06253426, 0.055647098, -0.05713207, -0.05626563, 0.005559383, 0.03375411, -0.025757805, -0.088049285, 0.06017052, -0.06570978, 0.007384076, 0.035123326, -0.07920549, 0.053676967, 0.044480428, -0.07663568, 0.0071805613, 0.08089997, 0.05143358, 0.038261272, 0.03339287, -0.027673481, 0.044746667, 0.028349208, 0.020090483, -0.019443132, -0.030755889, -0.0040000007, 0.04465846, -0.021585021, 0.0031670958, 0.0053199246, -0.056117613, -0.10893326, 0.076739706, -0.08509834, -0.027997585, 0.037871376, 0.01449768, -0.09002357, -0.06111149, -0.046195522, 0.0422062, -0.005683705, -0.1253618, -0.012925729, -0.04890792, 0.06985068, 0.037654128, 0.03398274, -0.004781977, 0.007032333, -0.031787455, 0.010868644, -0.031489216, 0.09525667, 0.013939797, 0.0058680447, 0.0167067, 0.02668468, -0.04797466, -0.048885044, -0.12722108, 0.035304096, 0.06554885, 0.00972396, -0.039238118, -0.05159735, -0.11329045, 0.1613692, -0.03750952, 0.06529313, -0.071974665, -0.11769596, 0.015524369, -0.0013754242, -0.12446318, 0.02786344, -0.014179351, 0.005264273, 0.14376344, 0.015983658, 0.03406988, -0.06939408, 0.040699873, 0.02111075, 0.09669095, 0.041345075, -0.08316494, -0.07684199, -0.045768797, 0.032298047, -0.041805092, 0.0119405, 0.0061010392, 0.12652606, 0.0064572375, -0.024950314, 0.11574242, 
0.04508852, -0.04335324, 0.06760663, -0.027437469, 0.07216407, 0.06977076, -0.05438599, 0.034033038, -0.028602652, 0.05346137, 0.043184172, -0.037189785, 0.10420091, 0.00882477, -0.054019816, -0.074273005, -0.030617684, -0.0028467078, 0.024302477, -0.0038869337, 0.005332455, 0.0013399826, 0.04361412, -0.007001822, 0.09631092, -0.06702025, -0.042049985, -0.035070654, -0.04103342, -0.10273396, 0.0544271, 0.037184782, -0.13150354, -0.0058036847, -0.008264958, 0.042035464, 0.05891794, 0.029673764, 0.0063542654, 0.044788733, 0.054816857, 0.062257513, -0.00093483756, 0.048938446, -0.004952862, -0.007730018, -0.04043371, -0.017094059, 0.07229206, -0.023670016, -0.052195564, -0.025616996, -0.01520939, 0.045104615, -0.007376126, 0.003533447, 0.006570588, 0.056037236, 0.12436656, 0.051817212, 0.028532185, -0.08686856, 0.11868599, 0.07663395, -0.07323171, 0.03463402, -0.050708205, -0.04458982, -0.11590894, 0.021273347, 0.1251325, -0.15313013, -0.12224372, 0.17228661, 0.023029093, 0.086124025, 0.006445803, -0.03496501, 0.028332196, 0.04449512, -0.042436164, -0.026587414, -0.006041347, -0.09292539, -0.05678812, 0.03897832, 0.09465633, 0.008115513, -0.02171956, 0.08304309, 0.071401566, 0.019622514, 0.032163795, -0.004167056, 0.02295182, 0.030739572, 0.056506045, 0.004612461, 0.06524936, 0.059999723, 0.046395954, -0.0045512207, -0.1335546, -0.030136576, 0.11584653, -0.014678886, 0.0020118146, -0.09688814, -0.0790206, 0.039770417, -0.0329582, 0.07922767, 0.029322514, 0.026405897, 0.04207835, -0.07073373, 0.063781224, 0.0859677, -0.10925287, -0.07011058, 0.048005477, 0.03438226, -0.09606514, -0.006669445, -0.043381985, 0.04240257, -0.06955775, -0.06769346, 0.043903265, -0.026784198, -0.017840602, 0.024307009, -0.040079936, -0.019946516, 0.045318738, -0.12233574, 0.026170589, 0.0074471775, 0.15978073, 0.10185836, 0.10298046, -0.015476589, -0.039390966, -0.072174534, 0.0739445, -0.1211869, -0.0347889, -0.07943156, 0.014809798, -0.12412325, -0.0030663363, 0.039695457, 0.0647603, -0.08291318, -0.018529687, -0.004423833, 0.0037507233, 0.084633216, -0.01514876, -0.056505352, -0.012800942, -0.06994386, 0.012962922, -0.031234352, 0.07029052, 0.016418684, 0.03618972, 0.055686004, -0.08663945, -0.017404709, -0.054761406, 0.029065743, 0.052404847, 0.020238016, 0.0048197987, -0.0214882, 0.07078733, 0.013016777, 0.06262858, 0.009184685, 0.020785125, -0.043904778, -0.0270329, -0.03299152, -0.060088247, -0.015162964, -0.001828936, 0.12642565, -0.056757294, 0.013586685, 0.09232601, -0.035886683, 0.06000002, 0.05229691, -0.052580316, -0.082029596, -0.010794592, 0.012947712, -0.036429964, -0.085508935, -0.13127148, -0.017744139, 0.031502828, 0.036232427, -0.031581745, 0.023051167, -0.05325106, -0.03421577, 0.028793324, -0.034633752, -0.009881397, -0.043551125, -0.018609839, 0.0019097115, -0.008799762, 0.056595087, 0.0022273948, 0.055752404}; recurrent_to_forget_weights_ = { -0.057784554, -0.026057621, -0.068447545, -0.022581743, 0.14811787, 0.10826372, 0.09471067, 0.03987225, -0.0039523416, 0.00030638507, 0.053185795, 0.10572994, 0.08414449, -0.022036452, -0.00066928595, -0.09203576, 0.032950465, -0.10985798, -0.023809856, 0.0021431844, -0.02196096, -0.00326074, 0.00058621005, -0.074678116, -0.06193199, 0.055729095, 0.03736828, 0.020123724, 0.061878487, -0.04729229, 0.034919553, -0.07585433, -0.04421272, -0.044019096, 0.085488975, 0.04058006, -0.06890133, -0.030951202, -0.024628663, -0.07672815, 0.034293607, 0.08556707, -0.05293577, -0.033561368, -0.04899627, 0.0241671, 0.015736353, -0.095442444, -0.029564252, 0.016493602, 
-0.035026584, 0.022337519, -0.026871363, 0.004780428, 0.0077918363, -0.03601621, 0.016435321, -0.03263031, -0.09543275, -0.047392778, 0.013454138, 0.028934088, 0.01685226, -0.086110644, -0.046250615, -0.01847454, 0.047608484, 0.07339695, 0.034546845, -0.04881143, 0.009128804, -0.08802852, 0.03761666, 0.008096139, -0.014454086, 0.014361001, -0.023502491, -0.0011840804, -0.07607001, 0.001856849, -0.06509276, -0.006021153, -0.08570962, -0.1451793, 0.060212336, 0.055259194, 0.06974018, 0.049454916, -0.027794661, -0.08077226, -0.016179763, 0.1169753, 0.17213494, -0.0056326236, -0.053934924, -0.0124349, -0.11520337, 0.05409887, 0.088759385, 0.0019655675, 0.0042065294, 0.03881498, 0.019844765, 0.041858196, -0.05695512, 0.047233116, 0.038937137, -0.06542224, 0.014429736, -0.09719407, 0.13908425, -0.05379757, 0.012321099, 0.082840554, -0.029899208, 0.044217527, 0.059855383, 0.07711018, -0.045319796, 0.0948846, -0.011724666, -0.0033288454, -0.033542685, -0.04764985, -0.13873616, 0.040668588, 0.034832682, -0.015319203, -0.018715994, 0.046002675, 0.0599172, -0.043107376, 0.0294216, -0.002314414, -0.022424703, 0.0030315618, 0.0014641669, 0.0029166266, -0.11878115, 0.013738511, 0.12375372, -0.0006038222, 0.029104086, 0.087442465, 0.052958444, 0.07558703, 0.04817258, 0.044462286, -0.015213451, -0.08783778, -0.0561384, -0.003008196, 0.047060397, -0.002058388, 0.03429439, -0.018839769, 0.024734668, 0.024614193, -0.042046934, 0.09597743, -0.0043254104, 0.04320769, 0.0064070094, -0.0019131786, -0.02558259, -0.022822596, -0.023273505, -0.02464396, -0.10991725, -0.006240552, 0.0074488563, 0.024044557, 0.04383914, -0.046476185, 0.028658995, 0.060410924, 0.050786525, 0.009452605, -0.0073054377, -0.024810238, 0.0052906186, 0.0066939713, -0.0020913032, 0.014515517, 0.015898481, 0.021362653, -0.030262267, 0.016587038, -0.011442813, 0.041154444, -0.007631438, -0.03423484, -0.010977775, 0.036152758, 0.0066366293, 0.11915515, 0.02318443, -0.041350313, 0.021485701, -0.10906167, -0.028218046, -0.00954771, 0.020531068, -0.11995105, -0.03672871, 0.024019798, 0.014255957, -0.05221243, -0.00661567, -0.04630967, 0.033188973, 0.10107534, -0.014027541, 0.030796422, -0.10270911, -0.035999842, 0.15443139, 0.07684145, 0.036571592, -0.035900835, -0.0034699554, 0.06209149, 0.015920248, -0.031122351, -0.03858649, 0.01849943, 0.13872518, 0.01503974, 0.069941424, -0.06948533, -0.0088794185, 0.061282158, -0.047401894, 0.03100163, -0.041533746, -0.10430945, 0.044574402, -0.01425562, -0.024290353, 0.034563623, 0.05866852, 0.023947537, -0.09445152, 0.035450947, 0.02247216, -0.0042998926, 0.061146557, -0.10250651, 0.020881841, -0.06747029, 0.10062043, -0.0023941975, 0.03532124, -0.016341697, 0.09685456, -0.016764693, 0.051808182, 0.05875331, -0.04536488, 0.001626336, -0.028892258, -0.01048663, -0.009793449, -0.017093895, 0.010987891, 0.02357273, -0.00010856845, 0.0099760275, -0.001845119, -0.03551521, 0.0018358806, 0.05763657, -0.01769146, 0.040995963, 0.02235177, -0.060430344, 0.11475477, -0.023854522, 0.10071741, 0.0686208, -0.014250481, 0.034261297, 0.047418304, 0.08562733, -0.030519066, 0.0060542435, 0.014653856, -0.038836084, 0.04096551, 0.032249358, -0.08355519, -0.026823482, 0.056386515, -0.010401743, -0.028396193, 0.08507674, 0.014410365, 0.020995233, 0.17040324, 0.11511526, 0.02459721, 0.0066619175, 0.025853224, -0.023133837, -0.081302024, 0.017264642, -0.009585969, 0.09491168, -0.051313367, 0.054532815, -0.014298593, 0.10657464, 0.007076659, 0.10964551, 0.0409152, 0.008275321, -0.07283536, 0.07937492, 0.04192024, -0.1075027}; 
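    // Shape check: each recurrent_to_*_weights_ buffer above and below holds
    // n_cell * n_output = 20 * 16 = 320 values, matching the
    // {n_cell, n_output} tensor shapes declared by the tests that use this
    // fixture.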
recurrent_to_output_weights_ = { 0.025825322, -0.05813119, 0.09495884, -0.045984812, -0.01255415, -0.0026479573, -0.08196161, -0.054914974, -0.0046604523, -0.029587349, -0.044576716, -0.07480124, -0.082868785, 0.023254942, 0.027502948, -0.0039728214, -0.08683098, -0.08116779, -0.014675607, -0.037924774, -0.023314456, -0.007401714, -0.09255757, 0.029460307, -0.08829125, -0.005139627, -0.08989442, -0.0555066, 0.13596267, -0.025062224, -0.048351806, -0.03850004, 0.07266485, -0.022414139, 0.05940088, 0.075114764, 0.09597592, -0.010211725, -0.0049794707, -0.011523867, -0.025980417, 0.072999895, 0.11091378, -0.081685916, 0.014416728, 0.043229222, 0.034178585, -0.07530371, 0.035837382, -0.085607, -0.007721233, -0.03287832, -0.043848954, -0.06404588, -0.06632928, -0.073643476, 0.008214239, -0.045984086, 0.039764922, 0.03474462, 0.060612556, -0.080590084, 0.049127717, 0.04151091, -0.030063879, 0.008801774, -0.023021035, -0.019558564, 0.05158114, -0.010947698, -0.011825728, 0.0075720972, 0.0699727, -0.0039981045, 0.069350146, 0.08799282, 0.016156472, 0.035502106, 0.11695009, 0.006217345, 0.13392477, -0.037875112, 0.025745004, 0.08940699, -0.00924166, 0.0046702605, -0.036598757, -0.08811812, 0.10522024, -0.032441203, 0.008176899, -0.04454919, 0.07058152, 0.0067963637, 0.039206743, 0.03259838, 0.03725492, -0.09515802, 0.013326398, -0.052055415, -0.025676316, 0.03198509, -0.015951829, -0.058556724, 0.036879618, 0.043357447, 0.028362012, -0.05908629, 0.0059240665, -0.04995891, -0.019187413, 0.0276265, -0.01628143, 0.0025863599, 0.08800015, 0.035250366, -0.022165963, -0.07328642, -0.009415526, -0.07455109, 0.11690406, 0.0363299, 0.07411125, 0.042103454, -0.009660886, 0.019076364, 0.018299393, -0.046004917, 0.08891175, 0.0431396, -0.026327137, -0.051502608, 0.08979574, -0.051670972, 0.04940282, -0.07491107, -0.021240504, 0.022596184, -0.034280192, 0.060163025, -0.058211457, -0.051837247, -0.01349775, -0.04639988, -0.035936575, -0.011681591, 0.064818054, 0.0073146066, -0.021745546, -0.043124277, -0.06471268, -0.07053354, -0.029321948, -0.05330136, 0.016933719, -0.053782392, 0.13747959, -0.1361751, -0.11569455, 0.0033329215, 0.05693899, -0.053219706, 0.063698, 0.07977434, -0.07924483, 0.06936997, 0.0034815092, -0.007305279, -0.037325785, -0.07251102, -0.033633437, -0.08677009, 0.091591336, -0.14165086, 0.021752775, 0.019683983, 0.0011612234, -0.058154266, 0.049996935, 0.0288841, -0.0024567875, -0.14345716, 0.010955264, -0.10234828, 0.1183656, -0.0010731248, -0.023590032, -0.072285876, -0.0724771, -0.026382286, -0.0014920527, 0.042667855, 0.0018776858, 0.02986552, 0.009814309, 0.0733756, 0.12289186, 0.018043943, -0.0458958, 0.049412545, 0.033632483, 0.05495232, 0.036686596, -0.013781798, -0.010036754, 0.02576849, -0.08307328, 0.010112348, 0.042521734, -0.05869831, -0.071689695, 0.03876447, -0.13275425, -0.0352966, -0.023077697, 0.10285965, 0.084736146, 0.15568255, -0.00040734606, 0.027835453, -0.10292561, -0.032401145, 0.10053256, -0.026142767, -0.08271222, -0.0030240538, -0.016368777, 0.1070414, 0.042672627, 0.013456989, -0.0437609, -0.022309763, 0.11576483, 0.04108048, 0.061026827, -0.0190714, -0.0869359, 0.037901703, 0.0610107, 0.07202949, 0.01675338, 0.086139716, -0.08795751, -0.014898893, -0.023771819, -0.01965048, 0.007955471, -0.043740474, 0.03346837, -0.10549954, 0.090567775, 0.042013682, -0.03176985, 0.12569028, -0.02421228, -0.029526481, 0.023851605, 0.031539805, 0.05292009, -0.02344001, -0.07811758, -0.08834428, 0.10094801, 0.16594367, -0.06861939, -0.021256343, -0.041093912, -0.06669611, 
0.035498552, 0.021757556, -0.09302526, -0.015403468, -0.06614931, -0.051798206, -0.013874718, 0.03630673, 0.010412845, -0.08077351, 0.046185967, 0.0035662893, 0.03541868, -0.094149634, -0.034814864, 0.003128424, -0.020674974, -0.03944324, -0.008110165, -0.11113267, 0.08484226, 0.043586485, 0.040582247, 0.0968012, -0.065249965, -0.028036479, 0.0050708856, 0.0017462453, 0.0326779, 0.041296225, 0.09164146, -0.047743853, -0.015952192, -0.034451712, 0.084197424, -0.05347844, -0.11768019, 0.085926116, -0.08251791, -0.045081906, 0.0948852, 0.068401024, 0.024856757, 0.06978981, -0.057309967, -0.012775832, -0.0032452994, 0.01977615, -0.041040014, -0.024264973, 0.063464895, 0.05431621, }; cell_to_input_weights_ = { 0.040369894, 0.030746894, 0.24704495, 0.018586371, -0.037586458, -0.15312155, -0.11812848, -0.11465643, 0.20259799, 0.11418174, -0.10116027, -0.011334949, 0.12411352, -0.076769054, -0.052169047, 0.21198851, -0.38871562, -0.09061183, -0.09683246, -0.21929175}; cell_to_forget_weights_ = { -0.01998659, -0.15568835, -0.24248174, -0.012770197, 0.041331276, -0.072311886, -0.052123554, -0.0066330447, -0.043891653, 0.036225766, -0.047248036, 0.021479502, 0.033189066, 0.11952997, -0.020432774, 0.64658105, -0.06650122, -0.03467612, 0.095340036, 0.23647355}; cell_to_output_weights_ = { 0.08286371, -0.08261836, -0.51210177, 0.002913762, 0.17764764, -0.5495371, -0.08460716, -0.24552552, 0.030037103, 0.04123544, -0.11940523, 0.007358328, 0.1890978, 0.4833202, -0.34441817, 0.36312827, -0.26375428, 0.1457655, -0.19724406, 0.15548733}; projection_weights_ = { -0.009802181, 0.09401916, 0.0717386, -0.13895074, 0.09641832, 0.060420845, 0.08539281, 0.054285463, 0.061395317, 0.034448683, -0.042991187, 0.019801661, -0.16840284, -0.015726732, -0.23041931, -0.024478018, -0.10959692, -0.013875541, 0.18600968, -0.061274476, 0.0138165, -0.08160894, -0.07661644, 0.032372914, 0.16169067, 0.22465782, -0.03993472, -0.004017731, 0.08633481, -0.28869787, 0.08682067, 0.17240396, 0.014975425, 0.056431185, 0.031037588, 0.16702051, 0.0077946745, 0.15140012, 0.29405436, 0.120285, -0.188994, -0.027265169, 0.043389652, -0.022061434, 0.014777949, -0.20203483, 0.094781205, 0.19100232, 0.13987629, -0.036132768, -0.06426278, -0.05108664, 0.13221376, 0.009441198, -0.16715929, 0.15859416, -0.040437475, 0.050779544, -0.022187516, 0.012166504, 0.027685808, -0.07675938, -0.0055694645, -0.09444123, 0.0046453946, 0.050794356, 0.10770313, -0.20790008, -0.07149004, -0.11425117, 0.008225835, -0.035802525, 0.14374903, 0.15262283, 0.048710253, 0.1847461, -0.007487823, 0.11000021, -0.09542012, 0.22619456, -0.029149994, 0.08527916, 0.009043713, 0.0042746216, 0.016261552, 0.022461696, 0.12689082, -0.043589946, -0.12035478, -0.08361797, -0.050666027, -0.1248618, -0.1275799, -0.071875185, 0.07377272, 0.09944291, -0.18897448, -0.1593054, -0.06526116, -0.040107165, -0.004618631, -0.067624845, -0.007576253, 0.10727444, 0.041546922, -0.20424393, 0.06907816, 0.050412357, 0.00724631, 0.039827548, 0.12449835, 0.10747581, 0.13708383, 0.09134148, -0.12617786, -0.06428341, 0.09956831, 0.1208086, -0.14676677, -0.0727722, 0.1126304, 0.010139365, 0.015571211, -0.038128063, 0.022913318, -0.042050496, 0.16842307, -0.060597885, 0.10531834, -0.06411776, -0.07451711, -0.03410368, -0.13393489, 0.06534304, 0.003620307, 0.04490757, 0.05970546, 0.05197996, 0.02839995, 0.10434969, -0.013699693, -0.028353551, -0.07260381, 0.047201227, -0.024575593, -0.036445823, 0.07155557, 0.009672501, -0.02328883, 0.009533515, -0.03606021, -0.07421458, -0.028082801, -0.2678904, 
-0.13221288, 0.18419984, -0.13012612, -0.014588381, -0.035059117, -0.04824723, 0.07830115, -0.056184657, 0.03277091, 0.025466874, 0.14494097, -0.12522776, -0.098633975, -0.10766018, -0.08317623, 0.08594209, 0.07749552, 0.039474737, 0.1776665, -0.07409566, -0.0477268, 0.29323658, 0.10801441, 0.1154011, 0.013952499, 0.10739139, 0.10708251, -0.051456142, 0.0074137426, -0.10430189, 0.10034707, 0.045594677, 0.0635285, -0.0715442, -0.089667566, -0.10811871, 0.00026344223, 0.08298446, -0.009525053, 0.006585689, -0.24567553, -0.09450807, 0.09648481, 0.026996298, -0.06419476, -0.04752702, -0.11063944, -0.23441927, -0.17608605, -0.052156363, 0.067035615, 0.19271925, -0.0032889997, -0.043264326, 0.09663576, -0.057112187, -0.10100678, 0.0628376, 0.04447668, 0.017961001, -0.10094388, -0.10190601, 0.18335468, 0.10494553, -0.052095775, -0.0026118709, 0.10539724, -0.04383912, -0.042349473, 0.08438151, -0.1947263, 0.02251204, 0.11216432, -0.10307853, 0.17351969, -0.039091777, 0.08066188, -0.00561982, 0.12633002, 0.11335965, -0.0088127935, -0.019777594, 0.06864014, -0.059751723, 0.016233567, -0.06894641, -0.28651384, -0.004228674, 0.019708522, -0.16305895, -0.07468996, -0.0855457, 0.099339016, -0.07580735, -0.13775392, 0.08434318, 0.08330512, -0.12131499, 0.031935584, 0.09180414, -0.08876437, -0.08049874, 0.008753825, 0.03498998, 0.030215185, 0.03907079, 0.089751154, 0.029194152, -0.03337423, -0.019092513, 0.04331237, 0.04299654, -0.036394123, -0.12915532, 0.09793732, 0.07512415, -0.11319543, -0.032502122, 0.15661901, 0.07671967, -0.005491124, -0.19379048, -0.218606, 0.21448623, 0.017840758, 0.1416943, -0.07051762, 0.19488361, 0.02664691, -0.18104725, -0.09334311, 0.15026465, -0.15493552, -0.057762887, -0.11604192, -0.262013, -0.01391798, 0.012185008, 0.11156489, -0.07483202, 0.06693364, -0.26151478, 0.046425626, 0.036540434, -0.16435726, 0.17338543, -0.21401681, -0.11385144, -0.08283257, -0.069031075, 0.030635102, 0.010969227, 0.11109743, 0.010919218, 0.027526086, 0.13519906, 0.01891392, -0.046839405, -0.040167913, 0.017953383, -0.09700955, 0.0061885654, -0.07000971, 0.026893595, -0.038844477, 0.14543656}; lstm_input_ = { { 0.787926, 0.151646, 0.071352, 0.118426, 0.458058, 0.596268, 0.998386, 0.568695, 0.864524, 0.571277, 0.073204, 0.296072, 0.743333, 0.069199, 0.045348, 0.867394, 0.291279, 0.013714, 0.482521, 0.626339}, { 0.295743, 0.544053, 0.690064, 0.858138, 0.497181, 0.642421, 0.524260, 0.134799, 0.003639, 0.162482, 0.640394, 0.930399, 0.050782, 0.432485, 0.988078, 0.082922, 0.563329, 0.865614, 0.333232, 0.259916} }; lstm_golden_output_ = { { -0.00396806, 0.029352, -0.00279226, 0.0159977, -0.00835576, -0.0211779, 0.0283512, -0.0114597, 0.00907307, -0.0244004, -0.0152191, -0.0259063, 0.00914318, 0.00415118, 0.017147, 0.0134203, -0.0166936, 0.0381209, 0.000889694, 0.0143363, -0.0328911, -0.0234288, 0.0333051, -0.012229, 0.0110322, -0.0457725, -0.000832209, -0.0202817, 0.0327257, 0.0121308, 0.0155969, 0.0312091, -0.0213783, 0.0350169, 0.000324794, 0.0276012, -0.0263374, -0.0371449, 0.0446149, -0.0205474, 0.0103729, -0.0576349, -0.0150052, -0.0292043, 0.0376827, 0.0136115, 0.0243435, 0.0354492, -0.0189322, 0.0464512, -0.00251373, 0.0225745, -0.0308346, -0.0317124, 0.0460407, -0.0189395, 0.0149363, -0.0530162, -0.0150767, -0.0340193, 0.0286833, 0.00824207, 0.0264887, 0.0305169}, { -0.013869, 0.0287268, -0.00334693, 0.00733398, -0.0287926, -0.0186926, 0.0193662, -0.0115437, 0.00422612, -0.0345232, 0.00223253, -0.00957321, 0.0210624, 0.013331, 0.0150954, 0.02168, -0.0141913, 0.0322082, 0.00227024, 0.0260507, 
        -0.0188721, -0.0296489, 0.0399134, -0.0160509, 0.0116039,
        -0.0447318, -0.0150515, -0.0277406, 0.0316596, 0.0118233,
        0.0214762, 0.0293641, -0.0204549, 0.0450315, -0.00117378,
        0.0167673, -0.0375007, -0.0238314, 0.038784, -0.0174034,
        0.0131743, -0.0506589, -0.0048447, -0.0240239, 0.0325789,
        0.00790065, 0.0220157, 0.0333314, -0.0264787, 0.0387855,
        -0.000764675, 0.0217599, -0.037537, -0.0335206, 0.0431679,
        -0.0211424, 0.010203, -0.062785, -0.00832363, -0.025181,
        0.0412031, 0.0118723, 0.0239643, 0.0394009}};
  }
};

TEST_F(NoCifgPeepholeProjectionClippingUnidirectionalLstmTest,
       LstmBlackBoxTest) {
  const int n_batch = 2;
  const int n_input = 5;
  const int n_cell = 20;
  const int n_output = 16;
  const int sequence_length = 4;

  UnidirectionalLSTMOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/true,
      /*use_projection_weights=*/true, /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
      {
          {sequence_length, n_batch, n_input},  // input tensor
          // input_to_{input, forget, cell, output} weights
          {n_cell, n_input}, {n_cell, n_input}, {n_cell, n_input},
          {n_cell, n_input},
          // recurrent_to_{input, forget, cell, output} weights
          {n_cell, n_output}, {n_cell, n_output}, {n_cell, n_output},
          {n_cell, n_output},
          // cell_to_{input, forget, output} peephole weights
          {n_cell}, {n_cell}, {n_cell},
          // input/forget/cell/output gate biases
          {n_cell}, {n_cell}, {n_cell}, {n_cell},
          // projection weights; projection bias left empty
          {n_output, n_cell}, {0},
          // output state and cell state
          {n_batch, n_output}, {n_batch, n_cell},
      });

  lstm.SetInputToInputWeights(input_to_input_weights_);
  lstm.SetInputToCellWeights(input_to_cell_weights_);
  lstm.SetInputToForgetWeights(input_to_forget_weights_);
  lstm.SetInputToOutputWeights(input_to_output_weights_);

  lstm.SetInputGateBias(input_gate_bias_);
  lstm.SetCellBias(cell_gate_bias_);
  lstm.SetForgetGateBias(forget_gate_bias_);
  lstm.SetOutputGateBias(output_gate_bias_);

  lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
  lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
  lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
  lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);

  lstm.SetCellToInputWeights(cell_to_input_weights_);
  lstm.SetCellToForgetWeights(cell_to_forget_weights_);
  lstm.SetCellToOutputWeights(cell_to_output_weights_);

  lstm.SetProjectionWeights(projection_weights_);

  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}

TEST_P(NoCifgPeepholeProjectionClippingUnidirectionalLstmTest,
       HybridLstmBlackBoxTestUint8) {
  const int n_batch = 2;
  const int n_input = 5;
  const int n_cell = 20;
  const int n_output = 16;
  const int sequence_length = 4;
  // The projection model is only verified for the GetParam() == false
  // variant of the parameterized suite.
  if (GetParam()) {
    return;
  }

  HybridUnidirectionalLSTMOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/true,
      /*use_projection_weights=*/true, /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
      {
          {sequence_length, n_batch, n_input},  // input tensor
          // input_to_{input, forget, cell, output} weights
          {n_cell, n_input}, {n_cell, n_input}, {n_cell, n_input},
          {n_cell, n_input},
          // recurrent_to_{input, forget, cell, output} weights
          {n_cell, n_output}, {n_cell, n_output}, {n_cell, n_output},
          {n_cell, n_output},
          // cell_to_{input, forget, output} peephole weights
          {n_cell}, {n_cell}, {n_cell},
          // input/forget/cell/output gate biases
          {n_cell}, {n_cell}, {n_cell}, {n_cell},
          // projection weights; projection bias left empty
          {n_output, n_cell}, {0},
          // output state and cell state
          {n_batch, n_output}, {n_batch, n_cell},
      },
      TensorType_UINT8, GetParam());

  lstm.SetInputToInputWeights(input_to_input_weights_);
  lstm.SetInputToCellWeights(input_to_cell_weights_);
  lstm.SetInputToForgetWeights(input_to_forget_weights_);
  lstm.SetInputToOutputWeights(input_to_output_weights_);

  lstm.SetInputGateBias(input_gate_bias_);
  lstm.SetCellBias(cell_gate_bias_);
  lstm.SetForgetGateBias(forget_gate_bias_);
  lstm.SetOutputGateBias(output_gate_bias_);

  lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
  lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
  lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
  lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);

  lstm.SetCellToInputWeights(cell_to_input_weights_);
  lstm.SetCellToForgetWeights(cell_to_forget_weights_);
  lstm.SetCellToOutputWeights(cell_to_output_weights_);

  lstm.SetProjectionWeights(projection_weights_);

  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm,
                /*tolerance=*/0.00467);
}

TEST_P(NoCifgPeepholeProjectionClippingUnidirectionalLstmTest,
       HybridLstmBlackBoxTestInt8) {
  // The projection model is only verified for the GetParam() == false
  // variant of the parameterized suite.
  if (GetParam()) {
    return;
  }
  const int n_batch = 2;
  const int n_input = 5;
  const int n_cell = 20;
  const int n_output = 16;
  const int sequence_length = 4;

  HybridUnidirectionalLSTMOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/true,
      /*use_projection_weights=*/true, /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
      {
          {sequence_length, n_batch, n_input},  // input tensor
          // input_to_{input, forget, cell, output} weights
          {n_cell, n_input}, {n_cell, n_input}, {n_cell, n_input},
          {n_cell, n_input},
          // recurrent_to_{input, forget, cell, output} weights
          {n_cell, n_output}, {n_cell, n_output}, {n_cell, n_output},
          {n_cell, n_output},
          // cell_to_{input, forget, output} peephole weights
          {n_cell}, {n_cell}, {n_cell},
          // input/forget/cell/output gate biases
          {n_cell}, {n_cell}, {n_cell}, {n_cell},
          // projection weights; projection bias left empty
          {n_output, n_cell}, {0},
          // output state and cell state
          {n_batch, n_output}, {n_batch, n_cell},
      },
      TensorType_INT8, GetParam());

  lstm.SetInputToInputWeights(input_to_input_weights_);
  lstm.SetInputToCellWeights(input_to_cell_weights_);
  lstm.SetInputToForgetWeights(input_to_forget_weights_);
  lstm.SetInputToOutputWeights(input_to_output_weights_);

  lstm.SetInputGateBias(input_gate_bias_);
  lstm.SetCellBias(cell_gate_bias_);
  lstm.SetForgetGateBias(forget_gate_bias_);
  lstm.SetOutputGateBias(output_gate_bias_);

  lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
  lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
  lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
  lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);

  lstm.SetCellToInputWeights(cell_to_input_weights_);
  lstm.SetCellToForgetWeights(cell_to_forget_weights_);
  lstm.SetCellToOutputWeights(cell_to_output_weights_);

  lstm.SetProjectionWeights(projection_weights_);

  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm,
                /*tolerance=*/0.00467);
}

// Same projection model as above; per its name this fixture also covers the
// projection bias and clipping.
class NoCifgPeepholeProjectionAndBiasClippingUnidirectionalLstmTest
    : public BaseUnidirectionalLstmTest {
  void SetUp() override {
    input_to_input_weights_ = {
        0.021393683, 0.06124551, 0.046905167, -0.014657677, -0.03149463,
        0.09171803, 0.14647801, 0.10797193, -0.0057968358, 0.0019193048,
        -0.2726754, 0.10154029, -0.018539885, 0.080349885, -0.10262385,
        -0.022599787, -0.09121155, -0.008675967, -0.045206103, -0.0821282,
        -0.008045952, 0.015478081, 0.055217247, 0.038719587, 0.044153627,
        -0.06453243, 0.05031825, -0.046935108, -0.008164439, 0.014574226,
        -0.1671009, -0.15519552, -0.16819797, -0.13971269, -0.11953059,
        0.25005487, -0.22790983, 0.009855087, -0.028140958, -0.11200698,
        0.11295408, -0.0035217577, 0.054485075, 0.05184695, 0.064711206,
        0.10989193, 0.11674786, 0.03490607, 0.07727357, 0.11390585,
        -0.1863375, -0.1034451, -0.13945189, -0.049401227, -0.18767063,
        0.042483903, 0.14233552, 0.13832581, 0.18350165, 0.14545603,
        -0.028545704, 0.024939531, 0.050929718, 0.0076203286, -0.0029723682,
        -0.042484224, -0.11827596, -0.09171104, -0.10808628, -0.16327988,
        -0.2273378, -0.0993647, -0.017155107, 0.0023917493, 0.049272764,
        0.0038534778, 0.054764505, 0.089753784, 0.06947234, 0.08014476,
        -0.04544234, -0.0497073, -0.07135631, -0.048929106, -0.004042012,
        -0.009284026, 0.018042054, 0.0036860977, -0.07427302, -0.11434604,
        -0.018995456, 0.031487543, 0.012834908, 0.019977754, 0.044256654,
        -0.39292613, -0.18519334, -0.11651281, -0.06809892, 0.011373677};
    input_to_forget_weights_ = {
        -0.0018401089, -0.004852237, 0.03698424, 0.014181704, 0.028273236,
        -0.016726194, -0.05249759, -0.10204261, 0.00861066, -0.040979505,
        -0.009899187, 0.01923892, -0.028177269, -0.08535103,
-0.14585495, 0.10662567, -0.01909731, -0.017883534, -0.0047269356, -0.045103323, 0.0030784295, 0.076784775, 0.07463696, 0.094531395, 0.0814421, -0.12257899, -0.033945758, -0.031303465, 0.045630626, 0.06843887, -0.13492945, -0.012480007, -0.0811829, -0.07224499, -0.09628791, 0.045100946, 0.0012300825, 0.013964662, 0.099372394, 0.02543059, 0.06958324, 0.034257296, 0.0482646, 0.06267997, 0.052625068, 0.12784666, 0.07077897, 0.025725935, 0.04165009, 0.07241905, 0.018668644, -0.037377294, -0.06277783, -0.08833636, -0.040120605, -0.011405586, -0.007808335, -0.010301386, -0.005102167, 0.027717464, 0.05483423, 0.11449111, 0.11289652, 0.10939839, 0.13396506, -0.08402166, -0.01901462, -0.044678304, -0.07720565, 0.014350063, -0.11757958, -0.0652038, -0.08185733, -0.076754324, -0.092614375, 0.10405491, 0.052960336, 0.035755895, 0.035839386, -0.012540553, 0.036881298, 0.02913376, 0.03420159, 0.05448447, -0.054523353, 0.02582715, 0.02327355, -0.011857179, -0.0011980024, -0.034641717, -0.026125094, -0.17582615, -0.15923657, -0.27486774, -0.0006143371, 0.0001771948, -8.470171e-05, 0.02651807, 0.045790765, 0.06956496}; input_to_cell_weights_ = { -0.04580283, -0.09549462, -0.032418985, -0.06454633, -0.043528453, 0.043018587, -0.049152344, -0.12418144, -0.078985475, -0.07596889, 0.019484362, -0.11434962, -0.0074034138, -0.06314844, -0.092981495, 0.0062155537, -0.025034338, -0.0028890965, 0.048929527, 0.06235075, 0.10665918, -0.032036792, -0.08505916, -0.10843358, -0.13002433, -0.036816437, -0.02130134, -0.016518239, 0.0047691227, -0.0025825808, 0.066017866, 0.029991534, -0.10652836, -0.1037554, -0.13056071, -0.03266643, -0.033702414, -0.006473424, -0.04611692, 0.014419339, -0.025174323, 0.0396852, 0.081777506, 0.06157468, 0.10210095, -0.009658194, 0.046511717, 0.03603906, 0.0069369148, 0.015960095, -0.06507666, 0.09551598, 0.053568836, 0.06408714, 0.12835667, -0.008714329, -0.20211966, -0.12093674, 0.029450472, 0.2849013, -0.029227901, 0.1164364, -0.08560263, 0.09941786, -0.036999565, -0.028842626, -0.0033637602, -0.017012902, -0.09720865, -0.11193351, -0.029155117, -0.017936034, -0.009768936, -0.04223324, -0.036159635, 0.06505112, -0.021742892, -0.023377212, -0.07221364, -0.06430552, 0.05453865, 0.091149814, 0.06387331, 0.007518393, 0.055960953, 0.069779344, 0.046411168, 0.10509911, 0.07463894, 0.0075130584, 0.012850982, 0.04555431, 0.056955688, 0.06555285, 0.050801456, -0.009862683, 0.00826772, -0.026555609, -0.0073611983, -0.0014897042}; input_to_output_weights_ = { -0.0998932, -0.07201956, -0.052803773, -0.15629593, -0.15001918, -0.07650751, 0.02359855, -0.075155355, -0.08037709, -0.15093534, 0.029517552, -0.04751393, 0.010350531, -0.02664851, -0.016839722, -0.023121163, 0.0077019283, 0.012851257, -0.05040649, -0.0129761, -0.021737747, -0.038305793, -0.06870586, -0.01481247, -0.001285394, 0.10124236, 0.083122835, 0.053313006, -0.062235646, -0.075637154, -0.027833903, 0.029774971, 0.1130802, 0.09218906, 0.09506135, -0.086665764, -0.037162706, -0.038880914, -0.035832845, -0.014481564, -0.09825003, -0.12048569, -0.097665586, -0.05287633, -0.0964047, -0.11366429, 0.035777505, 0.13568819, 0.052451383, 0.050649304, 0.05798951, -0.021852335, -0.099848844, 0.014740475, -0.078897946, 0.04974699, 0.014160473, 0.06973932, 0.04964942, 0.033364646, 0.08190124, 0.025535367, 0.050893165, 0.048514254, 0.06945813, -0.078907564, -0.06707616, -0.11844508, -0.09986688, -0.07509403, 0.06263226, 0.14925587, 0.20188436, 0.12098451, 0.14639415, 0.0015017595, -0.014267382, -0.03417257, 0.012711468, 0.0028300495, -0.024758482, 
-0.05098548, -0.0821182, 0.014225672, 0.021544158, 0.08949725, 0.07505268, -0.0020780868, 0.04908258, 0.06476295, -0.022907063, 0.027562456, 0.040185735, 0.019567577, -0.015598739, -0.049097303, -0.017121866, -0.083368234, -0.02332002, -0.0840956}; input_gate_bias_ = {0.02234832, 0.14757581, 0.18176508, 0.10380666, 0.053110216, -0.06928846, -0.13942584, -0.11816189, 0.19483899, 0.03652339, -0.10250295, 0.036714908, -0.18426876, 0.036065217, 0.21810818, 0.02383196, -0.043370757, 0.08690144, -0.04444982, 0.00030581196}; forget_gate_bias_ = {0.035185695, -0.042891346, -0.03032477, 0.23027696, 0.11098921, 0.15378423, 0.09263801, 0.09790885, 0.09508917, 0.061199076, 0.07665568, -0.015443159, -0.03499149, 0.046190713, 0.08895977, 0.10899629, 0.40694186, 0.06030037, 0.012413437, -0.06108739}; cell_gate_bias_ = {-0.024379363, 0.0055531194, 0.23377132, 0.033463873, -0.1483596, -0.10639995, -0.091433935, 0.058573797, -0.06809782, -0.07889636, -0.043246906, -0.09829136, -0.4279842, 0.034901652, 0.18797937, 0.0075234566, 0.016178843, 0.1749513, 0.13975595, 0.92058027}; output_gate_bias_ = {0.046159424, -0.0012809046, 0.03563469, 0.12648113, 0.027195795, 0.35373217, -0.018957434, 0.008907322, -0.0762701, 0.12018895, 0.04216877, 0.0022856654, 0.040952638, 0.3147856, 0.08225149, -0.057416286, -0.14995944, -0.008040261, 0.13208859, 0.029760877}; recurrent_to_input_weights_ = { -0.001374326, -0.078856036, 0.10672688, 0.029162422, -0.11585556, 0.02557986, -0.13446963, -0.035785314, -0.01244275, 0.025961924, -0.02337298, -0.044228926, -0.055839065, -0.046598054, -0.010546039, -0.06900766, 0.027239809, 0.022582639, -0.013296484, -0.05459212, 0.08981, -0.045407712, 0.08682226, -0.06867011, -0.14390695, -0.02916037, 0.000996957, 0.091420636, 0.14283475, -0.07390571, -0.06402044, 0.062524505, -0.093129106, 0.04860203, -0.08364217, -0.08119002, 0.009352075, 0.22920375, 0.0016303885, 0.11583097, -0.13732095, 0.012405723, -0.07551853, 0.06343048, 0.12162708, -0.031923793, -0.014335606, 0.01790974, -0.10650317, -0.0724401, 0.08554849, -0.05727212, 0.06556731, -0.042729504, -0.043227166, 0.011683251, -0.013082158, -0.029302018, -0.010899579, -0.062036745, -0.022509435, -0.00964907, -0.01567329, 0.04260106, -0.07787477, -0.11576462, 0.017356863, 0.048673786, -0.017577527, -0.05527947, -0.082487635, -0.040137455, -0.10820036, -0.04666372, 0.022746278, -0.07851417, 0.01068115, 0.032956902, 0.022433773, 0.0026891115, 0.08944216, -0.0685835, 0.010513544, 0.07228705, 0.02032331, -0.059686817, -0.0005566496, -0.086984694, 0.040414046, -0.1380399, 0.094208956, -0.05722982, 0.012092817, -0.04989123, -0.086576, -0.003399834, -0.04696032, -0.045747425, 0.10091314, 0.048676282, -0.029037097, 0.031399418, -0.0040285117, 0.047237843, 0.09504992, 0.041799378, -0.049185462, -0.031518843, -0.10516937, 0.026374253, 0.10058866, -0.0033195973, -0.041975245, 0.0073591834, 0.0033782164, -0.004325073, -0.10167381, 0.042500053, -0.01447153, 0.06464186, -0.017142897, 0.03312627, 0.009205989, 0.024138335, -0.011337001, 0.035530265, -0.010912711, 0.0706555, -0.005894094, 0.051841937, -0.1401738, -0.02351249, 0.0365468, 0.07590991, 0.08838724, 0.021681072, -0.10086113, 0.019608743, -0.06195883, 0.077335775, 0.023646897, -0.095322326, 0.02233014, 0.09756986, -0.048691444, -0.009579111, 0.07595467, 0.11480546, -0.09801813, 0.019894179, 0.08502348, 0.004032281, 0.037211012, 0.068537936, -0.048005626, -0.091520436, -0.028379958, -0.01556313, 0.06554592, -0.045599163, -0.01672207, -0.020169014, -0.011877351, -0.20212261, 0.010889619, 0.0047078193, 
0.038385306, 0.08540671, -0.017140968, -0.0035865551, 0.016678626, 0.005633034, 0.015963363, 0.00871737, 0.060130805, 0.028611384, 0.10109069, -0.015060172, -0.07894427, 0.06401885, 0.011584063, -0.024466386, 0.0047652307, -0.09041358, 0.030737216, -0.0046374933, 0.14215417, -0.11823516, 0.019899689, 0.006106124, -0.027092824, 0.0786356, 0.05052217, -0.058925, -0.011402121, -0.024987547, -0.0013661642, -0.06832946, -0.015667673, -0.1083353, -0.00096863037, -0.06988685, -0.053350925, -0.027275559, -0.033664223, -0.07978348, -0.025200296, -0.017207067, -0.058403496, -0.055697463, 0.005798788, 0.12965427, -0.062582195, 0.0013350133, -0.10482091, 0.0379771, 0.072521195, -0.0029455067, -0.13797039, -0.03628521, 0.013806405, -0.017858358, -0.01008298, -0.07700066, -0.017081132, 0.019358726, 0.0027079724, 0.004635139, 0.062634714, -0.02338735, -0.039547626, -0.02050681, 0.03385117, -0.083611414, 0.002862572, -0.09421313, 0.058618143, -0.08598433, 0.00972939, 0.023867095, -0.053934585, -0.023203006, 0.07452513, -0.048767887, -0.07314807, -0.056307215, -0.10433547, -0.06440842, 0.04328182, 0.04389765, -0.020006588, -0.09076438, -0.11652589, -0.021705797, 0.03345259, -0.010329105, -0.025767034, 0.013057034, -0.07316461, -0.10145612, 0.06358255, 0.18531723, 0.07759293, 0.12006465, 0.1305557, 0.058638252, -0.03393652, 0.09622831, -0.16253184, -2.4580743e-06, 0.079869635, -0.070196845, -0.005644518, 0.06857898, -0.12598175, -0.035084512, 0.03156317, -0.12794146, -0.031963028, 0.04692781, 0.030070418, 0.0071660685, -0.095516115, -0.004643372, 0.040170413, -0.062104587, -0.0037324072, 0.0554317, 0.08184801, -0.019164372, 0.06791302, 0.034257166, -0.10307039, 0.021943003, 0.046745934, 0.0790918, -0.0265588, -0.007824208, 0.042546265, -0.00977924, -0.0002440307, -0.017384544, -0.017990116, 0.12252321, -0.014512694, -0.08251313, 0.08861942, 0.13589665, 0.026351685, 0.012641483, 0.07466548, 0.044301085, -0.045414884, -0.051112458, 0.03444247, -0.08502782, -0.04106223, -0.028126027, 0.028473156, 0.10467447}; recurrent_to_cell_weights_ = { -0.037322544, 0.018592842, 0.0056175636, -0.06253426, 0.055647098, -0.05713207, -0.05626563, 0.005559383, 0.03375411, -0.025757805, -0.088049285, 0.06017052, -0.06570978, 0.007384076, 0.035123326, -0.07920549, 0.053676967, 0.044480428, -0.07663568, 0.0071805613, 0.08089997, 0.05143358, 0.038261272, 0.03339287, -0.027673481, 0.044746667, 0.028349208, 0.020090483, -0.019443132, -0.030755889, -0.0040000007, 0.04465846, -0.021585021, 0.0031670958, 0.0053199246, -0.056117613, -0.10893326, 0.076739706, -0.08509834, -0.027997585, 0.037871376, 0.01449768, -0.09002357, -0.06111149, -0.046195522, 0.0422062, -0.005683705, -0.1253618, -0.012925729, -0.04890792, 0.06985068, 0.037654128, 0.03398274, -0.004781977, 0.007032333, -0.031787455, 0.010868644, -0.031489216, 0.09525667, 0.013939797, 0.0058680447, 0.0167067, 0.02668468, -0.04797466, -0.048885044, -0.12722108, 0.035304096, 0.06554885, 0.00972396, -0.039238118, -0.05159735, -0.11329045, 0.1613692, -0.03750952, 0.06529313, -0.071974665, -0.11769596, 0.015524369, -0.0013754242, -0.12446318, 0.02786344, -0.014179351, 0.005264273, 0.14376344, 0.015983658, 0.03406988, -0.06939408, 0.040699873, 0.02111075, 0.09669095, 0.041345075, -0.08316494, -0.07684199, -0.045768797, 0.032298047, -0.041805092, 0.0119405, 0.0061010392, 0.12652606, 0.0064572375, -0.024950314, 0.11574242, 0.04508852, -0.04335324, 0.06760663, -0.027437469, 0.07216407, 0.06977076, -0.05438599, 0.034033038, -0.028602652, 0.05346137, 0.043184172, -0.037189785, 0.10420091, 
0.00882477, -0.054019816, -0.074273005, -0.030617684, -0.0028467078, 0.024302477, -0.0038869337, 0.005332455, 0.0013399826, 0.04361412, -0.007001822, 0.09631092, -0.06702025, -0.042049985, -0.035070654, -0.04103342, -0.10273396, 0.0544271, 0.037184782, -0.13150354, -0.0058036847, -0.008264958, 0.042035464, 0.05891794, 0.029673764, 0.0063542654, 0.044788733, 0.054816857, 0.062257513, -0.00093483756, 0.048938446, -0.004952862, -0.007730018, -0.04043371, -0.017094059, 0.07229206, -0.023670016, -0.052195564, -0.025616996, -0.01520939, 0.045104615, -0.007376126, 0.003533447, 0.006570588, 0.056037236, 0.12436656, 0.051817212, 0.028532185, -0.08686856, 0.11868599, 0.07663395, -0.07323171, 0.03463402, -0.050708205, -0.04458982, -0.11590894, 0.021273347, 0.1251325, -0.15313013, -0.12224372, 0.17228661, 0.023029093, 0.086124025, 0.006445803, -0.03496501, 0.028332196, 0.04449512, -0.042436164, -0.026587414, -0.006041347, -0.09292539, -0.05678812, 0.03897832, 0.09465633, 0.008115513, -0.02171956, 0.08304309, 0.071401566, 0.019622514, 0.032163795, -0.004167056, 0.02295182, 0.030739572, 0.056506045, 0.004612461, 0.06524936, 0.059999723, 0.046395954, -0.0045512207, -0.1335546, -0.030136576, 0.11584653, -0.014678886, 0.0020118146, -0.09688814, -0.0790206, 0.039770417, -0.0329582, 0.07922767, 0.029322514, 0.026405897, 0.04207835, -0.07073373, 0.063781224, 0.0859677, -0.10925287, -0.07011058, 0.048005477, 0.03438226, -0.09606514, -0.006669445, -0.043381985, 0.04240257, -0.06955775, -0.06769346, 0.043903265, -0.026784198, -0.017840602, 0.024307009, -0.040079936, -0.019946516, 0.045318738, -0.12233574, 0.026170589, 0.0074471775, 0.15978073, 0.10185836, 0.10298046, -0.015476589, -0.039390966, -0.072174534, 0.0739445, -0.1211869, -0.0347889, -0.07943156, 0.014809798, -0.12412325, -0.0030663363, 0.039695457, 0.0647603, -0.08291318, -0.018529687, -0.004423833, 0.0037507233, 0.084633216, -0.01514876, -0.056505352, -0.012800942, -0.06994386, 0.012962922, -0.031234352, 0.07029052, 0.016418684, 0.03618972, 0.055686004, -0.08663945, -0.017404709, -0.054761406, 0.029065743, 0.052404847, 0.020238016, 0.0048197987, -0.0214882, 0.07078733, 0.013016777, 0.06262858, 0.009184685, 0.020785125, -0.043904778, -0.0270329, -0.03299152, -0.060088247, -0.015162964, -0.001828936, 0.12642565, -0.056757294, 0.013586685, 0.09232601, -0.035886683, 0.06000002, 0.05229691, -0.052580316, -0.082029596, -0.010794592, 0.012947712, -0.036429964, -0.085508935, -0.13127148, -0.017744139, 0.031502828, 0.036232427, -0.031581745, 0.023051167, -0.05325106, -0.03421577, 0.028793324, -0.034633752, -0.009881397, -0.043551125, -0.018609839, 0.0019097115, -0.008799762, 0.056595087, 0.0022273948, 0.055752404}; recurrent_to_forget_weights_ = { -0.057784554, -0.026057621, -0.068447545, -0.022581743, 0.14811787, 0.10826372, 0.09471067, 0.03987225, -0.0039523416, 0.00030638507, 0.053185795, 0.10572994, 0.08414449, -0.022036452, -0.00066928595, -0.09203576, 0.032950465, -0.10985798, -0.023809856, 0.0021431844, -0.02196096, -0.00326074, 0.00058621005, -0.074678116, -0.06193199, 0.055729095, 0.03736828, 0.020123724, 0.061878487, -0.04729229, 0.034919553, -0.07585433, -0.04421272, -0.044019096, 0.085488975, 0.04058006, -0.06890133, -0.030951202, -0.024628663, -0.07672815, 0.034293607, 0.08556707, -0.05293577, -0.033561368, -0.04899627, 0.0241671, 0.015736353, -0.095442444, -0.029564252, 0.016493602, -0.035026584, 0.022337519, -0.026871363, 0.004780428, 0.0077918363, -0.03601621, 0.016435321, -0.03263031, -0.09543275, -0.047392778, 0.013454138, 0.028934088, 
0.01685226, -0.086110644, -0.046250615, -0.01847454, 0.047608484, 0.07339695, 0.034546845, -0.04881143, 0.009128804, -0.08802852, 0.03761666, 0.008096139, -0.014454086, 0.014361001, -0.023502491, -0.0011840804, -0.07607001, 0.001856849, -0.06509276, -0.006021153, -0.08570962, -0.1451793, 0.060212336, 0.055259194, 0.06974018, 0.049454916, -0.027794661, -0.08077226, -0.016179763, 0.1169753, 0.17213494, -0.0056326236, -0.053934924, -0.0124349, -0.11520337, 0.05409887, 0.088759385, 0.0019655675, 0.0042065294, 0.03881498, 0.019844765, 0.041858196, -0.05695512, 0.047233116, 0.038937137, -0.06542224, 0.014429736, -0.09719407, 0.13908425, -0.05379757, 0.012321099, 0.082840554, -0.029899208, 0.044217527, 0.059855383, 0.07711018, -0.045319796, 0.0948846, -0.011724666, -0.0033288454, -0.033542685, -0.04764985, -0.13873616, 0.040668588, 0.034832682, -0.015319203, -0.018715994, 0.046002675, 0.0599172, -0.043107376, 0.0294216, -0.002314414, -0.022424703, 0.0030315618, 0.0014641669, 0.0029166266, -0.11878115, 0.013738511, 0.12375372, -0.0006038222, 0.029104086, 0.087442465, 0.052958444, 0.07558703, 0.04817258, 0.044462286, -0.015213451, -0.08783778, -0.0561384, -0.003008196, 0.047060397, -0.002058388, 0.03429439, -0.018839769, 0.024734668, 0.024614193, -0.042046934, 0.09597743, -0.0043254104, 0.04320769, 0.0064070094, -0.0019131786, -0.02558259, -0.022822596, -0.023273505, -0.02464396, -0.10991725, -0.006240552, 0.0074488563, 0.024044557, 0.04383914, -0.046476185, 0.028658995, 0.060410924, 0.050786525, 0.009452605, -0.0073054377, -0.024810238, 0.0052906186, 0.0066939713, -0.0020913032, 0.014515517, 0.015898481, 0.021362653, -0.030262267, 0.016587038, -0.011442813, 0.041154444, -0.007631438, -0.03423484, -0.010977775, 0.036152758, 0.0066366293, 0.11915515, 0.02318443, -0.041350313, 0.021485701, -0.10906167, -0.028218046, -0.00954771, 0.020531068, -0.11995105, -0.03672871, 0.024019798, 0.014255957, -0.05221243, -0.00661567, -0.04630967, 0.033188973, 0.10107534, -0.014027541, 0.030796422, -0.10270911, -0.035999842, 0.15443139, 0.07684145, 0.036571592, -0.035900835, -0.0034699554, 0.06209149, 0.015920248, -0.031122351, -0.03858649, 0.01849943, 0.13872518, 0.01503974, 0.069941424, -0.06948533, -0.0088794185, 0.061282158, -0.047401894, 0.03100163, -0.041533746, -0.10430945, 0.044574402, -0.01425562, -0.024290353, 0.034563623, 0.05866852, 0.023947537, -0.09445152, 0.035450947, 0.02247216, -0.0042998926, 0.061146557, -0.10250651, 0.020881841, -0.06747029, 0.10062043, -0.0023941975, 0.03532124, -0.016341697, 0.09685456, -0.016764693, 0.051808182, 0.05875331, -0.04536488, 0.001626336, -0.028892258, -0.01048663, -0.009793449, -0.017093895, 0.010987891, 0.02357273, -0.00010856845, 0.0099760275, -0.001845119, -0.03551521, 0.0018358806, 0.05763657, -0.01769146, 0.040995963, 0.02235177, -0.060430344, 0.11475477, -0.023854522, 0.10071741, 0.0686208, -0.014250481, 0.034261297, 0.047418304, 0.08562733, -0.030519066, 0.0060542435, 0.014653856, -0.038836084, 0.04096551, 0.032249358, -0.08355519, -0.026823482, 0.056386515, -0.010401743, -0.028396193, 0.08507674, 0.014410365, 0.020995233, 0.17040324, 0.11511526, 0.02459721, 0.0066619175, 0.025853224, -0.023133837, -0.081302024, 0.017264642, -0.009585969, 0.09491168, -0.051313367, 0.054532815, -0.014298593, 0.10657464, 0.007076659, 0.10964551, 0.0409152, 0.008275321, -0.07283536, 0.07937492, 0.04192024, -0.1075027}; recurrent_to_output_weights_ = { 0.025825322, -0.05813119, 0.09495884, -0.045984812, -0.01255415, -0.0026479573, -0.08196161, -0.054914974, -0.0046604523, 
-0.029587349, -0.044576716, -0.07480124, -0.082868785, 0.023254942, 0.027502948, -0.0039728214, -0.08683098, -0.08116779, -0.014675607, -0.037924774, -0.023314456, -0.007401714, -0.09255757, 0.029460307, -0.08829125, -0.005139627, -0.08989442, -0.0555066, 0.13596267, -0.025062224, -0.048351806, -0.03850004, 0.07266485, -0.022414139, 0.05940088, 0.075114764, 0.09597592, -0.010211725, -0.0049794707, -0.011523867, -0.025980417, 0.072999895, 0.11091378, -0.081685916, 0.014416728, 0.043229222, 0.034178585, -0.07530371, 0.035837382, -0.085607, -0.007721233, -0.03287832, -0.043848954, -0.06404588, -0.06632928, -0.073643476, 0.008214239, -0.045984086, 0.039764922, 0.03474462, 0.060612556, -0.080590084, 0.049127717, 0.04151091, -0.030063879, 0.008801774, -0.023021035, -0.019558564, 0.05158114, -0.010947698, -0.011825728, 0.0075720972, 0.0699727, -0.0039981045, 0.069350146, 0.08799282, 0.016156472, 0.035502106, 0.11695009, 0.006217345, 0.13392477, -0.037875112, 0.025745004, 0.08940699, -0.00924166, 0.0046702605, -0.036598757, -0.08811812, 0.10522024, -0.032441203, 0.008176899, -0.04454919, 0.07058152, 0.0067963637, 0.039206743, 0.03259838, 0.03725492, -0.09515802, 0.013326398, -0.052055415, -0.025676316, 0.03198509, -0.015951829, -0.058556724, 0.036879618, 0.043357447, 0.028362012, -0.05908629, 0.0059240665, -0.04995891, -0.019187413, 0.0276265, -0.01628143, 0.0025863599, 0.08800015, 0.035250366, -0.022165963, -0.07328642, -0.009415526, -0.07455109, 0.11690406, 0.0363299, 0.07411125, 0.042103454, -0.009660886, 0.019076364, 0.018299393, -0.046004917, 0.08891175, 0.0431396, -0.026327137, -0.051502608, 0.08979574, -0.051670972, 0.04940282, -0.07491107, -0.021240504, 0.022596184, -0.034280192, 0.060163025, -0.058211457, -0.051837247, -0.01349775, -0.04639988, -0.035936575, -0.011681591, 0.064818054, 0.0073146066, -0.021745546, -0.043124277, -0.06471268, -0.07053354, -0.029321948, -0.05330136, 0.016933719, -0.053782392, 0.13747959, -0.1361751, -0.11569455, 0.0033329215, 0.05693899, -0.053219706, 0.063698, 0.07977434, -0.07924483, 0.06936997, 0.0034815092, -0.007305279, -0.037325785, -0.07251102, -0.033633437, -0.08677009, 0.091591336, -0.14165086, 0.021752775, 0.019683983, 0.0011612234, -0.058154266, 0.049996935, 0.0288841, -0.0024567875, -0.14345716, 0.010955264, -0.10234828, 0.1183656, -0.0010731248, -0.023590032, -0.072285876, -0.0724771, -0.026382286, -0.0014920527, 0.042667855, 0.0018776858, 0.02986552, 0.009814309, 0.0733756, 0.12289186, 0.018043943, -0.0458958, 0.049412545, 0.033632483, 0.05495232, 0.036686596, -0.013781798, -0.010036754, 0.02576849, -0.08307328, 0.010112348, 0.042521734, -0.05869831, -0.071689695, 0.03876447, -0.13275425, -0.0352966, -0.023077697, 0.10285965, 0.084736146, 0.15568255, -0.00040734606, 0.027835453, -0.10292561, -0.032401145, 0.10053256, -0.026142767, -0.08271222, -0.0030240538, -0.016368777, 0.1070414, 0.042672627, 0.013456989, -0.0437609, -0.022309763, 0.11576483, 0.04108048, 0.061026827, -0.0190714, -0.0869359, 0.037901703, 0.0610107, 0.07202949, 0.01675338, 0.086139716, -0.08795751, -0.014898893, -0.023771819, -0.01965048, 0.007955471, -0.043740474, 0.03346837, -0.10549954, 0.090567775, 0.042013682, -0.03176985, 0.12569028, -0.02421228, -0.029526481, 0.023851605, 0.031539805, 0.05292009, -0.02344001, -0.07811758, -0.08834428, 0.10094801, 0.16594367, -0.06861939, -0.021256343, -0.041093912, -0.06669611, 0.035498552, 0.021757556, -0.09302526, -0.015403468, -0.06614931, -0.051798206, -0.013874718, 0.03630673, 0.010412845, -0.08077351, 0.046185967, 0.0035662893, 
0.03541868, -0.094149634, -0.034814864, 0.003128424, -0.020674974, -0.03944324, -0.008110165, -0.11113267, 0.08484226, 0.043586485, 0.040582247, 0.0968012, -0.065249965, -0.028036479, 0.0050708856, 0.0017462453, 0.0326779, 0.041296225, 0.09164146, -0.047743853, -0.015952192, -0.034451712, 0.084197424, -0.05347844, -0.11768019, 0.085926116, -0.08251791, -0.045081906, 0.0948852, 0.068401024, 0.024856757, 0.06978981, -0.057309967, -0.012775832, -0.0032452994, 0.01977615, -0.041040014, -0.024264973, 0.063464895, 0.05431621, }; cell_to_input_weights_ = { 0.040369894, 0.030746894, 0.24704495, 0.018586371, -0.037586458, -0.15312155, -0.11812848, -0.11465643, 0.20259799, 0.11418174, -0.10116027, -0.011334949, 0.12411352, -0.076769054, -0.052169047, 0.21198851, -0.38871562, -0.09061183, -0.09683246, -0.21929175}; cell_to_forget_weights_ = { -0.01998659, -0.15568835, -0.24248174, -0.012770197, 0.041331276, -0.072311886, -0.052123554, -0.0066330447, -0.043891653, 0.036225766, -0.047248036, 0.021479502, 0.033189066, 0.11952997, -0.020432774, 0.64658105, -0.06650122, -0.03467612, 0.095340036, 0.23647355}; cell_to_output_weights_ = { 0.08286371, -0.08261836, -0.51210177, 0.002913762, 0.17764764, -0.5495371, -0.08460716, -0.24552552, 0.030037103, 0.04123544, -0.11940523, 0.007358328, 0.1890978, 0.4833202, -0.34441817, 0.36312827, -0.26375428, 0.1457655, -0.19724406, 0.15548733}; projection_weights_ = { -0.009802181, 0.09401916, 0.0717386, -0.13895074, 0.09641832, 0.060420845, 0.08539281, 0.054285463, 0.061395317, 0.034448683, -0.042991187, 0.019801661, -0.16840284, -0.015726732, -0.23041931, -0.024478018, -0.10959692, -0.013875541, 0.18600968, -0.061274476, 0.0138165, -0.08160894, -0.07661644, 0.032372914, 0.16169067, 0.22465782, -0.03993472, -0.004017731, 0.08633481, -0.28869787, 0.08682067, 0.17240396, 0.014975425, 0.056431185, 0.031037588, 0.16702051, 0.0077946745, 0.15140012, 0.29405436, 0.120285, -0.188994, -0.027265169, 0.043389652, -0.022061434, 0.014777949, -0.20203483, 0.094781205, 0.19100232, 0.13987629, -0.036132768, -0.06426278, -0.05108664, 0.13221376, 0.009441198, -0.16715929, 0.15859416, -0.040437475, 0.050779544, -0.022187516, 0.012166504, 0.027685808, -0.07675938, -0.0055694645, -0.09444123, 0.0046453946, 0.050794356, 0.10770313, -0.20790008, -0.07149004, -0.11425117, 0.008225835, -0.035802525, 0.14374903, 0.15262283, 0.048710253, 0.1847461, -0.007487823, 0.11000021, -0.09542012, 0.22619456, -0.029149994, 0.08527916, 0.009043713, 0.0042746216, 0.016261552, 0.022461696, 0.12689082, -0.043589946, -0.12035478, -0.08361797, -0.050666027, -0.1248618, -0.1275799, -0.071875185, 0.07377272, 0.09944291, -0.18897448, -0.1593054, -0.06526116, -0.040107165, -0.004618631, -0.067624845, -0.007576253, 0.10727444, 0.041546922, -0.20424393, 0.06907816, 0.050412357, 0.00724631, 0.039827548, 0.12449835, 0.10747581, 0.13708383, 0.09134148, -0.12617786, -0.06428341, 0.09956831, 0.1208086, -0.14676677, -0.0727722, 0.1126304, 0.010139365, 0.015571211, -0.038128063, 0.022913318, -0.042050496, 0.16842307, -0.060597885, 0.10531834, -0.06411776, -0.07451711, -0.03410368, -0.13393489, 0.06534304, 0.003620307, 0.04490757, 0.05970546, 0.05197996, 0.02839995, 0.10434969, -0.013699693, -0.028353551, -0.07260381, 0.047201227, -0.024575593, -0.036445823, 0.07155557, 0.009672501, -0.02328883, 0.009533515, -0.03606021, -0.07421458, -0.028082801, -0.2678904, -0.13221288, 0.18419984, -0.13012612, -0.014588381, -0.035059117, -0.04824723, 0.07830115, -0.056184657, 0.03277091, 0.025466874, 0.14494097, -0.12522776, -0.098633975, 
-0.10766018, -0.08317623, 0.08594209, 0.07749552, 0.039474737, 0.1776665, -0.07409566, -0.0477268, 0.29323658, 0.10801441, 0.1154011, 0.013952499, 0.10739139, 0.10708251, -0.051456142, 0.0074137426, -0.10430189, 0.10034707, 0.045594677, 0.0635285, -0.0715442, -0.089667566, -0.10811871, 0.00026344223, 0.08298446, -0.009525053, 0.006585689, -0.24567553, -0.09450807, 0.09648481, 0.026996298, -0.06419476, -0.04752702, -0.11063944, -0.23441927, -0.17608605, -0.052156363, 0.067035615, 0.19271925, -0.0032889997, -0.043264326, 0.09663576, -0.057112187, -0.10100678, 0.0628376, 0.04447668, 0.017961001, -0.10094388, -0.10190601, 0.18335468, 0.10494553, -0.052095775, -0.0026118709, 0.10539724, -0.04383912, -0.042349473, 0.08438151, -0.1947263, 0.02251204, 0.11216432, -0.10307853, 0.17351969, -0.039091777, 0.08066188, -0.00561982, 0.12633002, 0.11335965, -0.0088127935, -0.019777594, 0.06864014, -0.059751723, 0.016233567, -0.06894641, -0.28651384, -0.004228674, 0.019708522, -0.16305895, -0.07468996, -0.0855457, 0.099339016, -0.07580735, -0.13775392, 0.08434318, 0.08330512, -0.12131499, 0.031935584, 0.09180414, -0.08876437, -0.08049874, 0.008753825, 0.03498998, 0.030215185, 0.03907079, 0.089751154, 0.029194152, -0.03337423, -0.019092513, 0.04331237, 0.04299654, -0.036394123, -0.12915532, 0.09793732, 0.07512415, -0.11319543, -0.032502122, 0.15661901, 0.07671967, -0.005491124, -0.19379048, -0.218606, 0.21448623, 0.017840758, 0.1416943, -0.07051762, 0.19488361, 0.02664691, -0.18104725, -0.09334311, 0.15026465, -0.15493552, -0.057762887, -0.11604192, -0.262013, -0.01391798, 0.012185008, 0.11156489, -0.07483202, 0.06693364, -0.26151478, 0.046425626, 0.036540434, -0.16435726, 0.17338543, -0.21401681, -0.11385144, -0.08283257, -0.069031075, 0.030635102, 0.010969227, 0.11109743, 0.010919218, 0.027526086, 0.13519906, 0.01891392, -0.046839405, -0.040167913, 0.017953383, -0.09700955, 0.0061885654, -0.07000971, 0.026893595, -0.038844477, 0.14543656}; projection_bias_ = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6}; lstm_input_ = { { 0.787926, 0.151646, 0.071352, 0.118426, 0.458058, 0.596268, 0.998386, 0.568695, 0.864524, 0.571277, 0.073204, 0.296072, 0.743333, 0.069199, 0.045348, 0.867394, 0.291279, 0.013714, 0.482521, 0.626339}, { 0.295743, 0.544053, 0.690064, 0.858138, 0.497181, 0.642421, 0.524260, 0.134799, 0.003639, 0.162482, 0.640394, 0.930399, 0.050782, 0.432485, 0.988078, 0.082922, 0.563329, 0.865614, 0.333232, 0.259916} }; lstm_golden_output_ = { { 0.0960319489, 0.229351997, 0.297207743, 0.415997744, 0.491644233, 0.578822136, 0.728351235, 0.788540304, 0.909073055, 0.975599587, 1.08478093, 1.17409372, 1.30914319, 1.4041512, 1.51714694, 1.61342025, 0.0634541437, 0.190279216, 0.317923307, 0.415168911, 0.458113253, 0.609743774, 0.731511116, 0.795806408, 0.876155913, 0.960330188, 1.12396312, 1.22149014, 1.33917773, 1.43213499, 1.54139447, 1.65451813, 0.0485293195, 0.160991609, 0.337073475, 0.428976893, 0.459505379, 0.617044866, 0.743735075, 0.790821671, 0.85271728, 0.946818829, 1.12779701, 1.23345077, 1.35309088, 1.44595909, 1.56173062, 1.67839324, 0.0445971154, 0.156434938, 0.341761589, 0.425259203, 0.449760497, 0.633765697, 0.745093822, 0.791106999, 0.84820503, 0.952787101, 1.13438797, 1.24063754, 1.34668994, 1.44879568, 1.57038593, 1.67956686}, { 0.0861309841, 0.228726774, 0.296653062, 0.40733397, 0.47120741, 0.581307411, 0.719366193, 0.788456261, 0.904226124, 0.965476751, 1.10223258, 1.19042683, 1.32106233, 1.41333091, 1.51509535, 1.62168002, 0.0652779415, 0.18218407, 
0.324066937, 0.42611438, 0.47292757, 0.602282405, 0.739310443, 0.791508496, 0.870626807, 0.955534995, 1.10976851, 1.21598971, 1.34197009, 1.43256509, 1.54804492, 1.65581059, 0.0492607877, 0.169714347, 0.332315415, 0.419173867, 0.44699502, 0.630063772, 0.737177074, 0.792844594, 0.858417571, 0.956391335, 1.13453305, 1.23976779, 1.34693861, 1.4410423, 1.55988359, 1.67204297, 0.0390465111, 0.15099439, 0.3439475, 0.424439192, 0.444207728, 0.632501483, 0.742233515, 0.791400731, 0.845713973, 0.944575012, 1.14116096, 1.24791968, 1.35954499, 1.45086145, 1.56633317, 1.68943977}}; } }; TEST_F(NoCifgPeepholeProjectionAndBiasClippingUnidirectionalLstmTest, LstmBlackBoxTest) { const int n_batch = 2; const int n_input = 5; const int n_cell = 20; const int n_output = 16; const int sequence_length = 4; UnidirectionalLSTMOpModel lstm( n_batch, n_input, n_cell, n_output, sequence_length, true, false, true, true, true, 0.0, 0.0, { {sequence_length, n_batch, n_input}, {n_cell, n_input}, {n_cell, n_input}, {n_cell, n_input}, {n_cell, n_input}, {n_cell, n_output}, {n_cell, n_output}, {n_cell, n_output}, {n_cell, n_output}, {n_cell}, {n_cell}, {n_cell}, {n_cell}, {n_cell}, {n_cell}, {n_cell}, {n_output, n_cell}, {n_output}, {n_batch, n_output}, {n_batch, n_cell}, }); lstm.SetInputToInputWeights(input_to_input_weights_); lstm.SetInputToCellWeights(input_to_cell_weights_); lstm.SetInputToForgetWeights(input_to_forget_weights_); lstm.SetInputToOutputWeights(input_to_output_weights_); lstm.SetInputGateBias(input_gate_bias_); lstm.SetCellBias(cell_gate_bias_); lstm.SetForgetGateBias(forget_gate_bias_); lstm.SetOutputGateBias(output_gate_bias_); lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_); lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_); lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_); lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_); lstm.SetCellToInputWeights(cell_to_input_weights_); lstm.SetCellToForgetWeights(cell_to_forget_weights_); lstm.SetCellToOutputWeights(cell_to_output_weights_); lstm.SetProjectionWeights(projection_weights_); lstm.SetProjectionBias(projection_bias_); VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm); } class LayerNormUnidirectionalLSTMOpModel : public UnidirectionalLSTMOpModel { public: LayerNormUnidirectionalLSTMOpModel( int n_batch, int n_input, int n_cell, int n_output, int sequence_length, bool time_major, bool use_cifg, bool use_peephole, bool use_projection_weights, bool use_projection_bias, float cell_clip, float proj_clip, const std::vector<std::vector<int>>& input_shapes, const TensorType& weights_type = TensorType_FLOAT32) : UnidirectionalLSTMOpModel( n_batch, n_input, n_cell, n_output, sequence_length, time_major, use_cifg, use_peephole, use_projection_weights, use_projection_bias, cell_clip, proj_clip, input_shapes, TensorType_FLOAT32, true) {} }; class BaseLayerNormUnidirectionalLstmTest : public ::testing::Test { protected: std::vector<float> input_to_input_weights_; std::vector<float> input_to_cell_weights_; std::vector<float> input_to_forget_weights_; std::vector<float> input_to_output_weights_; std::vector<float> input_gate_bias_; std::vector<float> cell_gate_bias_; std::vector<float> forget_gate_bias_; std::vector<float> output_gate_bias_; std::vector<float> recurrent_to_input_weights_; std::vector<float> recurrent_to_cell_weights_; std::vector<float> recurrent_to_forget_weights_; std::vector<float> recurrent_to_output_weights_; std::vector<float> cell_to_input_weights_; std::vector<float> 
cell_to_forget_weights_; std::vector<float> cell_to_output_weights_; std::vector<float> projection_weights_; std::vector<float> projection_bias_; std::vector<float> input_layer_norm_coefficients_; std::vector<float> forget_layer_norm_coefficients_; std::vector<float> cell_layer_norm_coefficients_; std::vector<float> output_layer_norm_coefficients_; std::vector<std::vector<float>> lstm_input_; std::vector<std::vector<float>> lstm_golden_output_; void VerifyGoldens(const std::vector<std::vector<float>>& input, const std::vector<std::vector<float>>& output, UnidirectionalLSTMOpModel* lstm, float tolerance = 1e-5) { const int num_batches = input.size(); EXPECT_GT(num_batches, 0); const int num_inputs = lstm->num_inputs(); EXPECT_GT(num_inputs, 0); const int input_sequence_size = input[0].size() / num_inputs; EXPECT_GT(input_sequence_size, 0); for (int i = 0; i < input_sequence_size; ++i) { for (int b = 0; b < num_batches; ++b) { const float* batch_start = input[b].data() + i * num_inputs; const float* batch_end = batch_start + num_inputs; lstm->SetInput(((i * num_batches) + b) * num_inputs, batch_start, batch_end); } } ASSERT_EQ(lstm->Invoke(), kTfLiteOk); const int num_outputs = lstm->num_outputs(); EXPECT_GT(num_outputs, 0); std::vector<float> expected; for (int i = 0; i < input_sequence_size; ++i) { for (int b = 0; b < num_batches; ++b) { const float* golden_start_batch = output[b].data() + i * num_outputs; const float* golden_end_batch = golden_start_batch + num_outputs; expected.insert(expected.end(), golden_start_batch, golden_end_batch); } } EXPECT_THAT(lstm->GetOutput(), ElementsAreArray(ArrayFloatNear(expected, tolerance))); } }; class CifgPeepholeNoProjectionNoClippingLayerNormUnidirectionalLstmTest : public BaseLayerNormUnidirectionalLstmTest { void SetUp() override { input_to_cell_weights_ = {-0.49770179, -0.27711356, -0.09624726, 0.05100781, 0.04717243, 0.48944736, -0.38535351, -0.17212132}; input_to_forget_weights_ = {-0.55291498, -0.42866567, 0.13056988, -0.3633365, -0.22755712, 0.28253698, 0.24407166, 0.33826375}; input_to_output_weights_ = {0.10725588, -0.02335852, -0.55932593, -0.09426838, -0.44257352, 0.54939759, 0.01533556, 0.42751634}; cell_gate_bias_ = {0., 0., 0., 0.}; forget_gate_bias_ = {1., 1., 1., 1.}; output_gate_bias_ = {0., 0., 0., 0.}; recurrent_to_cell_weights_ = { 0.54066205, -0.32668582, -0.43562764, -0.56094903, 0.42957711, 0.01841056, -0.32764608, -0.33027974, -0.10826075, 0.20675004, 0.19069612, -0.03026325, -0.54532051, 0.33003211, 0.44901288, 0.21193194}; recurrent_to_forget_weights_ = { -0.13832897, -0.0515101, -0.2359007, -0.16661474, -0.14340827, 0.36986142, 0.23414481, 0.55899, 0.10798943, -0.41174671, 0.17751795, -0.34484994, -0.35874045, -0.11352962, 0.27268326, 0.54058349}; recurrent_to_output_weights_ = { 0.41613156, 0.42610586, -0.16495961, -0.5663873, 0.30579174, -0.05115908, -0.33941799, 0.23364776, 0.11178309, 0.09481031, -0.26424935, 0.46261835, 0.50248802, 0.26114327, -0.43736315, 0.33149987}; cell_to_forget_weights_ = {0.47485286, -0.51955009, -0.24458408, 0.31544167}; cell_to_output_weights_ = {-0.17135078, 0.82760304, 0.85573703, -0.77109635}; input_layer_norm_coefficients_ = {0.1, 0.2, 0.3, 0.5}; forget_layer_norm_coefficients_ = {0.2, 0.2, 0.4, 0.3}; cell_layer_norm_coefficients_ = {0.7, 0.2, 0.3, 0.8}; output_layer_norm_coefficients_ = {0.6, 0.2, 0.2, 0.5}; lstm_input_ = {{2., 3., 3., 4., 1., 1.}}; lstm_golden_output_ = {{-0.102089, 0.00653987, 0.0515139, -0.0630045, -0.173317, 0.0109206, 0.0903292, -0.109497, -0.23827, 0.0119514, 
0.119525, -0.12748}}; } }; TEST_F(CifgPeepholeNoProjectionNoClippingLayerNormUnidirectionalLstmTest, LayerNormLstmBlackBoxTest) { const int n_batch = 1; const int n_input = 2; const int n_cell = 4; const int n_output = 4; const int sequence_length = 3; LayerNormUnidirectionalLSTMOpModel lstm( n_batch, n_input, n_cell, n_output, sequence_length, true, true, true, false, false, 0.0, 0.0, { {sequence_length, n_batch, n_input}, {0, 0}, {n_cell, n_input}, {n_cell, n_input}, {n_cell, n_input}, {0, 0}, {n_cell, n_output}, {n_cell, n_output}, {n_cell, n_output}, {0}, {n_cell}, {n_cell}, {0}, {n_cell}, {n_cell}, {n_cell}, {0, 0}, {0}, {n_batch, n_output}, {n_batch, n_cell}, {0}, {n_cell}, {n_cell}, {n_cell}, }); lstm.SetInputToCellWeights(input_to_cell_weights_); lstm.SetInputToForgetWeights(input_to_forget_weights_); lstm.SetInputToOutputWeights(input_to_output_weights_); lstm.SetCellBias(cell_gate_bias_); lstm.SetForgetGateBias(forget_gate_bias_); lstm.SetOutputGateBias(output_gate_bias_); lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_); lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_); lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_); lstm.SetCellToForgetWeights(cell_to_forget_weights_); lstm.SetCellToOutputWeights(cell_to_output_weights_); lstm.SetForgetLayerNormCoefficients(forget_layer_norm_coefficients_); lstm.SetCellLayerNormCoefficients(cell_layer_norm_coefficients_); lstm.SetOutputLayerNormCoefficients(output_layer_norm_coefficients_); VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm); } TEST_F(CifgPeepholeNoProjectionNoClippingUnidirectionalLstmTest, NonLayerNormLstmBlackBoxTest) { const int n_batch = 1; const int n_input = 2; const int n_cell = 4; const int n_output = 4; const int sequence_length = 3; LayerNormUnidirectionalLSTMOpModel lstm( n_batch, n_input, n_cell, n_output, sequence_length, true, true, true, false, false, 0.0, 0.0, { {sequence_length, n_batch, n_input}, {0, 0}, {n_cell, n_input}, {n_cell, n_input}, {n_cell, n_input}, {0, 0}, {n_cell, n_output}, {n_cell, n_output}, {n_cell, n_output}, {0}, {n_cell}, {n_cell}, {0}, {n_cell}, {n_cell}, {n_cell}, {0, 0}, {0}, {n_batch, n_output}, {n_batch, n_cell}, {0}, {0}, {0}, {0}, }); lstm.SetInputToCellWeights(input_to_cell_weights_); lstm.SetInputToForgetWeights(input_to_forget_weights_); lstm.SetInputToOutputWeights(input_to_output_weights_); lstm.SetCellBias(cell_gate_bias_); lstm.SetForgetGateBias(forget_gate_bias_); lstm.SetOutputGateBias(output_gate_bias_); lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_); lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_); lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_); lstm.SetCellToForgetWeights(cell_to_forget_weights_); lstm.SetCellToOutputWeights(cell_to_output_weights_); VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm); } class UnidirectionalSequenceLSTMIntegerOpModel : public SingleOpModel { public: UnidirectionalSequenceLSTMIntegerOpModel( int n_batch, int n_input, int n_cell, int n_output, int sequence_length, bool time_major, bool use_cifg, bool use_peephole, bool use_projection_weights, bool use_projection_bias, bool use_layer_norm, bool use_8x8_8_implementation, const std::vector<std::pair<float, float>>& ranges, const std::vector<std::pair<float, int>>& intermediates, bool asymmetric_quantize_inputs = false) : n_input_(n_input), n_output_(n_output) { input_ = AddInput({TensorType_INT8, {sequence_length, n_batch, n_input}, ranges[0].first, ranges[0].second}); if (use_cifg) { 
input_to_input_weights_ = AddNullInput(); } else { input_to_input_weights_ = AddInput({TensorType_INT8, {n_cell, n_input}, ranges[1].first, ranges[1].second}); } input_to_forget_weights_ = AddInput({TensorType_INT8, {n_cell, n_input}, ranges[2].first, ranges[2].second}); input_to_cell_weights_ = AddInput({TensorType_INT8, {n_cell, n_input}, ranges[3].first, ranges[3].second}); input_to_output_weights_ = AddInput({TensorType_INT8, {n_cell, n_input}, ranges[4].first, ranges[4].second}); if (use_cifg) { recurrent_to_input_weights_ = AddNullInput(); } else { recurrent_to_input_weights_ = AddInput({TensorType_INT8, {n_cell, n_output}, ranges[5].first, ranges[5].second}); } recurrent_to_forget_weights_ = AddInput({TensorType_INT8, {n_cell, n_output}, ranges[6].first, ranges[6].second}); recurrent_to_cell_weights_ = AddInput({TensorType_INT8, {n_cell, n_output}, ranges[7].first, ranges[7].second}); recurrent_to_output_weights_ = AddInput({TensorType_INT8, {n_cell, n_output}, ranges[8].first, ranges[8].second}); if (use_peephole) { if (use_cifg) { cell_to_input_weights_ = AddNullInput(); } else { cell_to_input_weights_ = AddInput( {TensorType_INT16, {n_cell}, ranges[9].first, ranges[9].second}); } cell_to_forget_weights_ = AddInput( {TensorType_INT16, {n_cell}, ranges[10].first, ranges[10].second}); cell_to_output_weights_ = AddInput( {TensorType_INT16, {n_cell}, ranges[11].first, ranges[11].second}); } else { cell_to_input_weights_ = AddNullInput(); cell_to_forget_weights_ = AddNullInput(); cell_to_output_weights_ = AddNullInput(); } if (use_cifg) { input_gate_bias_ = AddNullInput(); } else { input_gate_bias_ = AddInput( {TensorType_INT32, {n_cell}, ranges[12].first, ranges[12].second}); } forget_gate_bias_ = AddInput( {TensorType_INT32, {n_cell}, ranges[13].first, ranges[13].second}); cell_gate_bias_ = AddInput( {TensorType_INT32, {n_cell}, ranges[14].first, ranges[14].second}); output_gate_bias_ = AddInput( {TensorType_INT32, {n_cell}, ranges[15].first, ranges[15].second}); if (use_projection_weights) { projection_weights_ = AddInput({TensorType_INT8, {n_output, n_cell}, ranges[16].first, ranges[16].second}); } else { projection_weights_ = AddNullInput(); } if (use_projection_bias) { CHECK(use_projection_weights); projection_bias_ = AddInput( {TensorType_INT32, {n_output}, ranges[17].first, ranges[17].second}); } else { projection_bias_ = AddNullInput(); } AddVariableInput({TensorType_INT16, {n_batch, n_output}, ranges[18].first, ranges[18].second}); AddVariableInput({TensorType_INT16, {n_batch, n_cell}, ranges[19].first, ranges[19].second}); if (use_layer_norm) { if (use_cifg) { input_layer_norm_coefficients_ = AddNullInput(); } else { input_layer_norm_coefficients_ = AddInput( {TensorType_INT16, {n_cell}, ranges[20].first, ranges[20].second}); } forget_layer_norm_coefficients_ = AddInput( {TensorType_INT16, {n_cell}, ranges[21].first, ranges[21].second}); cell_layer_norm_coefficients_ = AddInput( {TensorType_INT16, {n_cell}, ranges[22].first, ranges[22].second}); output_layer_norm_coefficients_ = AddInput( {TensorType_INT16, {n_cell}, ranges[23].first, ranges[23].second}); } CHECK(!use_8x8_8_implementation); EXPECT_EQ(intermediates.size(), 5); for (int i = 0; i < intermediates.size(); ++i) { AddIntermediate(TensorType_INT16, {intermediates[i].first}, {intermediates[i].second}); } output_ = AddOutput({TensorType_INT8, {n_batch, n_output}, ranges[24].first, ranges[24].second}); SetBuiltinOp(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM, BuiltinOptions_UnidirectionalSequenceLSTMOptions, 
CreateUnidirectionalSequenceLSTMOptions( builder_, ActivationFunctionType_TANH, 0.0f, 0.0f, time_major, asymmetric_quantize_inputs) .Union()); BuildInterpreter({}, -1, false, true, false); } void PerformAllocateAndDelegate() { AllocateAndDelegate(true); } void SetInputToInputWeights(const std::vector<float>& f) { QuantizeAndPopulate<int8_t>(input_to_input_weights_, f); } void SetInputToForgetWeights(const std::vector<float>& f) { QuantizeAndPopulate<int8_t>(input_to_forget_weights_, f); } void SetInputToCellWeights(const std::vector<float>& f) { QuantizeAndPopulate<int8_t>(input_to_cell_weights_, f); } void SetInputToOutputWeights(const std::vector<float>& f) { QuantizeAndPopulate<int8_t>(input_to_output_weights_, f); } void SetRecurrentToInputWeights(const std::vector<float>& f) { QuantizeAndPopulate<int8_t>(recurrent_to_input_weights_, f); } void SetRecurrentToForgetWeights(const std::vector<float>& f) { QuantizeAndPopulate<int8_t>(recurrent_to_forget_weights_, f); } void SetRecurrentToCellWeights(const std::vector<float>& f) { QuantizeAndPopulate<int8_t>(recurrent_to_cell_weights_, f); } void SetRecurrentToOutputWeights(const std::vector<float>& f) { QuantizeAndPopulate<int8_t>(recurrent_to_output_weights_, f); } void SetCellToInputWeights(const std::vector<float>& f) { QuantizeAndPopulate<int16_t>(cell_to_input_weights_, f); } void SetCellToForgetWeights(const std::vector<float>& f) { QuantizeAndPopulate<int16_t>(cell_to_forget_weights_, f); } void SetCellToOutputWeights(const std::vector<float>& f) { QuantizeAndPopulate<int16_t>(cell_to_output_weights_, f); } void SetInputLayerNormCoefficients(const std::vector<float>& f) { QuantizeAndPopulate<int16_t>(input_layer_norm_coefficients_, f); } void SetForgetLayerNormCoefficients(const std::vector<float>& f) { QuantizeAndPopulate<int16_t>(forget_layer_norm_coefficients_, f); } void SetCellLayerNormCoefficients(const std::vector<float>& f) { QuantizeAndPopulate<int16_t>(cell_layer_norm_coefficients_, f); } void SetOutputLayerNormCoefficients(const std::vector<float>& f) { QuantizeAndPopulate<int16_t>(output_layer_norm_coefficients_, f); } void SetInputGateBias(const std::vector<float>& f) { QuantizeAndPopulate<int32_t>(input_gate_bias_, f); } void SetForgetGateBias(const std::vector<float>& f) { QuantizeAndPopulate<int32_t>(forget_gate_bias_, f); } void SetCellBias(const std::vector<float>& f) { QuantizeAndPopulate<int32_t>(cell_gate_bias_, f); } void SetOutputGateBias(const std::vector<float>& f) { QuantizeAndPopulate<int32_t>(output_gate_bias_, f); } void SetProjectionWeights(const std::vector<float>& f) { QuantizeAndPopulate<int8_t>(projection_weights_, f); } void SetProjectionBias(const std::vector<float>& f) { QuantizeAndPopulate<int32_t>(projection_bias_, f); } void SetInput(const std::vector<float>& f) { QuantizeAndPopulate<int8_t>(input_, f); } std::vector<int8_t> GetOutput() { return ExtractVector<int8_t>(output_); } int num_inputs() { return n_input_; } int num_outputs() { return n_output_; } protected: int input_; int input_to_input_weights_; int input_to_forget_weights_; int input_to_cell_weights_; int input_to_output_weights_; int recurrent_to_input_weights_; int recurrent_to_forget_weights_; int recurrent_to_cell_weights_; int recurrent_to_output_weights_; int cell_to_input_weights_; int cell_to_forget_weights_; int cell_to_output_weights_; int input_layer_norm_coefficients_; int forget_layer_norm_coefficients_; int cell_layer_norm_coefficients_; int output_layer_norm_coefficients_; int input_gate_bias_; int forget_gate_bias_; 
int cell_gate_bias_; int output_gate_bias_; int projection_weights_; int projection_bias_; int output_; int n_input_; int n_output_; }; TEST(IntegerUnidirectionalSequenceLstmOpTest, NoCifg_NoPeephole_Projection_LayerNorm) { const int n_batch = 2; const int n_input = 5; const int n_cell = 4; const int n_output = 3; const int sequence_length = 3; const std::vector<float> input_to_input_weights = { 0.5, 0.6, 0.7, -0.8, -0.9, 0.1, 0.2, 0.3, -0.4, 0.5, -0.8, 0.7, -0.6, 0.5, -0.4, -0.5, -0.4, -0.3, -0.2, -0.1}; const std::vector<float> input_to_forget_weights = { -0.6, -0.1, 0.3, 0.2, 0.9, -0.5, -0.2, -0.4, 0.3, -0.8, -0.4, 0.3, -0.5, -0.4, -0.6, 0.3, -0.4, -0.6, -0.5, -0.5}; const std::vector<float> input_to_cell_weights = { -0.4, -0.3, -0.2, -0.1, -0.5, 0.5, -0.2, -0.3, -0.2, -0.6, 0.6, -0.1, -0.4, -0.3, -0.7, 0.7, -0.9, -0.5, 0.8, 0.6}; const std::vector<float> input_to_output_weights = { -0.8, -0.4, -0.2, -0.9, -0.1, -0.7, 0.3, -0.3, -0.8, -0.2, 0.6, -0.2, 0.4, -0.7, -0.3, -0.5, 0.1, 0.5, -0.6, -0.4}; const std::vector<float> input_gate_bias = {0.03, 0.15, 0.22, 0.38}; const std::vector<float> forget_gate_bias = {0.1, -0.3, -0.2, 0.1}; const std::vector<float> cell_gate_bias = {-0.05, 0.72, 0.25, 0.08}; const std::vector<float> output_gate_bias = {0.05, -0.01, 0.2, 0.1}; const std::vector<float> recurrent_to_input_weights = { -0.2, -0.3, 0.4, 0.1, -0.5, 0.9, -0.2, -0.3, -0.7, 0.05, -0.2, -0.6}; const std::vector<float> recurrent_to_cell_weights = { -0.3, 0.2, 0.1, -0.3, 0.8, -0.08, -0.2, 0.3, 0.8, -0.6, -0.1, 0.2}; const std::vector<float> recurrent_to_forget_weights = { -0.5, -0.3, -0.5, -0.2, 0.6, 0.4, 0.9, 0.3, -0.1, 0.2, 0.5, 0.2}; const std::vector<float> recurrent_to_output_weights = { 0.3, -0.1, 0.1, -0.2, -0.5, -0.7, -0.2, -0.6, -0.1, -0.4, -0.7, -0.2}; const std::vector<float> input_layer_norm_coefficients = {0.1, 0.2, 0.3, 0.5}; const std::vector<float> forget_layer_norm_coefficients = {0.2, 0.2, 0.4, 0.3}; const std::vector<float> cell_layer_norm_coefficients = {0.7, 0.2, 0.3, 0.8}; const std::vector<float> output_layer_norm_coefficients = {0.6, 0.2, 0.2, 0.5}; const std::vector<float> projection_weights = { -0.1, 0.2, 0.01, -0.2, 0.1, 0.5, 0.3, 0.08, 0.07, 0.2, -0.4, 0.2}; const std::vector<std::pair<float, float>> ranges = { {-1.0, 127.0 / 128}, {-1.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}, {-1, 1}, {-1, 1}, {-1, 1}, {-100, 100}, {-100, 100}, {-100, 100}, {-100, 100}, {-0.5, 0.5}, {-1, 1}, {-1.0, 32767.0 / 32768}, {-1, 1}, {-1.00001, 1.0}, {-1.00001, 1.0}, {-1.00001, 1.0}, {-1.00001, 1.0}, {-1.0, 32767.0 / 32768}, }; std::vector<std::pair<float, int>> intermediates = { {0.007059, 0}, {0.007812, 0}, {0.007059, 0}, {0.007812, 0}, {0.007, 0}}; UnidirectionalSequenceLSTMIntegerOpModel lstm( n_batch, n_input, n_cell, n_output, sequence_length, true, false, false, true, false, true, false, ranges, intermediates); lstm.PerformAllocateAndDelegate(); lstm.SetInputToInputWeights(input_to_input_weights); lstm.SetInputToCellWeights(input_to_cell_weights); lstm.SetInputToForgetWeights(input_to_forget_weights); lstm.SetInputToOutputWeights(input_to_output_weights); lstm.SetInputGateBias(input_gate_bias); lstm.SetCellBias(cell_gate_bias); lstm.SetForgetGateBias(forget_gate_bias); lstm.SetOutputGateBias(output_gate_bias); lstm.SetRecurrentToInputWeights(recurrent_to_input_weights); lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights); lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights); 
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights); lstm.SetProjectionWeights(projection_weights); lstm.SetInputLayerNormCoefficients(input_layer_norm_coefficients); lstm.SetForgetLayerNormCoefficients(forget_layer_norm_coefficients); lstm.SetCellLayerNormCoefficients(cell_layer_norm_coefficients); lstm.SetOutputLayerNormCoefficients(output_layer_norm_coefficients); const std::vector<float> lstm_input = { 0.7, 0.8, 0.1, 0.2, 0.3, 0.8, 0.1, 0.2, 0.4, 0.5, 0.2, 0.7, 0.7, 0.1, 0.7, 0.3, 0.2, 0.9, 0.8, 0.1, 0.7, 0.8, 0.1, 0.2, 0.3, 0.3, 0.2, 0.9, 0.8, 0.1, }; const std::vector<int8_t> expected_output = { 127, 127, -108, -67, 127, 127, -128, 127, 127, -128, 127, 127, 127, 127, 127, -128, 127, 127, }; lstm.SetInput(lstm_input); ASSERT_EQ(lstm.Invoke(), kTfLiteOk); EXPECT_THAT(lstm.GetOutput(), ElementsAreArray(expected_output)); } TEST(IntegerUnidirectionalSequenceLstmOpTest, NoCifg_Peephole_Projection_LayerNorm) { if (SingleOpModel::GetForceUseNnapi()) { return; } const int n_batch = 2; const int n_input = 5; const int n_cell = 4; const int n_output = 3; const int sequence_length = 3; const std::vector<float> input_to_input_weights = { 0.5, 0.6, 0.7, -0.8, -0.9, 0.1, 0.2, 0.3, -0.4, 0.5, -0.8, 0.7, -0.6, 0.5, -0.4, -0.5, -0.4, -0.3, -0.2, -0.1}; const std::vector<float> input_to_forget_weights = { -0.6, -0.1, 0.3, 0.2, 0.9, -0.5, -0.2, -0.4, 0.3, -0.8, -0.4, 0.3, -0.5, -0.4, -0.6, 0.3, -0.4, -0.6, -0.5, -0.5}; const std::vector<float> input_to_cell_weights = { -0.4, -0.3, -0.2, -0.1, -0.5, 0.5, -0.2, -0.3, -0.2, -0.6, 0.6, -0.1, -0.4, -0.3, -0.7, 0.7, -0.9, -0.5, 0.8, 0.6}; const std::vector<float> input_to_output_weights = { -0.8, -0.4, -0.2, -0.9, -0.1, -0.7, 0.3, -0.3, -0.8, -0.2, 0.6, -0.2, 0.4, -0.7, -0.3, -0.5, 0.1, 0.5, -0.6, -0.4}; const std::vector<float> input_gate_bias = {0.03, 0.15, 0.22, 0.38}; const std::vector<float> forget_gate_bias = {0.1, -0.3, -0.2, 0.1}; const std::vector<float> cell_gate_bias = {-0.05, 0.72, 0.25, 0.08}; const std::vector<float> output_gate_bias = {0.05, -0.01, 0.2, 0.1}; const std::vector<float> recurrent_to_input_weights = { -0.2, -0.3, 0.4, 0.1, -0.5, 0.9, -0.2, -0.3, -0.7, 0.05, -0.2, -0.6}; const std::vector<float> recurrent_to_cell_weights = { -0.3, 0.2, 0.1, -0.3, 0.8, -0.08, -0.2, 0.3, 0.8, -0.6, -0.1, 0.2}; const std::vector<float> recurrent_to_forget_weights = { -0.5, -0.3, -0.5, -0.2, 0.6, 0.4, 0.9, 0.3, -0.1, 0.2, 0.5, 0.2}; const std::vector<float> recurrent_to_output_weights = { 0.3, -0.1, 0.1, -0.2, -0.5, -0.7, -0.2, -0.6, -0.1, -0.4, -0.7, -0.2}; const std::vector<float> cell_to_input_weights = {0.3, -0.1, 0.1, -0.2}; const std::vector<float> cell_to_forget_weights = {0.2, -0.1, 0.1, -0.2}; const std::vector<float> cell_to_output_weights = {0.3, -0.1, 0.1, -0.3}; const std::vector<float> input_layer_norm_coefficients = {0.1, 0.2, 0.3, 0.5}; const std::vector<float> forget_layer_norm_coefficients = {0.2, 0.2, 0.4, 0.3}; const std::vector<float> cell_layer_norm_coefficients = {0.7, 0.2, 0.3, 0.8}; const std::vector<float> output_layer_norm_coefficients = {0.6, 0.2, 0.2, 0.5}; const std::vector<float> projection_weights = { -0.1, 0.2, 0.01, -0.2, 0.1, 0.5, 0.3, 0.08, 0.07, 0.2, -0.4, 0.2}; const std::vector<std::pair<float, float>> ranges = { {-1.0, 127.0 / 128}, {-1.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}, {-1.0, 1.0}, {-0.9, 0.9}, {-1.0, 1.0}, {-1.0, 1.0}, {-0.3, 0.3}, {-0.3, 0.3}, {-0.3, 0.3}, {-100, 100}, {-100, 80}, {-100, 100}, {-100, 100}, {-0.5, 0.5}, {-1, 1}, {-1.0, 32767.0 / 32768}, {-1, 1}, {-0.5, 0.5}, {-0.5, 0.5}, 
{-1.0, 1.0}, {-1.0, 1.0}, {-1.0, 32767.0 / 32768}, }; std::vector<std::pair<float, int>> intermediates = { {0.007059, 0}, {0.007812, 0}, {0.007059, 0}, {0.007812, 0}, {0.007, 0}}; UnidirectionalSequenceLSTMIntegerOpModel lstm( n_batch, n_input, n_cell, n_output, sequence_length, true, false, true, true, false, true, false, ranges, intermediates); lstm.PerformAllocateAndDelegate(); lstm.SetInputToInputWeights(input_to_input_weights); lstm.SetInputToCellWeights(input_to_cell_weights); lstm.SetInputToForgetWeights(input_to_forget_weights); lstm.SetInputToOutputWeights(input_to_output_weights); lstm.SetInputGateBias(input_gate_bias); lstm.SetCellBias(cell_gate_bias); lstm.SetForgetGateBias(forget_gate_bias); lstm.SetOutputGateBias(output_gate_bias); lstm.SetRecurrentToInputWeights(recurrent_to_input_weights); lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights); lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights); lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights); lstm.SetCellToInputWeights(cell_to_input_weights); lstm.SetCellToForgetWeights(cell_to_forget_weights); lstm.SetCellToOutputWeights(cell_to_output_weights); lstm.SetProjectionWeights(projection_weights); lstm.SetInputLayerNormCoefficients(input_layer_norm_coefficients); lstm.SetForgetLayerNormCoefficients(forget_layer_norm_coefficients); lstm.SetCellLayerNormCoefficients(cell_layer_norm_coefficients); lstm.SetOutputLayerNormCoefficients(output_layer_norm_coefficients); const std::vector<float> lstm_input = { 0.7, 0.8, 0.1, 0.2, 0.3, 0.8, 0.1, 0.2, 0.4, 0.5, 0.2, 0.7, 0.7, 0.1, 0.7, 0.3, 0.2, 0.9, 0.8, 0.1, 0.7, 0.8, 0.1, 0.2, 0.3, 0.3, 0.2, 0.9, 0.8, 0.1, }; const std::vector<int8_t> expected_output = { 127, 127, -16, -21, 127, 127, 23, 127, 127, -128, 127, 127, 127, 127, 127, -128, 127, 127, }; lstm.SetInput(lstm_input); ASSERT_EQ(lstm.Invoke(), kTfLiteOk); EXPECT_THAT(lstm.GetOutput(), ElementsAreArray(expected_output)); } class IndyLSTMOpTest : public ::testing::TestWithParam<std::tuple<bool, bool, bool>> {}; INSTANTIATE_TEST_SUITE_P( PeepHoleAndCifg, IndyLSTMOpTest, testing::Combine(testing::Bool(), testing::Bool(), testing::Bool())); TEST_P(IndyLSTMOpTest, HybridCheckThatDiagAndNonDiagRecurrentWeightsAreEqual) { const int n_batch = 1; const int n_input = 2; const int n_cell = 4; const int n_output = 4; const int sequence_length = 3; auto params = GetParam(); const bool use_cifg = std::get<0>(params); const bool use_peephole = std::get<1>(params); const bool asymmetric_quantize_inputs = std::get<2>(params); auto SetLstmWeights = [&](HybridUnidirectionalLSTMOpModel& model) -> void { if (!use_cifg) { model.SetInputToInputWeights({-0.45018822, -0.02338299, -0.0870589, -0.34550029, 0.04266912, -0.15680569, -0.34856534, 0.43890524}); } model.SetInputToCellWeights({-0.50013041, 0.1370284, 0.11810488, 0.2013163, -0.20583314, 0.44344562, 0.22077113, -0.29909778}); model.SetInputToForgetWeights({0.09701663, 0.20334584, -0.50592935, -0.31343272, -0.40032279, 0.44781327, 0.01387155, -0.35593212}); model.SetInputToOutputWeights({-0.25065863, -0.28290087, 0.04613829, 0.40525138, 0.44272184, 0.03897077, -0.1556896, 0.19487578}); if (!use_cifg) { model.SetInputGateBias({0., 0., 0., 0.}); } model.SetCellBias({0., 0., 0., 0.}); model.SetForgetGateBias({1., 1., 1., 1.}); model.SetOutputGateBias({0., 0., 0., 0.}); if (use_peephole) { if (!use_cifg) { model.SetCellToInputWeights( {0.040369894, 0.030746894, 0.24704495, 0.018586371, -0.037586458, -0.15312155, -0.11812848, -0.11465643, 0.20259799, 0.11418174, -0.10116027, 
-0.011334949, 0.12411352, -0.076769054, -0.052169047, 0.21198851, -0.38871562, -0.09061183, -0.09683246, -0.21929175}); } model.SetCellToForgetWeights( {0.47485286, -0.51955009, -0.24458408, 0.31544167}); model.SetCellToOutputWeights( {-0.17135078, 0.82760304, 0.85573703, -0.77109635}); } }; std::vector<int> input_weights_shape{n_cell, n_input}; if (use_cifg) { input_weights_shape = std::vector<int>{0, 0}; } std::vector<int> recurrent_to_input_weights_shape{n_cell, n_output}; if (use_cifg) { recurrent_to_input_weights_shape = std::vector<int>{0, 0}; } std::vector<std::vector<int>> input_shapes = { {sequence_length, n_batch, n_input}, input_weights_shape, {n_cell, n_input}, {n_cell, n_input}, {n_cell, n_input}, recurrent_to_input_weights_shape, {n_cell, n_output}, {n_cell, n_output}, {n_cell, n_output}, {(use_peephole && !use_cifg) ? n_cell : 0}, {use_peephole ? n_cell : 0}, {use_peephole ? n_cell : 0}, {n_cell}, {n_cell}, {n_cell}, {n_cell}, {0, 0}, {0}, {n_batch, n_output}, {n_batch, n_cell}, }; HybridUnidirectionalLSTMOpModel lstm( n_batch, n_input, n_cell, n_output, sequence_length, true, use_cifg, use_peephole, false, false, 0.0, 0.0, input_shapes, TensorType_UINT8, asymmetric_quantize_inputs, false); if (!use_cifg) { lstm.SetRecurrentToInputWeights({-0.0063535, 0.0, 0.0, 0.0, 0.0, 0.08183324, 0.0, 0.0, 0.0, 0.0, 0.48091322, 0.0, 0.0, 0.0, 0.0, 0.10629296}); } lstm.SetRecurrentToCellWeights({-0.3407414, 0.0, 0.0, 0.0, 0.0, -0.00123841, 0.0, 0.0, 0.0, 0.0, -0.501764, 0.0, 0.0, 0.0, 0.0, -0.16368064}); lstm.SetRecurrentToForgetWeights({-0.48684245, 0.0, 0.0, 0.0, 0.0, 0.20864892, 0.0, 0.0, 0.0, 0.0, 0.36447752, 0.0, 0.0, 0.0, 0.0, -0.01140004}); lstm.SetRecurrentToOutputWeights({0.43385774, 0.0, 0.0, 0.0, 0.0, -0.39835793, 0.0, 0.0, 0.0, 0.0, 0.20047462, 0.0, 0.0, 0.0, 0.0, 0.39922136}); input_shapes[5] = {n_cell}; input_shapes[6] = {n_cell}; input_shapes[7] = {n_cell}; input_shapes[8] = {n_cell}; HybridUnidirectionalLSTMOpModel indy_lstm( n_batch, n_input, n_cell, n_output, sequence_length, true, use_cifg, use_peephole, false, false, 0.0, 0.0, input_shapes, TensorType_UINT8, asymmetric_quantize_inputs, true); SetLstmWeights(lstm); SetLstmWeights(indy_lstm); if (!use_cifg) { indy_lstm.SetRecurrentToInputWeights( {-0.0063535, 0.08183324, 0.48091322, 0.10629296}); } indy_lstm.SetRecurrentToCellWeights( {-0.3407414, -0.00123841, -0.501764, -0.16368064}); indy_lstm.SetRecurrentToForgetWeights( {-0.48684245, 0.20864892, 0.36447752, -0.01140004}); indy_lstm.SetRecurrentToOutputWeights( {0.43385774, -0.39835793, 0.20047462, 0.39922136}); static float lstm_input[] = {2., 3., 3., 4., 1., 1.}; float* batch0_start = lstm_input; float* batch0_end = batch0_start + lstm.num_inputs() * lstm.sequence_length(); lstm.SetInput(0, batch0_start, batch0_end); indy_lstm.SetInput(0, batch0_start, batch0_end); ASSERT_EQ(lstm.Invoke(), kTfLiteOk); ASSERT_EQ(indy_lstm.Invoke(), kTfLiteOk); EXPECT_THAT(indy_lstm.GetOutput(), ElementsAreArray(ArrayFloatNear(lstm.GetOutput(), 1e-3))); } TEST_P(IndyLSTMOpTest, CheckThatDiagAndNonDiagRecurrentWeightsAreEqual) { const int n_batch = 1; const int n_input = 2; const int n_cell = 4; const int n_output = 4; const int sequence_length = 3; auto params = GetParam(); const bool use_cifg = std::get<0>(params); const bool use_peephole = std::get<1>(params); auto SetLstmWeights = [&](UnidirectionalLSTMOpModel& model) -> void { if (!use_cifg) { model.SetInputToInputWeights({-0.45018822, -0.02338299, -0.0870589, -0.34550029, 0.04266912, -0.15680569, -0.34856534, 0.43890524}); }
model.SetInputToCellWeights({-0.50013041, 0.1370284, 0.11810488, 0.2013163, -0.20583314, 0.44344562, 0.22077113, -0.29909778}); model.SetInputToForgetWeights({0.09701663, 0.20334584, -0.50592935, -0.31343272, -0.40032279, 0.44781327, 0.01387155, -0.35593212}); model.SetInputToOutputWeights({-0.25065863, -0.28290087, 0.04613829, 0.40525138, 0.44272184, 0.03897077, -0.1556896, 0.19487578}); if (!use_cifg) { model.SetInputGateBias({0., 0., 0., 0.}); } model.SetCellBias({0., 0., 0., 0.}); model.SetForgetGateBias({1., 1., 1., 1.}); model.SetOutputGateBias({0., 0., 0., 0.}); if (use_peephole) { if (!use_cifg) { model.SetCellToInputWeights( {0.040369894, 0.030746894, 0.24704495, 0.018586371, -0.037586458, -0.15312155, -0.11812848, -0.11465643, 0.20259799, 0.11418174, -0.10116027, -0.011334949, 0.12411352, -0.076769054, -0.052169047, 0.21198851, -0.38871562, -0.09061183, -0.09683246, -0.21929175}); } model.SetCellToForgetWeights( {0.47485286, -0.51955009, -0.24458408, 0.31544167}); model.SetCellToOutputWeights( {-0.17135078, 0.82760304, 0.85573703, -0.77109635}); } }; std::vector<int> input_weights_shape{n_cell, n_input}; if (use_cifg) { input_weights_shape = std::vector<int>{0, 0}; } std::vector<int> recurrent_to_input_weights_shape{n_cell, n_output}; if (use_cifg) { recurrent_to_input_weights_shape = std::vector<int>{0, 0}; } std::vector<std::vector<int>> input_shapes = { {sequence_length, n_batch, n_input}, input_weights_shape, {n_cell, n_input}, {n_cell, n_input}, {n_cell, n_input}, recurrent_to_input_weights_shape, {n_cell, n_output}, {n_cell, n_output}, {n_cell, n_output}, {(use_peephole && !use_cifg) ? n_cell : 0}, {use_peephole ? n_cell : 0}, {use_peephole ? n_cell : 0}, {n_cell}, {n_cell}, {n_cell}, {n_cell}, {0, 0}, {0}, {n_batch, n_output}, {n_batch, n_cell}, }; UnidirectionalLSTMOpModel lstm(n_batch, n_input, n_cell, n_output, sequence_length, true, use_cifg, use_peephole, false, false, 0.0, 0.0, input_shapes); if (!use_cifg) { lstm.SetRecurrentToInputWeights({-0.0063535, 0.0, 0.0, 0.0, 0.0, 0.08183324, 0.0, 0.0, 0.0, 0.0, 0.48091322, 0.0, 0.0, 0.0, 0.0, 0.10629296}); } lstm.SetRecurrentToCellWeights({-0.3407414, 0.0, 0.0, 0.0, 0.0, -0.00123841, 0.0, 0.0, 0.0, 0.0, -0.501764, 0.0, 0.0, 0.0, 0.0, -0.16368064}); lstm.SetRecurrentToForgetWeights({-0.48684245, 0.0, 0.0, 0.0, 0.0, 0.20864892, 0.0, 0.0, 0.0, 0.0, 0.36447752, 0.0, 0.0, 0.0, 0.0, -0.01140004}); lstm.SetRecurrentToOutputWeights({0.43385774, 0.0, 0.0, 0.0, 0.0, -0.39835793, 0.0, 0.0, 0.0, 0.0, 0.20047462, 0.0, 0.0, 0.0, 0.0, 0.39922136}); input_shapes[5] = {n_cell}; input_shapes[6] = {n_cell}; input_shapes[7] = {n_cell}; input_shapes[8] = {n_cell}; UnidirectionalLSTMOpModel indy_lstm( n_batch, n_input, n_cell, n_output, sequence_length, true, use_cifg, use_peephole, false, false, 0.0, 0.0, input_shapes, TensorType_FLOAT32, false, false, true); SetLstmWeights(lstm); SetLstmWeights(indy_lstm); if (!use_cifg) { indy_lstm.SetRecurrentToInputWeights( {-0.0063535, 0.08183324, 0.48091322, 0.10629296}); } indy_lstm.SetRecurrentToCellWeights( {-0.3407414, -0.00123841, -0.501764, -0.16368064}); indy_lstm.SetRecurrentToForgetWeights( {-0.48684245, 0.20864892, 0.36447752, -0.01140004}); indy_lstm.SetRecurrentToOutputWeights( {0.43385774, -0.39835793, 0.20047462, 0.39922136}); static float lstm_input[] = {2., 3., 3., 4., 1., 1.}; float* batch0_start = lstm_input; float* batch0_end = batch0_start + lstm.num_inputs() * lstm.sequence_length(); lstm.SetInput(0, batch0_start, batch0_end); indy_lstm.SetInput(0, batch0_start, batch0_end);
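// Both models now hold identical gate parameters: the standard LSTM was given full n_cell x n_cell recurrent matrices whose off-diagonal entries are all zero, while the IndyLSTM variant received only the diagonal as an n_cell vector, so the two invocations below are expected to produce matching outputs within the stated float tolerance.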
ASSERT_EQ(lstm.Invoke(), kTfLiteOk); ASSERT_EQ(indy_lstm.Invoke(), kTfLiteOk); EXPECT_THAT(indy_lstm.GetOutput(), ElementsAreArray(ArrayFloatNear(lstm.GetOutput(), 1e-6))); } #define QUANTIZE_PARAMETER_TEST(test) \ INSTANTIATE_TEST_SUITE_P(test, test, ::testing::ValuesIn({false, true})); QUANTIZE_PARAMETER_TEST( CifgPeepholeNoProjectionNoClippingUnidirectionalLstmTest); QUANTIZE_PARAMETER_TEST( NoCifgNoPeepholeNoProjectionNoClippingUnidirectionalLstmTest); QUANTIZE_PARAMETER_TEST(NoCifgPeepholeProjectionClippingUnidirectionalLstmTest); #undef QUANTIZE_PARAMETER_TEST } }
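Every black-box test above funnels through the same golden-verification pattern: a time-major input of shape {sequence_length, n_batch, n_input} is written into the op model one timestep at a time with the batches interleaved, the interpreter is invoked once, and the flattened output sequence is compared against precomputed goldens via ArrayFloatNear. A minimal standalone sketch of the packing arithmetic, with variable names taken from the VerifyGoldens helpers above (PackTimeMajorInput itself is a hypothetical wrapper, not part of the test file):

#include <vector>

// Hypothetical helper mirroring the VerifyGoldens packing loop; Model::SetInput
// stands in for UnidirectionalLSTMOpModel::SetInput(offset, begin, end).
template <typename Model>
void PackTimeMajorInput(const std::vector<std::vector<float>>& input,
                        int num_inputs, Model* lstm) {
  const int num_batches = input.size();
  // Each input[b] holds batch b's whole sequence, concatenated per timestep.
  const int input_sequence_size = input[0].size() / num_inputs;
  for (int i = 0; i < input_sequence_size; ++i) {
    for (int b = 0; b < num_batches; ++b) {
      const float* batch_start = input[b].data() + i * num_inputs;
      // Destination offset of (timestep i, batch b) in the flattened
      // time-major tensor: ((i * num_batches) + b) * num_inputs.
      lstm->SetInput(((i * num_batches) + b) * num_inputs, batch_start,
                     batch_start + num_inputs);
    }
  }
}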
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unidirectional_sequence_lstm.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unidirectional_sequence_lstm_test.cc
google/quiche
uber_quic_stream_id_manager
quiche/quic/core/uber_quic_stream_id_manager.cc
quiche/quic/core/uber_quic_stream_id_manager_test.cc
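// uber_quic_stream_id_manager.cc: UberQuicStreamIdManager is a thin router
// over two QuicStreamIdManager instances, one per stream directionality.
// Methods that name a direction explicitly (e.g.
// GetNextOutgoingBidirectionalStreamId) dispatch straight to the matching
// manager; methods that receive a stream ID or a frame route on
// QuicUtils::IsBidirectionalStreamId() or frame.unidirectional.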
#include "quiche/quic/core/uber_quic_stream_id_manager.h" #include <string> #include "quiche/quic/core/quic_session.h" #include "quiche/quic/core/quic_utils.h" namespace quic { UberQuicStreamIdManager::UberQuicStreamIdManager( Perspective perspective, ParsedQuicVersion version, QuicStreamIdManager::DelegateInterface* delegate, QuicStreamCount max_open_outgoing_bidirectional_streams, QuicStreamCount max_open_outgoing_unidirectional_streams, QuicStreamCount max_open_incoming_bidirectional_streams, QuicStreamCount max_open_incoming_unidirectional_streams) : version_(version), bidirectional_stream_id_manager_(delegate, false, perspective, version, max_open_outgoing_bidirectional_streams, max_open_incoming_bidirectional_streams), unidirectional_stream_id_manager_( delegate, true, perspective, version, max_open_outgoing_unidirectional_streams, max_open_incoming_unidirectional_streams) {} bool UberQuicStreamIdManager::MaybeAllowNewOutgoingBidirectionalStreams( QuicStreamCount max_open_streams) { return bidirectional_stream_id_manager_.MaybeAllowNewOutgoingStreams( max_open_streams); } bool UberQuicStreamIdManager::MaybeAllowNewOutgoingUnidirectionalStreams( QuicStreamCount max_open_streams) { return unidirectional_stream_id_manager_.MaybeAllowNewOutgoingStreams( max_open_streams); } void UberQuicStreamIdManager::SetMaxOpenIncomingBidirectionalStreams( QuicStreamCount max_open_streams) { bidirectional_stream_id_manager_.SetMaxOpenIncomingStreams(max_open_streams); } void UberQuicStreamIdManager::SetMaxOpenIncomingUnidirectionalStreams( QuicStreamCount max_open_streams) { unidirectional_stream_id_manager_.SetMaxOpenIncomingStreams(max_open_streams); } bool UberQuicStreamIdManager::CanOpenNextOutgoingBidirectionalStream() const { return bidirectional_stream_id_manager_.CanOpenNextOutgoingStream(); } bool UberQuicStreamIdManager::CanOpenNextOutgoingUnidirectionalStream() const { return unidirectional_stream_id_manager_.CanOpenNextOutgoingStream(); } QuicStreamId UberQuicStreamIdManager::GetNextOutgoingBidirectionalStreamId() { return bidirectional_stream_id_manager_.GetNextOutgoingStreamId(); } QuicStreamId UberQuicStreamIdManager::GetNextOutgoingUnidirectionalStreamId() { return unidirectional_stream_id_manager_.GetNextOutgoingStreamId(); } bool UberQuicStreamIdManager::MaybeIncreaseLargestPeerStreamId( QuicStreamId id, std::string* error_details) { if (QuicUtils::IsBidirectionalStreamId(id, version_)) { return bidirectional_stream_id_manager_.MaybeIncreaseLargestPeerStreamId( id, error_details); } return unidirectional_stream_id_manager_.MaybeIncreaseLargestPeerStreamId( id, error_details); } void UberQuicStreamIdManager::OnStreamClosed(QuicStreamId id) { if (QuicUtils::IsBidirectionalStreamId(id, version_)) { bidirectional_stream_id_manager_.OnStreamClosed(id); return; } unidirectional_stream_id_manager_.OnStreamClosed(id); } bool UberQuicStreamIdManager::OnStreamsBlockedFrame( const QuicStreamsBlockedFrame& frame, std::string* error_details) { if (frame.unidirectional) { return unidirectional_stream_id_manager_.OnStreamsBlockedFrame( frame, error_details); } return bidirectional_stream_id_manager_.OnStreamsBlockedFrame(frame, error_details); } bool UberQuicStreamIdManager::IsAvailableStream(QuicStreamId id) const { if (QuicUtils::IsBidirectionalStreamId(id, version_)) { return bidirectional_stream_id_manager_.IsAvailableStream(id); } return unidirectional_stream_id_manager_.IsAvailableStream(id); } void UberQuicStreamIdManager::StopIncreasingIncomingMaxStreams() { 
unidirectional_stream_id_manager_.StopIncreasingIncomingMaxStreams(); bidirectional_stream_id_manager_.StopIncreasingIncomingMaxStreams(); } void UberQuicStreamIdManager::MaybeSendMaxStreamsFrame() { unidirectional_stream_id_manager_.MaybeSendMaxStreamsFrame(); bidirectional_stream_id_manager_.MaybeSendMaxStreamsFrame(); } QuicStreamCount UberQuicStreamIdManager::GetMaxAllowdIncomingBidirectionalStreams() const { return bidirectional_stream_id_manager_.incoming_initial_max_open_streams(); } QuicStreamCount UberQuicStreamIdManager::GetMaxAllowdIncomingUnidirectionalStreams() const { return unidirectional_stream_id_manager_.incoming_initial_max_open_streams(); } QuicStreamId UberQuicStreamIdManager::GetLargestPeerCreatedStreamId( bool unidirectional) const { if (unidirectional) { return unidirectional_stream_id_manager_.largest_peer_created_stream_id(); } return bidirectional_stream_id_manager_.largest_peer_created_stream_id(); } QuicStreamId UberQuicStreamIdManager::next_outgoing_bidirectional_stream_id() const { return bidirectional_stream_id_manager_.next_outgoing_stream_id(); } QuicStreamId UberQuicStreamIdManager::next_outgoing_unidirectional_stream_id() const { return unidirectional_stream_id_manager_.next_outgoing_stream_id(); } QuicStreamCount UberQuicStreamIdManager::max_outgoing_bidirectional_streams() const { return bidirectional_stream_id_manager_.outgoing_max_streams(); } QuicStreamCount UberQuicStreamIdManager::max_outgoing_unidirectional_streams() const { return unidirectional_stream_id_manager_.outgoing_max_streams(); } QuicStreamCount UberQuicStreamIdManager::max_incoming_bidirectional_streams() const { return bidirectional_stream_id_manager_.incoming_actual_max_streams(); } QuicStreamCount UberQuicStreamIdManager::max_incoming_unidirectional_streams() const { return unidirectional_stream_id_manager_.incoming_actual_max_streams(); } QuicStreamCount UberQuicStreamIdManager::advertised_max_incoming_bidirectional_streams() const { return bidirectional_stream_id_manager_.incoming_advertised_max_streams(); } QuicStreamCount UberQuicStreamIdManager::advertised_max_incoming_unidirectional_streams() const { return unidirectional_stream_id_manager_.incoming_advertised_max_streams(); } QuicStreamCount UberQuicStreamIdManager::outgoing_bidirectional_stream_count() const { return bidirectional_stream_id_manager_.outgoing_stream_count(); } QuicStreamCount UberQuicStreamIdManager::outgoing_unidirectional_stream_count() const { return unidirectional_stream_id_manager_.outgoing_stream_count(); } }
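The split into bidirectional and unidirectional child managers, keyed off QuicUtils::IsBidirectionalStreamId, mirrors the IETF QUIC stream-ID layout. A minimal sketch of that layout (RFC 9000 semantics, not code from this repo):

#include <cstdint>

// RFC 9000: the two low bits of a stream ID encode initiator and direction.
constexpr bool IsServerInitiated(uint64_t id) { return (id & 0x1) != 0; }
constexpr bool IsUnidirectional(uint64_t id) { return (id & 0x2) != 0; }

static_assert(!IsUnidirectional(0) && !IsServerInitiated(0));  // 0: client bidi
static_assert(!IsUnidirectional(1) && IsServerInitiated(1));   // 1: server bidi
static_assert(IsUnidirectional(2) && !IsServerInitiated(2));   // 2: client uni
static_assert(IsUnidirectional(3) && IsServerInitiated(3));    // 3: server uni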
#include "quiche/quic/core/uber_quic_stream_id_manager.h" #include <string> #include <vector> #include "quiche/quic/core/quic_utils.h" #include "quiche/quic/core/quic_versions.h" #include "quiche/quic/platform/api/quic_test.h" #include "quiche/quic/test_tools/quic_stream_id_manager_peer.h" #include "quiche/quic/test_tools/quic_test_utils.h" using testing::_; using testing::StrictMock; namespace quic { namespace test { namespace { struct TestParams { explicit TestParams(ParsedQuicVersion version, Perspective perspective) : version(version), perspective(perspective) {} ParsedQuicVersion version; Perspective perspective; }; std::string PrintToString(const TestParams& p) { return absl::StrCat( ParsedQuicVersionToString(p.version), "_", (p.perspective == Perspective::IS_CLIENT ? "client" : "server")); } std::vector<TestParams> GetTestParams() { std::vector<TestParams> params; for (const ParsedQuicVersion& version : AllSupportedVersions()) { if (!version.HasIetfQuicFrames()) { continue; } params.push_back(TestParams(version, Perspective::IS_CLIENT)); params.push_back(TestParams(version, Perspective::IS_SERVER)); } return params; } class MockDelegate : public QuicStreamIdManager::DelegateInterface { public: MOCK_METHOD(bool, CanSendMaxStreams, (), (override)); MOCK_METHOD(void, SendMaxStreams, (QuicStreamCount stream_count, bool unidirectional), (override)); }; class UberQuicStreamIdManagerTest : public QuicTestWithParam<TestParams> { protected: UberQuicStreamIdManagerTest() : manager_(perspective(), version(), &delegate_, 0, 0, kDefaultMaxStreamsPerConnection, kDefaultMaxStreamsPerConnection) {} QuicStreamId GetNthClientInitiatedBidirectionalId(int n) { return QuicUtils::GetFirstBidirectionalStreamId(transport_version(), Perspective::IS_CLIENT) + QuicUtils::StreamIdDelta(transport_version()) * n; } QuicStreamId GetNthClientInitiatedUnidirectionalId(int n) { return QuicUtils::GetFirstUnidirectionalStreamId(transport_version(), Perspective::IS_CLIENT) + QuicUtils::StreamIdDelta(transport_version()) * n; } QuicStreamId GetNthServerInitiatedBidirectionalId(int n) { return QuicUtils::GetFirstBidirectionalStreamId(transport_version(), Perspective::IS_SERVER) + QuicUtils::StreamIdDelta(transport_version()) * n; } QuicStreamId GetNthServerInitiatedUnidirectionalId(int n) { return QuicUtils::GetFirstUnidirectionalStreamId(transport_version(), Perspective::IS_SERVER) + QuicUtils::StreamIdDelta(transport_version()) * n; } QuicStreamId GetNthPeerInitiatedBidirectionalStreamId(int n) { return ((perspective() == Perspective::IS_SERVER) ? GetNthClientInitiatedBidirectionalId(n) : GetNthServerInitiatedBidirectionalId(n)); } QuicStreamId GetNthPeerInitiatedUnidirectionalStreamId(int n) { return ((perspective() == Perspective::IS_SERVER) ? GetNthClientInitiatedUnidirectionalId(n) : GetNthServerInitiatedUnidirectionalId(n)); } QuicStreamId GetNthSelfInitiatedBidirectionalStreamId(int n) { return ((perspective() == Perspective::IS_CLIENT) ? GetNthClientInitiatedBidirectionalId(n) : GetNthServerInitiatedBidirectionalId(n)); } QuicStreamId GetNthSelfInitiatedUnidirectionalStreamId(int n) { return ((perspective() == Perspective::IS_CLIENT) ? GetNthClientInitiatedUnidirectionalId(n) : GetNthServerInitiatedUnidirectionalId(n)); } QuicStreamId StreamCountToId(QuicStreamCount stream_count, Perspective perspective, bool bidirectional) { return ((bidirectional) ? 
QuicUtils::GetFirstBidirectionalStreamId( transport_version(), perspective) : QuicUtils::GetFirstUnidirectionalStreamId( transport_version(), perspective)) + ((stream_count - 1) * QuicUtils::StreamIdDelta(transport_version())); } ParsedQuicVersion version() { return GetParam().version; } QuicTransportVersion transport_version() { return version().transport_version; } Perspective perspective() { return GetParam().perspective; } testing::StrictMock<MockDelegate> delegate_; UberQuicStreamIdManager manager_; }; INSTANTIATE_TEST_SUITE_P(Tests, UberQuicStreamIdManagerTest, ::testing::ValuesIn(GetTestParams()), ::testing::PrintToStringParamName()); TEST_P(UberQuicStreamIdManagerTest, Initialization) { EXPECT_EQ(GetNthSelfInitiatedBidirectionalStreamId(0), manager_.next_outgoing_bidirectional_stream_id()); EXPECT_EQ(GetNthSelfInitiatedUnidirectionalStreamId(0), manager_.next_outgoing_unidirectional_stream_id()); } TEST_P(UberQuicStreamIdManagerTest, SetMaxOpenOutgoingStreams) { const size_t kNumMaxOutgoingStream = 123; EXPECT_TRUE(manager_.MaybeAllowNewOutgoingBidirectionalStreams( kNumMaxOutgoingStream)); EXPECT_TRUE(manager_.MaybeAllowNewOutgoingUnidirectionalStreams( kNumMaxOutgoingStream + 1)); EXPECT_EQ(kNumMaxOutgoingStream, manager_.max_outgoing_bidirectional_streams()); EXPECT_EQ(kNumMaxOutgoingStream + 1, manager_.max_outgoing_unidirectional_streams()); int i = kNumMaxOutgoingStream; while (i) { EXPECT_TRUE(manager_.CanOpenNextOutgoingBidirectionalStream()); manager_.GetNextOutgoingBidirectionalStreamId(); EXPECT_TRUE(manager_.CanOpenNextOutgoingUnidirectionalStream()); manager_.GetNextOutgoingUnidirectionalStreamId(); i--; } EXPECT_TRUE(manager_.CanOpenNextOutgoingUnidirectionalStream()); manager_.GetNextOutgoingUnidirectionalStreamId(); EXPECT_FALSE(manager_.CanOpenNextOutgoingUnidirectionalStream()); EXPECT_FALSE(manager_.CanOpenNextOutgoingBidirectionalStream()); } TEST_P(UberQuicStreamIdManagerTest, SetMaxOpenIncomingStreams) { const size_t kNumMaxIncomingStreams = 456; manager_.SetMaxOpenIncomingUnidirectionalStreams(kNumMaxIncomingStreams); manager_.SetMaxOpenIncomingBidirectionalStreams(kNumMaxIncomingStreams + 1); EXPECT_EQ(kNumMaxIncomingStreams + 1, manager_.GetMaxAllowdIncomingBidirectionalStreams()); EXPECT_EQ(kNumMaxIncomingStreams, manager_.GetMaxAllowdIncomingUnidirectionalStreams()); EXPECT_EQ(manager_.max_incoming_bidirectional_streams(), manager_.advertised_max_incoming_bidirectional_streams()); EXPECT_EQ(manager_.max_incoming_unidirectional_streams(), manager_.advertised_max_incoming_unidirectional_streams()); size_t i; for (i = 0; i < kNumMaxIncomingStreams; i++) { EXPECT_TRUE(manager_.MaybeIncreaseLargestPeerStreamId( GetNthPeerInitiatedUnidirectionalStreamId(i), nullptr)); EXPECT_TRUE(manager_.MaybeIncreaseLargestPeerStreamId( GetNthPeerInitiatedBidirectionalStreamId(i), nullptr)); } EXPECT_TRUE(manager_.MaybeIncreaseLargestPeerStreamId( GetNthPeerInitiatedBidirectionalStreamId(i), nullptr)); std::string error_details; EXPECT_FALSE(manager_.MaybeIncreaseLargestPeerStreamId( GetNthPeerInitiatedUnidirectionalStreamId(i), &error_details)); EXPECT_EQ(error_details, absl::StrCat( "Stream id ", GetNthPeerInitiatedUnidirectionalStreamId(i), " would exceed stream count limit ", kNumMaxIncomingStreams)); EXPECT_FALSE(manager_.MaybeIncreaseLargestPeerStreamId( GetNthPeerInitiatedBidirectionalStreamId(i + 1), &error_details)); EXPECT_EQ(error_details, absl::StrCat("Stream id ", GetNthPeerInitiatedBidirectionalStreamId(i + 1), " would exceed stream count limit ", 
kNumMaxIncomingStreams + 1)); } TEST_P(UberQuicStreamIdManagerTest, GetNextOutgoingStreamId) { EXPECT_TRUE(manager_.MaybeAllowNewOutgoingBidirectionalStreams(10)); EXPECT_TRUE(manager_.MaybeAllowNewOutgoingUnidirectionalStreams(10)); EXPECT_EQ(GetNthSelfInitiatedBidirectionalStreamId(0), manager_.GetNextOutgoingBidirectionalStreamId()); EXPECT_EQ(GetNthSelfInitiatedBidirectionalStreamId(1), manager_.GetNextOutgoingBidirectionalStreamId()); EXPECT_EQ(GetNthSelfInitiatedUnidirectionalStreamId(0), manager_.GetNextOutgoingUnidirectionalStreamId()); EXPECT_EQ(GetNthSelfInitiatedUnidirectionalStreamId(1), manager_.GetNextOutgoingUnidirectionalStreamId()); } TEST_P(UberQuicStreamIdManagerTest, AvailableStreams) { EXPECT_TRUE(manager_.MaybeIncreaseLargestPeerStreamId( GetNthPeerInitiatedBidirectionalStreamId(3), nullptr)); EXPECT_TRUE( manager_.IsAvailableStream(GetNthPeerInitiatedBidirectionalStreamId(1))); EXPECT_TRUE( manager_.IsAvailableStream(GetNthPeerInitiatedBidirectionalStreamId(2))); EXPECT_TRUE(manager_.MaybeIncreaseLargestPeerStreamId( GetNthPeerInitiatedUnidirectionalStreamId(3), nullptr)); EXPECT_TRUE( manager_.IsAvailableStream(GetNthPeerInitiatedUnidirectionalStreamId(1))); EXPECT_TRUE( manager_.IsAvailableStream(GetNthPeerInitiatedUnidirectionalStreamId(2))); } TEST_P(UberQuicStreamIdManagerTest, MaybeIncreaseLargestPeerStreamId) { EXPECT_TRUE(manager_.MaybeIncreaseLargestPeerStreamId( StreamCountToId(manager_.max_incoming_bidirectional_streams(), QuicUtils::InvertPerspective(perspective()), true), nullptr)); EXPECT_TRUE(manager_.MaybeIncreaseLargestPeerStreamId( StreamCountToId(manager_.max_incoming_bidirectional_streams(), QuicUtils::InvertPerspective(perspective()), false), nullptr)); std::string expected_error_details = perspective() == Perspective::IS_SERVER ? "Stream id 400 would exceed stream count limit 100" : "Stream id 401 would exceed stream count limit 100"; std::string error_details; EXPECT_FALSE(manager_.MaybeIncreaseLargestPeerStreamId( StreamCountToId(manager_.max_incoming_bidirectional_streams() + 1, QuicUtils::InvertPerspective(perspective()), true), &error_details)); EXPECT_EQ(expected_error_details, error_details); expected_error_details = perspective() == Perspective::IS_SERVER ? 
"Stream id 402 would exceed stream count limit 100" : "Stream id 403 would exceed stream count limit 100"; EXPECT_FALSE(manager_.MaybeIncreaseLargestPeerStreamId( StreamCountToId(manager_.max_incoming_bidirectional_streams() + 1, QuicUtils::InvertPerspective(perspective()), false), &error_details)); EXPECT_EQ(expected_error_details, error_details); } TEST_P(UberQuicStreamIdManagerTest, OnStreamsBlockedFrame) { QuicStreamCount stream_count = manager_.advertised_max_incoming_bidirectional_streams() - 1; QuicStreamsBlockedFrame frame(kInvalidControlFrameId, stream_count, false); EXPECT_CALL(delegate_, SendMaxStreams(manager_.max_incoming_bidirectional_streams(), frame.unidirectional)) .Times(0); EXPECT_TRUE(manager_.OnStreamsBlockedFrame(frame, nullptr)); stream_count = manager_.advertised_max_incoming_unidirectional_streams() - 1; frame.stream_count = stream_count; frame.unidirectional = true; EXPECT_CALL(delegate_, SendMaxStreams(manager_.max_incoming_unidirectional_streams(), frame.unidirectional)) .Times(0); EXPECT_TRUE(manager_.OnStreamsBlockedFrame(frame, nullptr)); } TEST_P(UberQuicStreamIdManagerTest, SetMaxOpenOutgoingStreamsPlusFrame) { const size_t kNumMaxOutgoingStream = 123; EXPECT_TRUE(manager_.MaybeAllowNewOutgoingBidirectionalStreams( kNumMaxOutgoingStream)); EXPECT_TRUE(manager_.MaybeAllowNewOutgoingUnidirectionalStreams( kNumMaxOutgoingStream + 1)); EXPECT_EQ(kNumMaxOutgoingStream, manager_.max_outgoing_bidirectional_streams()); EXPECT_EQ(kNumMaxOutgoingStream + 1, manager_.max_outgoing_unidirectional_streams()); int i = kNumMaxOutgoingStream; while (i) { EXPECT_TRUE(manager_.CanOpenNextOutgoingBidirectionalStream()); manager_.GetNextOutgoingBidirectionalStreamId(); EXPECT_TRUE(manager_.CanOpenNextOutgoingUnidirectionalStream()); manager_.GetNextOutgoingUnidirectionalStreamId(); i--; } EXPECT_TRUE(manager_.CanOpenNextOutgoingUnidirectionalStream()); manager_.GetNextOutgoingUnidirectionalStreamId(); EXPECT_FALSE(manager_.CanOpenNextOutgoingUnidirectionalStream()); EXPECT_FALSE(manager_.CanOpenNextOutgoingBidirectionalStream()); } } } }
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/uber_quic_stream_id_manager.cc
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/uber_quic_stream_id_manager_test.cc
6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
73972d77-4a96-4dd2-9317-ea35ef86c645
cpp
google/libaddressinput
supplier
cpp/include/libaddressinput/supplier.h
cpp/test/supplier_test.cc
#ifndef I18N_ADDRESSINPUT_SUPPLIER_H_ #define I18N_ADDRESSINPUT_SUPPLIER_H_ #include <libaddressinput/callback.h> #include <string> namespace i18n { namespace addressinput { class LookupKey; class Rule; class Supplier { public: struct RuleHierarchy; using Callback = i18n::addressinput::Callback<const LookupKey&, const RuleHierarchy&>; virtual ~Supplier() = default; virtual void Supply(const LookupKey& lookup_key, const Callback& supplied) = 0; virtual void SupplyGlobally(const LookupKey& lookup_key, const Callback& supplied) = 0; virtual size_t GetLoadedRuleDepth(const std::string& region_code) const { return 0; } struct RuleHierarchy { RuleHierarchy() : rule() {} const Rule* rule[4]; }; }; } } #endif
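The four RuleHierarchy slots correspond to the four lookup-key depths (country, admin area, locality, dependent locality), as the depth tests below demonstrate. A minimal consumer sketch; CountDepth is illustrative and not part of the library:

#include <cstddef>
#include <libaddressinput/supplier.h>

// Rules fill front-to-back; the first null slot ends the known prefix.
std::size_t CountDepth(
    const i18n::addressinput::Supplier::RuleHierarchy& hierarchy) {
  std::size_t depth = 0;
  while (depth < 4 && hierarchy.rule[depth] != nullptr) ++depth;
  return depth;  // e.g. 1 for "SE" alone, 2 for "HK" + "九龍" (per the tests)
}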
#include <libaddressinput/supplier.h> #include <libaddressinput/address_data.h> #include <libaddressinput/callback.h> #include <libaddressinput/null_storage.h> #include <libaddressinput/ondemand_supplier.h> #include <libaddressinput/preload_supplier.h> #include <cstddef> #include <cstring> #include <memory> #include <string> #include <gtest/gtest.h> #include "lookup_key.h" #include "rule.h" #include "testdata_source.h" #include "util/size.h" namespace { using i18n::addressinput::AddressData; using i18n::addressinput::BuildCallback; using i18n::addressinput::LookupKey; using i18n::addressinput::NullStorage; using i18n::addressinput::OndemandSupplier; using i18n::addressinput::PreloadSupplier; using i18n::addressinput::Rule; using i18n::addressinput::Supplier; using i18n::addressinput::TestdataSource; class SupplierWrapper { public: virtual ~SupplierWrapper() = default; virtual void Supply(const LookupKey& lookup_key, const Supplier::Callback& supplied) = 0; }; class OndemandSupplierWrapper : public SupplierWrapper { public: OndemandSupplierWrapper(const OndemandSupplierWrapper&) = delete; OndemandSupplierWrapper& operator=(const OndemandSupplierWrapper&) = delete; static SupplierWrapper* Build() { return new OndemandSupplierWrapper; } void Supply(const LookupKey& lookup_key, const Supplier::Callback& supplied) override { ondemand_supplier_.Supply(lookup_key, supplied); } private: OndemandSupplierWrapper() : ondemand_supplier_(new TestdataSource(false), new NullStorage) {} OndemandSupplier ondemand_supplier_; }; class PreloadSupplierWrapper : public SupplierWrapper { public: PreloadSupplierWrapper(const PreloadSupplierWrapper&) = delete; PreloadSupplierWrapper& operator=(const PreloadSupplierWrapper&) = delete; static SupplierWrapper* Build() { return new PreloadSupplierWrapper; } void Supply(const LookupKey& lookup_key, const Supplier::Callback& supplied) override { const std::string& region_code = lookup_key.GetRegionCode(); if (!region_code.empty() && !preload_supplier_.IsLoaded(region_code)) { preload_supplier_.LoadRules(region_code, *loaded_); } preload_supplier_.Supply(lookup_key, supplied); } private: PreloadSupplierWrapper() : preload_supplier_(new TestdataSource(true), new NullStorage), loaded_(BuildCallback(this, &PreloadSupplierWrapper::Loaded)) {} void Loaded(bool success, const std::string&, int) { ASSERT_TRUE(success); } PreloadSupplier preload_supplier_; const std::unique_ptr<const PreloadSupplier::Callback> loaded_; }; class SupplierTest : public testing::TestWithParam<SupplierWrapper* (*)()> { public: SupplierTest(const SupplierTest&) = delete; SupplierTest& operator=(const SupplierTest&) = delete; protected: SupplierTest() : address_(), rule_(), called_(false), lookup_key_(), supplier_wrapper_((*GetParam())()), supplied_(BuildCallback(this, &SupplierTest::Supplied)) {} void Supply() { lookup_key_.FromAddress(address_); supplier_wrapper_->Supply(lookup_key_, *supplied_); } AddressData address_; const Rule* rule_[size(LookupKey::kHierarchy)]; bool called_; private: void Supplied(bool success, const LookupKey& lookup_key, const Supplier::RuleHierarchy& hierarchy) { ASSERT_TRUE(success); ASSERT_EQ(&lookup_key_, &lookup_key); std::memcpy(rule_, hierarchy.rule, sizeof rule_); called_ = true; } LookupKey lookup_key_; const std::unique_ptr<SupplierWrapper> supplier_wrapper_; const std::unique_ptr<const Supplier::Callback> supplied_; }; INSTANTIATE_TEST_SUITE_P(OndemandSupplier, SupplierTest, testing::Values(&OndemandSupplierWrapper::Build)); 
INSTANTIATE_TEST_SUITE_P(PreloadSupplier, SupplierTest, testing::Values(&PreloadSupplierWrapper::Build)); TEST_P(SupplierTest, Invalid) { address_ = {.region_code = "QZ"}; ASSERT_NO_FATAL_FAILURE(Supply()); ASSERT_TRUE(called_); EXPECT_TRUE(rule_[0] == nullptr); EXPECT_TRUE(rule_[1] == nullptr); EXPECT_TRUE(rule_[2] == nullptr); EXPECT_TRUE(rule_[3] == nullptr); } TEST_P(SupplierTest, Valid) { address_ = {.region_code = "SE"}; ASSERT_NO_FATAL_FAILURE(Supply()); ASSERT_TRUE(called_); EXPECT_TRUE(rule_[0] != nullptr); EXPECT_TRUE(rule_[1] == nullptr); EXPECT_TRUE(rule_[2] == nullptr); EXPECT_TRUE(rule_[3] == nullptr); EXPECT_EQ("data/SE", rule_[0]->GetId()); EXPECT_FALSE(rule_[0]->GetRequired().empty()); EXPECT_FALSE(rule_[0]->GetFormat().empty()); EXPECT_TRUE(rule_[0]->GetPostalCodeMatcher() != nullptr); } TEST_P(SupplierTest, KeyDepthEqualsMaxDepth) { address_ = { .region_code = "HK", .administrative_area = "九龍", }; ASSERT_NO_FATAL_FAILURE(Supply()); ASSERT_TRUE(called_); EXPECT_TRUE(rule_[0] != nullptr); EXPECT_TRUE(rule_[1] != nullptr); EXPECT_TRUE(rule_[2] == nullptr); EXPECT_TRUE(rule_[3] == nullptr); } TEST_P(SupplierTest, KeyDepthLargerThanMaxDepth) { address_ = { .region_code = "HK", .administrative_area = "九龍", .locality = "bbb", }; ASSERT_NO_FATAL_FAILURE(Supply()); ASSERT_TRUE(called_); EXPECT_TRUE(rule_[0] != nullptr); EXPECT_TRUE(rule_[1] != nullptr); EXPECT_TRUE(rule_[2] == nullptr); EXPECT_TRUE(rule_[3] == nullptr); } TEST_P(SupplierTest, KeyDepthSmallerThanMaxDepth) { address_ = {.region_code = "HK"}; ASSERT_NO_FATAL_FAILURE(Supply()); ASSERT_TRUE(called_); EXPECT_TRUE(rule_[0] != nullptr); EXPECT_TRUE(rule_[1] == nullptr); EXPECT_TRUE(rule_[2] == nullptr); EXPECT_TRUE(rule_[3] == nullptr); } TEST_P(SupplierTest, KeyDepth0) { address_ = {.region_code = "CN"}; ASSERT_NO_FATAL_FAILURE(Supply()); ASSERT_TRUE(called_); EXPECT_TRUE(rule_[0] != nullptr); EXPECT_TRUE(rule_[1] == nullptr); EXPECT_TRUE(rule_[2] == nullptr); EXPECT_TRUE(rule_[3] == nullptr); } TEST_P(SupplierTest, KeyDepth1) { address_ = { .region_code = "CN", .administrative_area = "新疆维吾尔自治区", }; ASSERT_NO_FATAL_FAILURE(Supply()); ASSERT_TRUE(called_); EXPECT_TRUE(rule_[0] != nullptr); EXPECT_TRUE(rule_[1] != nullptr); EXPECT_TRUE(rule_[2] == nullptr); EXPECT_TRUE(rule_[3] == nullptr); } TEST_P(SupplierTest, KeyDepth2) { address_ = { .region_code = "CN", .administrative_area = "新疆维吾尔自治区", .locality = "喀什地区", }; ASSERT_NO_FATAL_FAILURE(Supply()); ASSERT_TRUE(called_); EXPECT_TRUE(rule_[0] != nullptr); EXPECT_TRUE(rule_[1] != nullptr); EXPECT_TRUE(rule_[2] != nullptr); EXPECT_TRUE(rule_[3] == nullptr); } TEST_P(SupplierTest, KeyDepth3) { address_ = { .region_code = "CN", .administrative_area = "新疆维吾尔自治区", .locality = "喀什地区", .dependent_locality = "喀什市", }; ASSERT_NO_FATAL_FAILURE(Supply()); ASSERT_TRUE(called_); EXPECT_TRUE(rule_[0] != nullptr); EXPECT_TRUE(rule_[1] != nullptr); EXPECT_TRUE(rule_[2] != nullptr); EXPECT_TRUE(rule_[3] != nullptr); } TEST_P(SupplierTest, RuleCache) { address_ = { .region_code = "US", .administrative_area = "CA", }; ASSERT_NO_FATAL_FAILURE(Supply()); ASSERT_TRUE(called_); EXPECT_TRUE(rule_[0] != nullptr); EXPECT_TRUE(rule_[1] != nullptr); EXPECT_TRUE(rule_[2] == nullptr); EXPECT_TRUE(rule_[3] == nullptr); const Rule* rule[size(LookupKey::kHierarchy)]; std::memcpy(rule, rule_, sizeof rule); called_ = false; ASSERT_NO_FATAL_FAILURE(Supply()); ASSERT_TRUE(called_); EXPECT_EQ(rule[0], rule_[0]); EXPECT_EQ(rule[1], rule_[1]); EXPECT_EQ(rule[2], rule_[2]); EXPECT_EQ(rule[3], rule_[3]); } }
https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/include/libaddressinput/supplier.h
https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/supplier_test.cc
2610f7b1043d6784ada41392fc9392d1ea09ea07
2f120f00-d9e2-4867-8061-62cad0e7bc44
cpp
tensorflow/tensorflow
neg
tensorflow/lite/kernels/neg.cc
tensorflow/lite/delegates/xnnpack/neg_test.cc
#include "tensorflow/lite/kernels/internal/reference/neg.h" #include <stdint.h> #include "tensorflow/lite/core/c/common.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace neg { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); output->type = input->type; return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); switch (input->type) { case kTfLiteInt64: reference_ops::Negate( GetTensorShape(input), GetTensorData<int64_t>(input), GetTensorShape(output), GetTensorData<int64_t>(output)); break; case kTfLiteInt32: reference_ops::Negate( GetTensorShape(input), GetTensorData<int32_t>(input), GetTensorShape(output), GetTensorData<int32_t>(output)); break; case kTfLiteFloat32: reference_ops::Negate(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); break; default: TF_LITE_KERNEL_LOG( context, "Neg only currently supports int64, int32, and float32, got %d.", input->type); return kTfLiteError; } return kTfLiteOk; } } TfLiteRegistration* Register_NEG() { static TfLiteRegistration r = {nullptr, nullptr, neg::Prepare, neg::Eval}; return &r; } } } }
#include <cstdint> #include <functional> #include <memory> #include <random> #include <gtest/gtest.h> #include "tensorflow/lite/c/c_api_types.h" #include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h" #include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h" #include "tensorflow/lite/schema/schema_generated.h" namespace tflite { namespace xnnpack { TEST(Neg, 4D) { std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)> xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr), TfLiteXNNPackDelegateDelete); std::random_device random_device; auto rng = std::mt19937(random_device()); auto shape_rng = std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng)); const auto batch = shape_rng(); const auto height = shape_rng(); const auto width = shape_rng(); const auto channels = shape_rng(); UnaryElementwiseTester() .Shape({batch, height, width, channels}) .Test(BuiltinOperator_NEG, xnnpack_delegate.get()); } TEST(Neg, 3D) { std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)> xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr), TfLiteXNNPackDelegateDelete); std::random_device random_device; auto rng = std::mt19937(random_device()); auto shape_rng = std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng)); const auto batch = shape_rng(); const auto width = shape_rng(); const auto channels = shape_rng(); UnaryElementwiseTester() .Shape({batch, width, channels}) .Test(BuiltinOperator_NEG, xnnpack_delegate.get()); } TEST(Neg, 2D) { std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)> xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr), TfLiteXNNPackDelegateDelete); std::random_device random_device; auto rng = std::mt19937(random_device()); auto shape_rng = std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng)); const auto batch = shape_rng(); const auto channels = shape_rng(); UnaryElementwiseTester() .Shape({batch, channels}) .Test(BuiltinOperator_NEG, xnnpack_delegate.get()); } TEST(Neg, 1D) { std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)> xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr), TfLiteXNNPackDelegateDelete); std::random_device random_device; auto rng = std::mt19937(random_device()); auto shape_rng = std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng)); const auto batch = shape_rng(); UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_NEG, xnnpack_delegate.get()); } TEST(Neg, MultiThreading) { TfLiteXNNPackDelegateOptions delegate_options = TfLiteXNNPackDelegateOptionsDefault(); delegate_options.num_threads = 2; std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)> xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options), TfLiteXNNPackDelegateDelete); std::random_device random_device; auto rng = std::mt19937(random_device()); auto shape_rng = std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng)); const auto batch = shape_rng(); const auto height = shape_rng(); const auto width = shape_rng(); const auto channels = shape_rng(); UnaryElementwiseTester() .Shape({batch, height, width, channels}) .Test(BuiltinOperator_NEG, xnnpack_delegate.get()); } } }
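A hedged sketch of how the builtin kernel reaches an interpreter in practice. "neg_model.tflite" is a hypothetical model file; BuiltinOpResolver wires in Register_NEG() among the standard builtins. Header paths can vary across TFLite versions.

#include <memory>

#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"

std::unique_ptr<tflite::Interpreter> BuildNegInterpreter() {
  auto model = tflite::FlatBufferModel::BuildFromFile("neg_model.tflite");
  if (model == nullptr) return nullptr;
  tflite::ops::builtin::BuiltinOpResolver resolver;  // includes NEG
  std::unique_ptr<tflite::Interpreter> interpreter;
  if (tflite::InterpreterBuilder(*model, resolver)(&interpreter) != kTfLiteOk) {
    return nullptr;
  }
  if (interpreter->AllocateTensors() != kTfLiteOk) return nullptr;
  return interpreter;
}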
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/neg.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/neg_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
55da78bf-faae-4166-88d4-884db9fc1158
cpp
abseil/abseil-cpp
str_cat
absl/strings/str_cat.cc
absl/strings/str_cat_test.cc
#include "absl/strings/str_cat.h" #include <assert.h> #include <cstddef> #include <cstdint> #include <cstring> #include <initializer_list> #include <limits> #include <string> #include "absl/base/config.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/nullability.h" #include "absl/strings/internal/resize_uninitialized.h" #include "absl/strings/string_view.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace { inline absl::Nonnull<char*> Append(absl::Nonnull<char*> out, const AlphaNum& x) { char* after = out + x.size(); if (x.size() != 0) { memcpy(out, x.data(), x.size()); } return after; } inline void STLStringAppendUninitializedAmortized(std::string* dest, size_t to_append) { strings_internal::AppendUninitializedTraits<std::string>::Append(dest, to_append); } } std::string StrCat(const AlphaNum& a, const AlphaNum& b) { std::string result; constexpr uint64_t kMaxSize = uint64_t{std::numeric_limits<size_t>::max()}; const uint64_t result_size = static_cast<uint64_t>(a.size()) + static_cast<uint64_t>(b.size()); ABSL_INTERNAL_CHECK(result_size <= kMaxSize, "size_t overflow"); absl::strings_internal::STLStringResizeUninitialized( &result, static_cast<size_t>(result_size)); char* const begin = &result[0]; char* out = begin; out = Append(out, a); out = Append(out, b); assert(out == begin + result.size()); return result; } std::string StrCat(const AlphaNum& a, const AlphaNum& b, const AlphaNum& c) { std::string result; constexpr uint64_t kMaxSize = uint64_t{std::numeric_limits<size_t>::max()}; const uint64_t result_size = static_cast<uint64_t>(a.size()) + static_cast<uint64_t>(b.size()) + static_cast<uint64_t>(c.size()); ABSL_INTERNAL_CHECK(result_size <= kMaxSize, "size_t overflow"); strings_internal::STLStringResizeUninitialized( &result, static_cast<size_t>(result_size)); char* const begin = &result[0]; char* out = begin; out = Append(out, a); out = Append(out, b); out = Append(out, c); assert(out == begin + result.size()); return result; } std::string StrCat(const AlphaNum& a, const AlphaNum& b, const AlphaNum& c, const AlphaNum& d) { std::string result; constexpr uint64_t kMaxSize = uint64_t{std::numeric_limits<size_t>::max()}; const uint64_t result_size = static_cast<uint64_t>(a.size()) + static_cast<uint64_t>(b.size()) + static_cast<uint64_t>(c.size()) + static_cast<uint64_t>(d.size()); ABSL_INTERNAL_CHECK(result_size <= kMaxSize, "size_t overflow"); strings_internal::STLStringResizeUninitialized( &result, static_cast<size_t>(result_size)); char* const begin = &result[0]; char* out = begin; out = Append(out, a); out = Append(out, b); out = Append(out, c); out = Append(out, d); assert(out == begin + result.size()); return result; } namespace strings_internal { std::string CatPieces(std::initializer_list<absl::string_view> pieces) { std::string result; constexpr uint64_t kMaxSize = uint64_t{std::numeric_limits<size_t>::max()}; uint64_t total_size = 0; for (absl::string_view piece : pieces) { total_size += piece.size(); } ABSL_INTERNAL_CHECK(total_size <= kMaxSize, "size_t overflow"); strings_internal::STLStringResizeUninitialized( &result, static_cast<size_t>(total_size)); char* const begin = &result[0]; char* out = begin; for (absl::string_view piece : pieces) { const size_t this_size = piece.size(); if (this_size != 0) { memcpy(out, piece.data(), this_size); out += this_size; } } assert(out == begin + result.size()); return result; } #define ASSERT_NO_OVERLAP(dest, src) \ assert(((src).size() == 0) || \ (uintptr_t((src).data() - (dest).data()) > uintptr_t((dest).size()))) 
void AppendPieces(absl::Nonnull<std::string*> dest, std::initializer_list<absl::string_view> pieces) { size_t old_size = dest->size(); size_t to_append = 0; for (absl::string_view piece : pieces) { ASSERT_NO_OVERLAP(*dest, piece); to_append += piece.size(); } STLStringAppendUninitializedAmortized(dest, to_append); char* const begin = &(*dest)[0]; char* out = begin + old_size; for (absl::string_view piece : pieces) { const size_t this_size = piece.size(); if (this_size != 0) { memcpy(out, piece.data(), this_size); out += this_size; } } assert(out == begin + dest->size()); } } void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a) { ASSERT_NO_OVERLAP(*dest, a); std::string::size_type old_size = dest->size(); STLStringAppendUninitializedAmortized(dest, a.size()); char* const begin = &(*dest)[0]; char* out = begin + old_size; out = Append(out, a); assert(out == begin + dest->size()); } void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a, const AlphaNum& b) { ASSERT_NO_OVERLAP(*dest, a); ASSERT_NO_OVERLAP(*dest, b); std::string::size_type old_size = dest->size(); STLStringAppendUninitializedAmortized(dest, a.size() + b.size()); char* const begin = &(*dest)[0]; char* out = begin + old_size; out = Append(out, a); out = Append(out, b); assert(out == begin + dest->size()); } void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a, const AlphaNum& b, const AlphaNum& c) { ASSERT_NO_OVERLAP(*dest, a); ASSERT_NO_OVERLAP(*dest, b); ASSERT_NO_OVERLAP(*dest, c); std::string::size_type old_size = dest->size(); STLStringAppendUninitializedAmortized(dest, a.size() + b.size() + c.size()); char* const begin = &(*dest)[0]; char* out = begin + old_size; out = Append(out, a); out = Append(out, b); out = Append(out, c); assert(out == begin + dest->size()); } void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a, const AlphaNum& b, const AlphaNum& c, const AlphaNum& d) { ASSERT_NO_OVERLAP(*dest, a); ASSERT_NO_OVERLAP(*dest, b); ASSERT_NO_OVERLAP(*dest, c); ASSERT_NO_OVERLAP(*dest, d); std::string::size_type old_size = dest->size(); STLStringAppendUninitializedAmortized( dest, a.size() + b.size() + c.size() + d.size()); char* const begin = &(*dest)[0]; char* out = begin + old_size; out = Append(out, a); out = Append(out, b); out = Append(out, c); out = Append(out, d); assert(out == begin + dest->size()); } ABSL_NAMESPACE_END }
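The implementation's core trick, in miniature: size the destination once, then copy each piece into place, so concatenation costs one allocation instead of one per piece. A generic sketch using plain resize; absl's STLStringResizeUninitialized additionally skips the zero-fill:

#include <cstring>
#include <string>

std::string Cat2(const std::string& a, const std::string& b) {
  std::string result;
  result.resize(a.size() + b.size());  // one allocation for the final size
  char* out = &result[0];
  std::memcpy(out, a.data(), a.size());
  std::memcpy(out + a.size(), b.data(), b.size());
  return result;
}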
#include "absl/strings/str_cat.h" #include <cstddef> #include <cstdint> #include <cstdlib> #include <limits> #include <string> #include <vector> #include "gtest/gtest.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" #ifdef __ANDROID__ #define ABSL_EXPECT_DEBUG_DEATH(statement, regex) \ EXPECT_DEBUG_DEATH(statement, ".*") #else #define ABSL_EXPECT_DEBUG_DEATH(statement, regex) \ EXPECT_DEBUG_DEATH(statement, regex) #endif namespace { TEST(StrCat, Ints) { const short s = -1; const uint16_t us = 2; const int i = -3; const unsigned int ui = 4; const long l = -5; const unsigned long ul = 6; const long long ll = -7; const unsigned long long ull = 8; const ptrdiff_t ptrdiff = -9; const size_t size = 10; const intptr_t intptr = -12; const uintptr_t uintptr = 13; std::string answer; answer = absl::StrCat(s, us); EXPECT_EQ(answer, "-12"); answer = absl::StrCat(i, ui); EXPECT_EQ(answer, "-34"); answer = absl::StrCat(l, ul); EXPECT_EQ(answer, "-56"); answer = absl::StrCat(ll, ull); EXPECT_EQ(answer, "-78"); answer = absl::StrCat(ptrdiff, size); EXPECT_EQ(answer, "-910"); answer = absl::StrCat(ptrdiff, intptr); EXPECT_EQ(answer, "-9-12"); answer = absl::StrCat(uintptr, 0); EXPECT_EQ(answer, "130"); } TEST(StrCat, Enums) { enum SmallNumbers { One = 1, Ten = 10 } e = Ten; EXPECT_EQ("10", absl::StrCat(e)); EXPECT_EQ("-5", absl::StrCat(SmallNumbers(-5))); enum class Option { Boxers = 1, Briefs = -1 }; EXPECT_EQ("-1", absl::StrCat(Option::Briefs)); enum class Airplane : uint64_t { Airbus = 1, Boeing = 1000, Canary = 10000000000 }; EXPECT_EQ("10000000000", absl::StrCat(Airplane::Canary)); enum class TwoGig : int32_t { TwoToTheZero = 1, TwoToTheSixteenth = 1 << 16, TwoToTheThirtyFirst = INT32_MIN }; EXPECT_EQ("65536", absl::StrCat(TwoGig::TwoToTheSixteenth)); EXPECT_EQ("-2147483648", absl::StrCat(TwoGig::TwoToTheThirtyFirst)); EXPECT_EQ("-1", absl::StrCat(static_cast<TwoGig>(-1))); enum class FourGig : uint32_t { TwoToTheZero = 1, TwoToTheSixteenth = 1 << 16, TwoToTheThirtyFirst = 1U << 31 }; EXPECT_EQ("65536", absl::StrCat(FourGig::TwoToTheSixteenth)); EXPECT_EQ("2147483648", absl::StrCat(FourGig::TwoToTheThirtyFirst)); EXPECT_EQ("4294967295", absl::StrCat(static_cast<FourGig>(-1))); EXPECT_EQ("10000000000", absl::StrCat(Airplane::Canary)); } TEST(StrCat, Basics) { std::string result; std::string strs[] = {"Hello", "Cruel", "World"}; std::string stdstrs[] = { "std::Hello", "std::Cruel", "std::World" }; absl::string_view pieces[] = {"Hello", "Cruel", "World"}; const char* c_strs[] = { "Hello", "Cruel", "World" }; int32_t i32s[] = {'H', 'C', 'W'}; uint64_t ui64s[] = {12345678910LL, 10987654321LL}; EXPECT_EQ(absl::StrCat(), ""); result = absl::StrCat(false, true, 2, 3); EXPECT_EQ(result, "0123"); result = absl::StrCat(-1); EXPECT_EQ(result, "-1"); result = absl::StrCat(absl::SixDigits(0.5)); EXPECT_EQ(result, "0.5"); result = absl::StrCat(strs[1], pieces[2]); EXPECT_EQ(result, "CruelWorld"); result = absl::StrCat(stdstrs[1], " ", stdstrs[2]); EXPECT_EQ(result, "std::Cruel std::World"); result = absl::StrCat(strs[0], ", ", pieces[2]); EXPECT_EQ(result, "Hello, World"); result = absl::StrCat(strs[0], ", ", strs[1], " ", strs[2], "!"); EXPECT_EQ(result, "Hello, Cruel World!"); result = absl::StrCat(pieces[0], ", ", pieces[1], " ", pieces[2]); EXPECT_EQ(result, "Hello, Cruel World"); result = absl::StrCat(c_strs[0], ", ", c_strs[1], " ", c_strs[2]); EXPECT_EQ(result, "Hello, Cruel World"); result = absl::StrCat("ASCII ", i32s[0], ", ", i32s[1], " ", i32s[2], "!"); EXPECT_EQ(result, 
"ASCII 72, 67 87!"); result = absl::StrCat(ui64s[0], ", ", ui64s[1], "!"); EXPECT_EQ(result, "12345678910, 10987654321!"); std::string one = "1"; result = absl::StrCat("And a ", one.size(), " and a ", &result[2] - &result[0], " and a ", one, " 2 3 4", "!"); EXPECT_EQ(result, "And a 1 and a 2 and a 1 2 3 4!"); result = absl::StrCat("To output a char by ASCII/numeric value, use +: ", '!' + 0); EXPECT_EQ(result, "To output a char by ASCII/numeric value, use +: 33"); float f = 100000.5; result = absl::StrCat("A hundred K and a half is ", absl::SixDigits(f)); EXPECT_EQ(result, "A hundred K and a half is 100000"); f = 100001.5; result = absl::StrCat("A hundred K and one and a half is ", absl::SixDigits(f)); EXPECT_EQ(result, "A hundred K and one and a half is 100002"); double d = 100000.5; d *= d; result = absl::StrCat("A hundred K and a half squared is ", absl::SixDigits(d)); EXPECT_EQ(result, "A hundred K and a half squared is 1.00001e+10"); result = absl::StrCat(1, 2, 333, 4444, 55555, 666666, 7777777, 88888888, 999999999); EXPECT_EQ(result, "12333444455555666666777777788888888999999999"); } TEST(StrCat, CornerCases) { std::string result; result = absl::StrCat(""); EXPECT_EQ(result, ""); result = absl::StrCat("", ""); EXPECT_EQ(result, ""); result = absl::StrCat("", "", ""); EXPECT_EQ(result, ""); result = absl::StrCat("", "", "", ""); EXPECT_EQ(result, ""); result = absl::StrCat("", "", "", "", ""); EXPECT_EQ(result, ""); } TEST(StrCat, NullConstCharPtr) { const char* null = nullptr; EXPECT_EQ(absl::StrCat("mon", null, "key"), "monkey"); } template <typename T> struct Mallocator { typedef T value_type; typedef size_t size_type; typedef ptrdiff_t difference_type; typedef T* pointer; typedef const T* const_pointer; typedef T& reference; typedef const T& const_reference; size_type max_size() const { return size_t(std::numeric_limits<size_type>::max()) / sizeof(value_type); } template <typename U> struct rebind { typedef Mallocator<U> other; }; Mallocator() = default; template <class U> Mallocator(const Mallocator<U>&) {} T* allocate(size_t n) { return static_cast<T*>(std::malloc(n * sizeof(T))); } void deallocate(T* p, size_t) { std::free(p); } }; template <typename T, typename U> bool operator==(const Mallocator<T>&, const Mallocator<U>&) { return true; } template <typename T, typename U> bool operator!=(const Mallocator<T>&, const Mallocator<U>&) { return false; } TEST(StrCat, CustomAllocator) { using mstring = std::basic_string<char, std::char_traits<char>, Mallocator<char>>; const mstring str1("PARACHUTE OFF A BLIMP INTO MOSCONE!!"); const mstring str2("Read this book about coffee tables"); std::string result = absl::StrCat(str1, str2); EXPECT_EQ(result, "PARACHUTE OFF A BLIMP INTO MOSCONE!!" 
"Read this book about coffee tables"); } TEST(StrCat, MaxArgs) { std::string result; result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a"); EXPECT_EQ(result, "123456789a"); result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b"); EXPECT_EQ(result, "123456789ab"); result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c"); EXPECT_EQ(result, "123456789abc"); result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d"); EXPECT_EQ(result, "123456789abcd"); result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e"); EXPECT_EQ(result, "123456789abcde"); result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f"); EXPECT_EQ(result, "123456789abcdef"); result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g"); EXPECT_EQ(result, "123456789abcdefg"); result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h"); EXPECT_EQ(result, "123456789abcdefgh"); result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h", "i"); EXPECT_EQ(result, "123456789abcdefghi"); result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h", "i", "j"); EXPECT_EQ(result, "123456789abcdefghij"); result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"); EXPECT_EQ(result, "123456789abcdefghijk"); result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"); EXPECT_EQ(result, "123456789abcdefghijkl"); result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m"); EXPECT_EQ(result, "123456789abcdefghijklm"); result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n"); EXPECT_EQ(result, "123456789abcdefghijklmn"); result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o"); EXPECT_EQ(result, "123456789abcdefghijklmno"); result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p"); EXPECT_EQ(result, "123456789abcdefghijklmnop"); result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q"); EXPECT_EQ(result, "123456789abcdefghijklmnopq"); result = absl::StrCat( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"); EXPECT_EQ(result, "12345678910abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"); } TEST(StrAppend, Basics) { std::string result = "existing text"; std::string strs[] = {"Hello", "Cruel", "World"}; std::string stdstrs[] = { "std::Hello", "std::Cruel", "std::World" }; absl::string_view pieces[] = {"Hello", "Cruel", "World"}; const char* c_strs[] = { "Hello", "Cruel", "World" }; int32_t i32s[] = {'H', 'C', 'W'}; uint64_t ui64s[] = {12345678910LL, 10987654321LL}; std::string::size_type old_size = result.size(); absl::StrAppend(&result); EXPECT_EQ(result.size(), old_size); old_size = result.size(); absl::StrAppend(&result, strs[0]); EXPECT_EQ(result.substr(old_size), "Hello"); old_size = result.size(); absl::StrAppend(&result, strs[1], pieces[2]); EXPECT_EQ(result.substr(old_size), "CruelWorld"); old_size = result.size(); 
absl::StrAppend(&result, stdstrs[0], ", ", pieces[2]); EXPECT_EQ(result.substr(old_size), "std::Hello, World"); old_size = result.size(); absl::StrAppend(&result, strs[0], ", ", stdstrs[1], " ", strs[2], "!"); EXPECT_EQ(result.substr(old_size), "Hello, std::Cruel World!"); old_size = result.size(); absl::StrAppend(&result, pieces[0], ", ", pieces[1], " ", pieces[2]); EXPECT_EQ(result.substr(old_size), "Hello, Cruel World"); old_size = result.size(); absl::StrAppend(&result, c_strs[0], ", ", c_strs[1], " ", c_strs[2]); EXPECT_EQ(result.substr(old_size), "Hello, Cruel World"); old_size = result.size(); absl::StrAppend(&result, "ASCII ", i32s[0], ", ", i32s[1], " ", i32s[2], "!"); EXPECT_EQ(result.substr(old_size), "ASCII 72, 67 87!"); old_size = result.size(); absl::StrAppend(&result, ui64s[0], ", ", ui64s[1], "!"); EXPECT_EQ(result.substr(old_size), "12345678910, 10987654321!"); std::string one = "1"; old_size = result.size(); absl::StrAppend(&result, "And a ", one.size(), " and a ", &result[2] - &result[0], " and a ", one, " 2 3 4", "!"); EXPECT_EQ(result.substr(old_size), "And a 1 and a 2 and a 1 2 3 4!"); old_size = result.size(); absl::StrAppend(&result, "To output a char by ASCII/numeric value, use +: ", '!' + 0); EXPECT_EQ(result.substr(old_size), "To output a char by ASCII/numeric value, use +: 33"); old_size = result.size(); absl::StrAppend(&result, 1, 22, 333, 4444, 55555, 666666, 7777777, 88888888, 9); EXPECT_EQ(result.substr(old_size), "1223334444555556666667777777888888889"); old_size = result.size(); absl::StrAppend( &result, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "No limit thanks to C++11's variadic templates"); EXPECT_EQ(result.substr(old_size), "12345678910abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" "No limit thanks to C++11's variadic templates"); } TEST(StrCat, VectorBoolReferenceTypes) { std::vector<bool> v; v.push_back(true); v.push_back(false); std::vector<bool> const& cv = v; std::string result = absl::StrCat(v[0], v[1], cv[0], cv[1]); EXPECT_EQ(result, "1010"); } TEST(StrCat, AvoidsMemcpyWithNullptr) { EXPECT_EQ(absl::StrCat(42, absl::string_view{}), "42"); EXPECT_EQ(absl::StrCat(1, 2, 3, 4, 5, absl::string_view{}), "12345"); std::string result; absl::StrAppend(&result, 1, 2, 3, 4, 5, absl::string_view{}); EXPECT_EQ(result, "12345"); } #if GTEST_HAS_DEATH_TEST TEST(StrAppend, Death) { std::string s = "self"; ABSL_EXPECT_DEBUG_DEATH(absl::StrAppend(&s, s.c_str() + 1), "ssertion.*failed"); ABSL_EXPECT_DEBUG_DEATH(absl::StrAppend(&s, s), "ssertion.*failed"); } #endif TEST(StrAppend, CornerCases) { std::string result; absl::StrAppend(&result, ""); EXPECT_EQ(result, ""); absl::StrAppend(&result, "", ""); EXPECT_EQ(result, ""); absl::StrAppend(&result, "", "", ""); EXPECT_EQ(result, ""); absl::StrAppend(&result, "", "", "", ""); EXPECT_EQ(result, ""); absl::StrAppend(&result, "", "", "", "", ""); EXPECT_EQ(result, ""); } TEST(StrAppend, CornerCasesNonEmptyAppend) { for (std::string result : {"hello", "a string too long to fit in the SSO"}) { const std::string expected = result; absl::StrAppend(&result, ""); EXPECT_EQ(result, expected); absl::StrAppend(&result, "", ""); EXPECT_EQ(result, expected); absl::StrAppend(&result, "", "", ""); EXPECT_EQ(result, expected); absl::StrAppend(&result, "", "", "", ""); 
EXPECT_EQ(result, expected); absl::StrAppend(&result, "", "", "", "", ""); EXPECT_EQ(result, expected); } } template <typename IntType> void CheckHex(IntType v, const char* nopad_format, const char* zeropad_format, const char* spacepad_format) { char expected[256]; std::string actual = absl::StrCat(absl::Hex(v, absl::kNoPad)); snprintf(expected, sizeof(expected), nopad_format, v); EXPECT_EQ(expected, actual) << " decimal value " << v; for (int spec = absl::kZeroPad2; spec <= absl::kZeroPad20; ++spec) { std::string actual = absl::StrCat(absl::Hex(v, static_cast<absl::PadSpec>(spec))); snprintf(expected, sizeof(expected), zeropad_format, spec - absl::kZeroPad2 + 2, v); EXPECT_EQ(expected, actual) << " decimal value " << v; } for (int spec = absl::kSpacePad2; spec <= absl::kSpacePad20; ++spec) { std::string actual = absl::StrCat(absl::Hex(v, static_cast<absl::PadSpec>(spec))); snprintf(expected, sizeof(expected), spacepad_format, spec - absl::kSpacePad2 + 2, v); EXPECT_EQ(expected, actual) << " decimal value " << v; } } template <typename IntType> void CheckDec(IntType v, const char* nopad_format, const char* zeropad_format, const char* spacepad_format) { char expected[256]; std::string actual = absl::StrCat(absl::Dec(v, absl::kNoPad)); snprintf(expected, sizeof(expected), nopad_format, v); EXPECT_EQ(expected, actual) << " decimal value " << v; for (int spec = absl::kZeroPad2; spec <= absl::kZeroPad20; ++spec) { std::string actual = absl::StrCat(absl::Dec(v, static_cast<absl::PadSpec>(spec))); snprintf(expected, sizeof(expected), zeropad_format, spec - absl::kZeroPad2 + 2, v); EXPECT_EQ(expected, actual) << " decimal value " << v << " format '" << zeropad_format << "' digits " << (spec - absl::kZeroPad2 + 2); } for (int spec = absl::kSpacePad2; spec <= absl::kSpacePad20; ++spec) { std::string actual = absl::StrCat(absl::Dec(v, static_cast<absl::PadSpec>(spec))); snprintf(expected, sizeof(expected), spacepad_format, spec - absl::kSpacePad2 + 2, v); EXPECT_EQ(expected, actual) << " decimal value " << v << " format '" << spacepad_format << "' digits " << (spec - absl::kSpacePad2 + 2); } } void CheckHexDec64(uint64_t v) { unsigned long long ullv = v; CheckHex(ullv, "%llx", "%0*llx", "%*llx"); CheckDec(ullv, "%llu", "%0*llu", "%*llu"); long long llv = static_cast<long long>(ullv); CheckDec(llv, "%lld", "%0*lld", "%*lld"); if (sizeof(v) == sizeof(&v)) { auto uintptr = static_cast<uintptr_t>(v); void* ptr = reinterpret_cast<void*>(uintptr); CheckHex(ptr, "%llx", "%0*llx", "%*llx"); } } void CheckHexDec32(uint32_t uv) { CheckHex(uv, "%x", "%0*x", "%*x"); CheckDec(uv, "%u", "%0*u", "%*u"); int32_t v = static_cast<int32_t>(uv); CheckDec(v, "%d", "%0*d", "%*d"); if (sizeof(v) == sizeof(&v)) { auto uintptr = static_cast<uintptr_t>(v); void* ptr = reinterpret_cast<void*>(uintptr); CheckHex(ptr, "%x", "%0*x", "%*x"); } } void CheckAll(uint64_t v) { CheckHexDec64(v); CheckHexDec32(static_cast<uint32_t>(v)); } void TestFastPrints() { for (int i = 0; i < 10000; i++) { CheckAll(i); } CheckAll(std::numeric_limits<uint64_t>::max()); CheckAll(std::numeric_limits<uint64_t>::max() - 1); CheckAll(std::numeric_limits<int64_t>::min()); CheckAll(std::numeric_limits<int64_t>::min() + 1); CheckAll(std::numeric_limits<uint32_t>::max()); CheckAll(std::numeric_limits<uint32_t>::max() - 1); CheckAll(std::numeric_limits<int32_t>::min()); CheckAll(std::numeric_limits<int32_t>::min() + 1); CheckAll(999999999); CheckAll(1000000000); CheckAll(9999999999); CheckAll(10000000000); CheckAll(999999999999999999); 
CheckAll(9999999999999999999u); CheckAll(1000000000000000000); CheckAll(10000000000000000000u); CheckAll(999999999876543210); CheckAll(9999999999876543210u); CheckAll(0x123456789abcdef0); CheckAll(0x12345678); int8_t minus_one_8bit = -1; EXPECT_EQ("ff", absl::StrCat(absl::Hex(minus_one_8bit))); int16_t minus_one_16bit = -1; EXPECT_EQ("ffff", absl::StrCat(absl::Hex(minus_one_16bit))); } TEST(Numbers, TestFunctionsMovedOverFromNumbersMain) { TestFastPrints(); } struct PointStringify { template <typename FormatSink> friend void AbslStringify(FormatSink& sink, const PointStringify& p) { sink.Append("("); sink.Append(absl::StrCat(p.x)); sink.Append(", "); sink.Append(absl::StrCat(p.y)); sink.Append(")"); } double x = 10.0; double y = 20.0; }; TEST(StrCat, AbslStringifyExample) { PointStringify p; EXPECT_EQ(absl::StrCat(p), "(10, 20)"); EXPECT_EQ(absl::StrCat("a ", p, " z"), "a (10, 20) z"); } struct PointStringifyUsingFormat { template <typename FormatSink> friend void AbslStringify(FormatSink& sink, const PointStringifyUsingFormat& p) { absl::Format(&sink, "(%g, %g)", p.x, p.y); } double x = 10.0; double y = 20.0; }; TEST(StrCat, AbslStringifyExampleUsingFormat) { PointStringifyUsingFormat p; EXPECT_EQ(absl::StrCat(p), "(10, 20)"); EXPECT_EQ(absl::StrCat("a ", p, " z"), "a (10, 20) z"); } enum class EnumWithStringify { Many = 0, Choices = 1 }; template <typename Sink> void AbslStringify(Sink& sink, EnumWithStringify e) { absl::Format(&sink, "%s", e == EnumWithStringify::Many ? "Many" : "Choices"); } TEST(StrCat, AbslStringifyWithEnum) { const auto e = EnumWithStringify::Choices; EXPECT_EQ(absl::StrCat(e), "Choices"); } template <typename Integer> void CheckSingleArgumentIntegerLimits() { Integer max = std::numeric_limits<Integer>::max(); Integer min = std::numeric_limits<Integer>::min(); EXPECT_EQ(absl::StrCat(max), std::to_string(max)); EXPECT_EQ(absl::StrCat(min), std::to_string(min)); } TEST(StrCat, SingleArgumentLimits) { CheckSingleArgumentIntegerLimits<int32_t>(); CheckSingleArgumentIntegerLimits<uint32_t>(); CheckSingleArgumentIntegerLimits<int64_t>(); CheckSingleArgumentIntegerLimits<uint64_t>(); } }
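A compact usage sketch of the public surface these tests cover; every call is the documented absl API, nothing repo-internal:

#include <string>

#include "absl/strings/str_cat.h"

int main() {
  std::string s = absl::StrCat("id=", 42, " ratio=", absl::SixDigits(0.5));
  absl::StrAppend(&s, " hex=", absl::Hex(255, absl::kZeroPad4));  // "00ff"
  // Never append a view of `s` into itself: ASSERT_NO_OVERLAP in str_cat.cc
  // fires in debug builds (see the StrAppend.Death test above).
  return 0;
}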
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/str_cat.cc
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/str_cat_test.cc
03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4
af8f681c-9589-46e6-9745-f5b0612bf4d2
cpp
google/quiche
moqt_messages
quiche/quic/moqt/moqt_messages.cc
quiche/quic/moqt/moqt_messages_test.cc
#include "quiche/quic/moqt/moqt_messages.h" #include <cstdint> #include <string> #include <vector> #include "absl/algorithm/container.h" #include "absl/strings/escaping.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "quiche/quic/platform/api/quic_bug_tracker.h" #include "quiche/common/platform/api/quiche_bug_tracker.h" namespace moqt { MoqtObjectStatus IntegerToObjectStatus(uint64_t integer) { if (integer >= static_cast<uint64_t>(MoqtObjectStatus::kInvalidObjectStatus)) { return MoqtObjectStatus::kInvalidObjectStatus; } return static_cast<MoqtObjectStatus>(integer); } MoqtFilterType GetFilterType(const MoqtSubscribe& message) { if (!message.end_group.has_value() && message.end_object.has_value()) { return MoqtFilterType::kNone; } bool has_start = message.start_group.has_value() && message.start_object.has_value(); if (message.end_group.has_value()) { if (has_start) { if (*message.end_group < *message.start_group) { return MoqtFilterType::kNone; } else if (*message.end_group == *message.start_group && *message.end_object <= *message.start_object) { if (*message.end_object < *message.start_object) { return MoqtFilterType::kNone; } else if (*message.end_object == *message.start_object) { return MoqtFilterType::kAbsoluteStart; } } return MoqtFilterType::kAbsoluteRange; } } else { if (has_start) { return MoqtFilterType::kAbsoluteStart; } else if (!message.start_group.has_value()) { if (message.start_object.has_value()) { if (message.start_object.value() == 0) { return MoqtFilterType::kLatestGroup; } } else { return MoqtFilterType::kLatestObject; } } } return MoqtFilterType::kNone; } std::string MoqtMessageTypeToString(const MoqtMessageType message_type) { switch (message_type) { case MoqtMessageType::kClientSetup: return "CLIENT_SETUP"; case MoqtMessageType::kServerSetup: return "SERVER_SETUP"; case MoqtMessageType::kSubscribe: return "SUBSCRIBE_REQUEST"; case MoqtMessageType::kSubscribeOk: return "SUBSCRIBE_OK"; case MoqtMessageType::kSubscribeError: return "SUBSCRIBE_ERROR"; case MoqtMessageType::kUnsubscribe: return "UNSUBSCRIBE"; case MoqtMessageType::kSubscribeDone: return "SUBSCRIBE_DONE"; case MoqtMessageType::kSubscribeUpdate: return "SUBSCRIBE_UPDATE"; case MoqtMessageType::kAnnounceCancel: return "ANNOUNCE_CANCEL"; case MoqtMessageType::kTrackStatusRequest: return "TRACK_STATUS_REQUEST"; case MoqtMessageType::kTrackStatus: return "TRACK_STATUS"; case MoqtMessageType::kAnnounce: return "ANNOUNCE"; case MoqtMessageType::kAnnounceOk: return "ANNOUNCE_OK"; case MoqtMessageType::kAnnounceError: return "ANNOUNCE_ERROR"; case MoqtMessageType::kUnannounce: return "UNANNOUNCE"; case MoqtMessageType::kGoAway: return "GOAWAY"; case MoqtMessageType::kSubscribeNamespace: return "SUBSCRIBE_NAMESPACE"; case MoqtMessageType::kSubscribeNamespaceOk: return "SUBSCRIBE_NAMESPACE_OK"; case MoqtMessageType::kSubscribeNamespaceError: return "SUBSCRIBE_NAMESPACE_ERROR"; case MoqtMessageType::kUnsubscribeNamespace: return "UNSUBSCRIBE_NAMESPACE"; case MoqtMessageType::kMaxSubscribeId: return "MAX_SUBSCRIBE_ID"; case MoqtMessageType::kObjectAck: return "OBJECT_ACK"; } return "Unknown message " + std::to_string(static_cast<int>(message_type)); } std::string MoqtDataStreamTypeToString(MoqtDataStreamType type) { switch (type) { case MoqtDataStreamType::kObjectDatagram: return "OBJECT_PREFER_DATAGRAM"; case MoqtDataStreamType::kStreamHeaderTrack: return "STREAM_HEADER_TRACK"; case 
MoqtDataStreamType::kStreamHeaderSubgroup: return "STREAM_HEADER_SUBGROUP"; case MoqtDataStreamType::kPadding: return "PADDING"; } return "Unknown stream type " + absl::StrCat(static_cast<int>(type)); } std::string MoqtForwardingPreferenceToString( MoqtForwardingPreference preference) { switch (preference) { case MoqtForwardingPreference::kDatagram: return "DATAGRAM"; case MoqtForwardingPreference::kTrack: return "TRACK"; case MoqtForwardingPreference::kSubgroup: return "SUBGROUP"; } QUIC_BUG(quic_bug_bad_moqt_message_type_01) << "Unknown preference " << std::to_string(static_cast<int>(preference)); return "Unknown preference " + std::to_string(static_cast<int>(preference)); } MoqtForwardingPreference GetForwardingPreference(MoqtDataStreamType type) { switch (type) { case MoqtDataStreamType::kObjectDatagram: return MoqtForwardingPreference::kDatagram; case MoqtDataStreamType::kStreamHeaderTrack: return MoqtForwardingPreference::kTrack; case MoqtDataStreamType::kStreamHeaderSubgroup: return MoqtForwardingPreference::kSubgroup; default: break; } QUIC_BUG(quic_bug_bad_moqt_message_type_02) << "Message type does not indicate forwarding preference"; return MoqtForwardingPreference::kSubgroup; }; MoqtDataStreamType GetMessageTypeForForwardingPreference( MoqtForwardingPreference preference) { switch (preference) { case MoqtForwardingPreference::kDatagram: return MoqtDataStreamType::kObjectDatagram; case MoqtForwardingPreference::kTrack: return MoqtDataStreamType::kStreamHeaderTrack; case MoqtForwardingPreference::kSubgroup: return MoqtDataStreamType::kStreamHeaderSubgroup; } QUIC_BUG(quic_bug_bad_moqt_message_type_03) << "Forwarding preference does not indicate message type"; return MoqtDataStreamType::kStreamHeaderSubgroup; } std::string FullTrackName::ToString() const { std::vector<std::string> bits; bits.reserve(tuple_.size()); for (absl::string_view raw_bit : tuple_) { bits.push_back(absl::StrCat("\"", absl::CHexEscape(raw_bit), "\"")); } return absl::StrCat("{", absl::StrJoin(bits, ", "), "}"); } bool FullTrackName::operator==(const FullTrackName& other) const { if (tuple_.size() != other.tuple_.size()) { return false; } return absl::c_equal(tuple_, other.tuple_); } bool FullTrackName::operator<(const FullTrackName& other) const { return absl::c_lexicographical_compare(tuple_, other.tuple_); } FullTrackName::FullTrackName(absl::Span<const absl::string_view> elements) : tuple_(elements.begin(), elements.end()) {} }
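The test file below exercises FullTrackName but not GetFilterType(). A minimal hedged sketch of the filter-type resolution implemented above, assuming MoqtSubscribe is default-constructible and its start_group/start_object/end_group/end_object fields are std::optional<uint64_t> (those declarations live in the header, not shown here):

#include <cassert>

#include "quiche/quic/moqt/moqt_messages.h"

// Hedged sketch: walks GetFilterType() through the filter shapes it
// recognizes. Field types are assumed to be std::optional<uint64_t>.
void FilterTypeSketch() {
  moqt::MoqtSubscribe subscribe;
  // No start or end position: the subscriber wants the latest object.
  assert(moqt::GetFilterType(subscribe) == moqt::MoqtFilterType::kLatestObject);

  // Only start_object == 0: the subscriber wants the latest group.
  subscribe.start_object = 0;
  assert(moqt::GetFilterType(subscribe) == moqt::MoqtFilterType::kLatestGroup);

  // Complete start position, no end: an open-ended absolute start.
  subscribe.start_group = 5;
  assert(moqt::GetFilterType(subscribe) ==
         moqt::MoqtFilterType::kAbsoluteStart);

  // Start plus a later end group: a bounded absolute range.
  subscribe.end_group = 7;
  assert(moqt::GetFilterType(subscribe) ==
         moqt::MoqtFilterType::kAbsoluteRange);

  // end_object without end_group is malformed and yields kNone.
  subscribe = moqt::MoqtSubscribe();
  subscribe.end_object = 3;
  assert(moqt::GetFilterType(subscribe) == moqt::MoqtFilterType::kNone);
}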
#include "quiche/quic/moqt/moqt_messages.h" #include <vector> #include "absl/hash/hash.h" #include "absl/strings/string_view.h" #include "quiche/common/platform/api/quiche_test.h" namespace moqt::test { namespace { TEST(MoqtMessagesTest, FullTrackNameConstructors) { FullTrackName name1({"foo", "bar"}); std::vector<absl::string_view> list = {"foo", "bar"}; FullTrackName name2(list); EXPECT_EQ(name1, name2); EXPECT_EQ(absl::HashOf(name1), absl::HashOf(name2)); } TEST(MoqtMessagesTest, FullTrackNameOrder) { FullTrackName name1({"a", "b"}); FullTrackName name2({"a", "b", "c"}); FullTrackName name3({"b", "a"}); EXPECT_LT(name1, name2); EXPECT_LT(name2, name3); EXPECT_LT(name1, name3); } TEST(MoqtMessagesTest, FullTrackNameInNamespace) { FullTrackName name1({"a", "b"}); FullTrackName name2({"a", "b", "c"}); FullTrackName name3({"d", "b"}); EXPECT_TRUE(name2.InNamespace(name1)); EXPECT_FALSE(name1.InNamespace(name2)); EXPECT_TRUE(name1.InNamespace(name1)); EXPECT_FALSE(name2.InNamespace(name3)); } TEST(MoqtMessagesTest, FullTrackNameToString) { FullTrackName name1({"a", "b"}); EXPECT_EQ(name1.ToString(), R"({"a", "b"})"); FullTrackName name2({"\xff", "\x61"}); EXPECT_EQ(name2.ToString(), R"({"\xff", "a"})"); } } }
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/moqt/moqt_messages.cc
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/moqt/moqt_messages_test.cc
6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
ca502529-5b2d-41f7-a2a5-93772bdad653
cpp
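One property of the moqt_messages.cc excerpt above worth making explicit: GetForwardingPreference() and GetMessageTypeForForwardingPreference() are mutual inverses for the three stream types that carry objects (kPadding has no forwarding preference and is excluded). A short hedged round-trip check:

#include <cassert>

#include "quiche/quic/moqt/moqt_messages.h"

// Hedged sketch: the data-stream-type <-> forwarding-preference mapping
// defined above round-trips for every stream type except kPadding.
void ForwardingPreferenceRoundTrip() {
  using moqt::MoqtDataStreamType;
  for (MoqtDataStreamType type :
       {MoqtDataStreamType::kObjectDatagram,
        MoqtDataStreamType::kStreamHeaderTrack,
        MoqtDataStreamType::kStreamHeaderSubgroup}) {
    moqt::MoqtForwardingPreference preference =
        moqt::GetForwardingPreference(type);
    assert(moqt::GetMessageTypeForForwardingPreference(preference) == type);
  }
}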
google/arolla
eval
arolla/expr/eval/eval.cc
arolla/expr/eval/eval_test.cc
#include "arolla/expr/eval/eval.h" #include <algorithm> #include <cstddef> #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include "absl/types/span.h" #include "arolla/expr/eval/dynamic_compiled_expr.h" #include "arolla/expr/eval/prepare_expression.h" #include "arolla/expr/expr.h" #include "arolla/expr/expr_debug_string.h" #include "arolla/expr/expr_node.h" #include "arolla/expr/expr_operator.h" #include "arolla/expr/expr_stack_trace.h" #include "arolla/memory/frame.h" #include "arolla/qexpr/evaluation_engine.h" #include "arolla/qtype/qtype.h" #include "arolla/qtype/typed_slot.h" #include "arolla/util/fingerprint.h" #include "arolla/util/status_macros_backport.h" namespace arolla::expr { absl::StatusOr<std::unique_ptr<CompiledExpr>> CompileForDynamicEvaluation( const DynamicEvaluationEngineOptions& options, const ExprNodePtr& expr, const absl::flat_hash_map<std::string, QTypePtr>& input_types, const absl::flat_hash_map<std::string, ExprNodePtr>& side_outputs) { auto expr_with_side_outputs = expr; std::vector<std::string> side_output_names; if (!side_outputs.empty()) { side_output_names.reserve(side_outputs.size()); for (const auto& [name, _] : side_outputs) { side_output_names.push_back(name); } std::sort(side_output_names.begin(), side_output_names.end()); std::vector<ExprNodePtr> exprs = {expr_with_side_outputs}; exprs.reserve(side_outputs.size() + 1); for (const auto& name : side_output_names) { exprs.push_back(side_outputs.at(name)); } ASSIGN_OR_RETURN( expr_with_side_outputs, BindOp(eval_internal::InternalRootOperator(), std::move(exprs), {})); } std::shared_ptr<LightweightExprStackTrace> stack_trace = nullptr; if (options.enable_expr_stack_trace) { stack_trace = std::make_shared<LightweightExprStackTrace>(); } ASSIGN_OR_RETURN( ExprNodePtr prepared_expr, eval_internal::PrepareExpression(expr_with_side_outputs, input_types, options, stack_trace)); auto placeholder_keys = GetPlaceholderKeys(prepared_expr); if (!placeholder_keys.empty()) { return absl::FailedPreconditionError(absl::StrFormat( "placeholders should be substituted before " "evaluation: %s, got %s", absl::StrJoin(placeholder_keys, ","), ToDebugString(prepared_expr))); } absl::flat_hash_map<Fingerprint, QTypePtr> node_types; ASSIGN_OR_RETURN(prepared_expr, eval_internal::ExtractQTypesForCompilation( prepared_expr, &node_types, stack_trace)); if (stack_trace != nullptr) { stack_trace->AddRepresentations(expr_with_side_outputs, prepared_expr); } ASSIGN_OR_RETURN(auto used_input_types, eval_internal::LookupLeafQTypes(prepared_expr, node_types)); ASSIGN_OR_RETURN(auto named_output_types, eval_internal::LookupNamedOutputTypes( prepared_expr, side_output_names, node_types)); for (const auto& [key, qtype] : used_input_types) { if (qtype == nullptr) { return absl::FailedPreconditionError(absl::StrFormat( "unable to deduce input type for L.%s in the expression %s", key, GetDebugSnippet(prepared_expr))); } } ASSIGN_OR_RETURN(QTypePtr output_type, eval_internal::LookupQType(prepared_expr, node_types)); if (output_type == nullptr) { return absl::FailedPreconditionError( absl::StrFormat("unable to deduce output type in the expression %s", GetDebugSnippet(prepared_expr))); } return std::unique_ptr<CompiledExpr>(new eval_internal::DynamicCompiledExpr( options, std::move(used_input_types), output_type, 
std::move(named_output_types), std::move(prepared_expr), std::move(side_output_names), std::move(node_types), std::move(stack_trace))); } absl::StatusOr<std::unique_ptr<BoundExpr>> CompileAndBindForDynamicEvaluation( const DynamicEvaluationEngineOptions& options, FrameLayout::Builder* layout_builder, const ExprNodePtr& expr, const absl::flat_hash_map<std::string, TypedSlot>& input_slots, std::optional<TypedSlot> output_slot, const absl::flat_hash_map<std::string, ExprNodePtr>& side_outputs) { ASSIGN_OR_RETURN(auto compiled_expr, CompileForDynamicEvaluation( options, expr, SlotsToTypes(input_slots), side_outputs)); ASSIGN_OR_RETURN( auto executable_expr, compiled_expr->Bind(layout_builder, input_slots, output_slot)); if (output_slot.has_value() && executable_expr->output_slot() != *output_slot) { return absl::InternalError("expression bound to a wrong output slot"); } return executable_expr; } absl::StatusOr<std::shared_ptr<BoundExpr>> CompileAndBindExprOperator( const DynamicEvaluationEngineOptions& options, FrameLayout::Builder* layout_builder, const ExprOperatorPtr& op, absl::Span<const TypedSlot> input_slots, std::optional<TypedSlot> output_slot) { std::vector<absl::StatusOr<ExprNodePtr>> inputs; inputs.reserve(input_slots.size()); absl::flat_hash_map<std::string, TypedSlot> input_slots_map; input_slots_map.reserve(input_slots.size()); for (size_t i = 0; i < input_slots.size(); ++i) { std::string name = absl::StrFormat("input_%d", i); inputs.push_back(Leaf(name)); input_slots_map.emplace(name, input_slots[i]); } ASSIGN_OR_RETURN(auto expr, CallOp(op, inputs)); ASSIGN_OR_RETURN(auto evaluator, CompileAndBindForDynamicEvaluation( options, layout_builder, expr, input_slots_map, output_slot)); return std::shared_ptr<BoundExpr>(std::move(evaluator)); } }
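CompileAndBindExprOperator() above synthesizes leaves named input_0, input_1, ... and matches them positionally to the slots it receives, so callers never name leaves by hand. A hedged usage sketch (the lambda-operator pattern mirrors the test file below; BindDoubler itself is a hypothetical helper name):

#include <memory>
#include <optional>

#include "absl/status/statusor.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/lambda_expr_operator.h"
#include "arolla/memory/frame.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/status_macros_backport.h"

// Hedged sketch: binds a lambda operator that doubles its argument directly
// to a float slot via CompileAndBindExprOperator().
absl::StatusOr<std::shared_ptr<arolla::BoundExpr>> BindDoubler(
    arolla::FrameLayout::Builder* layout_builder,
    arolla::FrameLayout::Slot<float> x_slot) {
  ASSIGN_OR_RETURN(
      auto doubler_op,
      arolla::expr::MakeLambdaOperator(
          arolla::expr::ExprOperatorSignature{{"x"}},
          arolla::expr::CallOp("math.add", {arolla::expr::Placeholder("x"),
                                            arolla::expr::Placeholder("x")})));
  // The single slot is wired to the synthesized leaf input_0.
  return arolla::expr::CompileAndBindExprOperator(
      arolla::expr::DynamicEvaluationEngineOptions{}, layout_builder,
      doubler_op, {arolla::TypedSlot::FromSlot(x_slot)}, std::nullopt);
}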
#include "arolla/expr/eval/eval.h" #include <cstdint> #include <memory> #include <optional> #include <string> #include <utility> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/container/flat_hash_map.h" #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/status/status_matchers.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "arolla/dense_array/dense_array.h" #include "arolla/dense_array/qtype/types.h" #include "arolla/expr/backend_wrapping_operator.h" #include "arolla/expr/basic_expr_operator.h" #include "arolla/expr/eval/executable_builder.h" #include "arolla/expr/eval/extensions.h" #include "arolla/expr/eval/invoke.h" #include "arolla/expr/eval/prepare_expression.h" #include "arolla/expr/eval/side_output.h" #include "arolla/expr/eval/test_utils.h" #include "arolla/expr/expr.h" #include "arolla/expr/expr_attributes.h" #include "arolla/expr/expr_node.h" #include "arolla/expr/expr_operator.h" #include "arolla/expr/expr_operator_signature.h" #include "arolla/expr/lambda_expr_operator.h" #include "arolla/expr/optimization/default/default_optimizer.h" #include "arolla/expr/testing/test_operators.h" #include "arolla/expr/testing/testing.h" #include "arolla/expr/tuple_expr_operator.h" #include "arolla/io/accessors_input_loader.h" #include "arolla/io/input_loader.h" #include "arolla/memory/frame.h" #include "arolla/memory/memory_allocation.h" #include "arolla/memory/optional_value.h" #include "arolla/qexpr/bound_operators.h" #include "arolla/qexpr/eval_context.h" #include "arolla/qexpr/evaluation_engine.h" #include "arolla/qexpr/operators.h" #include "arolla/qexpr/qexpr_operator_signature.h" #include "arolla/qtype/base_types.h" #include "arolla/qtype/qtype.h" #include "arolla/qtype/qtype_traits.h" #include "arolla/qtype/testing/qtype.h" #include "arolla/qtype/typed_slot.h" #include "arolla/qtype/typed_value.h" #include "arolla/util/fast_dynamic_downcast_final.h" #include "arolla/util/fingerprint.h" #include "arolla/util/text.h" #include "arolla/util/status_macros_backport.h" namespace arolla::expr { namespace { using ::absl_testing::IsOk; using ::absl_testing::IsOkAndHolds; using ::absl_testing::StatusIs; using ::arolla::testing::InvokeExprOperator; using ::arolla::testing::TypedValueWith; using ::arolla::testing::WithExportAnnotation; using ::arolla::testing::WithNameAnnotation; using ::arolla::testing::WithQTypeAnnotation; using ::testing::_; using ::testing::ElementsAre; using ::testing::Eq; using ::testing::FloatEq; using ::testing::HasSubstr; using ::testing::IsEmpty; using ::testing::Pair; using ::testing::Property; using ::testing::UnorderedElementsAre; struct TestParams { bool use_default_optimizer = false; }; class EvalVisitorParameterizedTest : public ::testing::TestWithParam<TestParams> { protected: EvalVisitorParameterizedTest() { if (GetParam().use_default_optimizer) { auto optimizer_or = DefaultOptimizer(); CHECK_OK(optimizer_or.status()); options_.optimizer = optimizer_or.value(); } options_.collect_op_descriptions = true; } DynamicEvaluationEngineOptions options_; }; INSTANTIATE_TEST_SUITE_P( Optimizer, EvalVisitorParameterizedTest, ::testing::Values(TestParams{.use_default_optimizer = false}, TestParams{.use_default_optimizer = true})); TEST_P(EvalVisitorParameterizedTest, SmokeTest) { ASSERT_OK_AND_ASSIGN( auto expr, CallOp("math.add", {CallOp("math.add", {Leaf("x"), Leaf("y")}), Leaf("z")})); FrameLayout::Builder layout_builder; auto x_slot = 
layout_builder.AddSlot<float>(); auto y_slot = layout_builder.AddSlot<float>(); auto z_slot = layout_builder.AddSlot<float>(); ASSERT_OK_AND_ASSIGN( auto executable_expr, CompileAndBindForDynamicEvaluation(options_, &layout_builder, expr, {{"x", TypedSlot::FromSlot(x_slot)}, {"y", TypedSlot::FromSlot(y_slot)}, {"z", TypedSlot::FromSlot(z_slot)}})); EXPECT_THAT( executable_expr, AllOf(InitOperationsAre(), EvalOperationsAre( "FLOAT32 [0x10] = math.add(FLOAT32 [0x00], FLOAT32 [0x04])", "FLOAT32 [0x0C] = math.add(FLOAT32 [0x10], FLOAT32 [0x08])"))); FrameLayout layout = std::move(layout_builder).Build(); RootEvaluationContext ctx(&layout); EXPECT_OK(executable_expr->InitializeLiterals(&ctx)); ctx.Set(x_slot, 1.0f); ctx.Set(y_slot, 10.0f); ctx.Set(z_slot, 100.0f); EXPECT_THAT(executable_expr->Execute(&ctx), IsOk()); EXPECT_THAT(executable_expr->named_output_slots(), IsEmpty()); ASSERT_OK_AND_ASSIGN(auto output_slot, executable_expr->output_slot().ToSlot<float>()); EXPECT_EQ(ctx.Get(output_slot), 111.0f); EXPECT_EQ(ctx.Get(x_slot), 1.0f); EXPECT_EQ(ctx.Get(y_slot), 10.0f); EXPECT_EQ(ctx.Get(z_slot), 100.0f); } TEST_P(EvalVisitorParameterizedTest, ReusingInputSlots) { ASSERT_OK_AND_ASSIGN( auto expr, CallOp("math.add", {CallOp("math.add", {CallOp("math.add", {Leaf("x1"), Leaf("x2")}), Leaf("x3")}), Leaf("x4")})); DynamicEvaluationEngineOptions options{.collect_op_descriptions = true}; auto create_input_slots = [](FrameLayout::Builder& layout_builder) { return absl::flat_hash_map<std::string, TypedSlot>{ {"x1", TypedSlot::FromSlot(layout_builder.AddSlot<float>())}, {"x2", TypedSlot::FromSlot(layout_builder.AddSlot<float>())}, {"x3", TypedSlot::FromSlot(layout_builder.AddSlot<float>())}, {"x4", TypedSlot::FromSlot(layout_builder.AddSlot<float>())}}; }; { FrameLayout::Builder layout_builder; auto input_slots = create_input_slots(layout_builder); EXPECT_THAT( CompileAndBindForDynamicEvaluation(options, &layout_builder, expr, input_slots), IsOkAndHolds(AllOf( InitOperationsAre(), EvalOperationsAre( "FLOAT32 [0x14] = math.add(FLOAT32 [0x00], FLOAT32 [0x04])", "FLOAT32 [0x18] = math.add(FLOAT32 [0x14], FLOAT32 [0x08])", "FLOAT32 [0x10] = math.add(FLOAT32 [0x18], FLOAT32 [0x0C])")))); } { options.allow_overriding_input_slots = true; FrameLayout::Builder layout_builder; auto input_slots = create_input_slots(layout_builder); EXPECT_THAT( CompileAndBindForDynamicEvaluation(options, &layout_builder, expr, input_slots), IsOkAndHolds(AllOf( InitOperationsAre(), EvalOperationsAre( "FLOAT32 [0x14] = math.add(FLOAT32 [0x00], FLOAT32 [0x04])", "FLOAT32 [0x04] = math.add(FLOAT32 [0x14], FLOAT32 [0x08])", "FLOAT32 [0x10] = math.add(FLOAT32 [0x04], FLOAT32 [0x0C])")))); } } TEST_P(EvalVisitorParameterizedTest, NamedNodesTest) { constexpr int kIters = 10; ASSERT_OK_AND_ASSIGN(auto xpy, CallOp("math.add", {Leaf("x"), Leaf("y")})); auto expr = xpy; for (int i = 0; i < kIters; ++i) { ASSERT_OK_AND_ASSIGN( expr, CallOp("math.maximum", {expr, WithNameAnnotation(expr, std::to_string(i))})); } FrameLayout::Builder layout_builder; auto x_slot = layout_builder.AddSlot<float>(); auto y_slot = layout_builder.AddSlot<float>(); ASSERT_OK_AND_ASSIGN( auto executable_expr, CompileAndBindForDynamicEvaluation(options_, &layout_builder, expr, {{"x", TypedSlot::FromSlot(x_slot)}, {"y", TypedSlot::FromSlot(y_slot)}})); EXPECT_THAT( executable_expr, AllOf(InitOperationsAre(), EvalOperationsAre( "FLOAT32 [0x0C] = math.add(FLOAT32 [0x00], FLOAT32 [0x04])", "FLOAT32 [0x10] = math.maximum(FLOAT32 [0x0C], FLOAT32 [0x0C])", "FLOAT32 [0x0C] = 
math.maximum(FLOAT32 [0x10], FLOAT32 [0x10])", "FLOAT32 [0x10] = math.maximum(FLOAT32 [0x0C], FLOAT32 [0x0C])", "FLOAT32 [0x0C] = math.maximum(FLOAT32 [0x10], FLOAT32 [0x10])", "FLOAT32 [0x10] = math.maximum(FLOAT32 [0x0C], FLOAT32 [0x0C])", "FLOAT32 [0x0C] = math.maximum(FLOAT32 [0x10], FLOAT32 [0x10])", "FLOAT32 [0x10] = math.maximum(FLOAT32 [0x0C], FLOAT32 [0x0C])", "FLOAT32 [0x0C] = math.maximum(FLOAT32 [0x10], FLOAT32 [0x10])", "FLOAT32 [0x10] = math.maximum(FLOAT32 [0x0C], FLOAT32 [0x0C])", "FLOAT32 [0x08] = math.maximum(FLOAT32 [0x10], FLOAT32 " "[0x10])"))); FrameLayout layout = std::move(layout_builder).Build(); EXPECT_EQ(layout.AllocSize(), sizeof(float) * 5); RootEvaluationContext ctx(&layout); EXPECT_OK(executable_expr->InitializeLiterals(&ctx)); ctx.Set(x_slot, 1.0f); ctx.Set(y_slot, 10.0f); EXPECT_THAT(executable_expr->Execute(&ctx), IsOk()); EXPECT_THAT(executable_expr->named_output_slots(), IsEmpty()); ASSERT_OK_AND_ASSIGN(auto output_slot, executable_expr->output_slot().ToSlot<float>()); EXPECT_EQ(ctx.Get(output_slot), 11); } TEST_P(EvalVisitorParameterizedTest, WithUsedSubSlotOfInput) { ASSERT_OK_AND_ASSIGN(auto expr, CallOp("core.has", {Leaf("x")})); FrameLayout::Builder layout_builder; auto x_slot = layout_builder.AddSlot<OptionalValue<float>>(); ASSERT_OK_AND_ASSIGN( auto executable_expr, CompileAndBindForDynamicEvaluation(options_, &layout_builder, expr, {{"x", TypedSlot::FromSlot(x_slot)}})); EXPECT_THAT( executable_expr, AllOf(InitOperationsAre(), EvalOperationsAre( "OPTIONAL_UNIT [0x08] = core._copy(OPTIONAL_UNIT [0x00])"))); FrameLayout layout = std::move(layout_builder).Build(); RootEvaluationContext ctx(&layout); EXPECT_OK(executable_expr->InitializeLiterals(&ctx)); ctx.Set(x_slot, 1.0f); EXPECT_THAT(executable_expr->Execute(&ctx), IsOk()); EXPECT_THAT(executable_expr->named_output_slots(), IsEmpty()); ASSERT_OK_AND_ASSIGN(auto output_slot, executable_expr->output_slot().ToSlot<OptionalUnit>()); EXPECT_EQ(ctx.Get(output_slot), kPresent); EXPECT_EQ(ctx.Get(x_slot), 1.0f); } TEST_P(EvalVisitorParameterizedTest, WithUsedSubSlotOfIntermediate) { ASSERT_OK_AND_ASSIGN( auto expr, CallOp("core.has", {CallOp("math.add", {Leaf("x"), Leaf("y")})})); FrameLayout::Builder layout_builder; auto x_slot = layout_builder.AddSlot<OptionalValue<float>>(); auto y_slot = layout_builder.AddSlot<OptionalValue<float>>(); ASSERT_OK_AND_ASSIGN( auto executable_expr, CompileAndBindForDynamicEvaluation(options_, &layout_builder, expr, {{"x", TypedSlot::FromSlot(x_slot)}, {"y", TypedSlot::FromSlot(y_slot)}})); EXPECT_THAT( executable_expr, AllOf(InitOperationsAre(), EvalOperationsAre( "OPTIONAL_FLOAT32 [0x14] = math.add(OPTIONAL_FLOAT32 [0x00], " "OPTIONAL_FLOAT32 [0x08])", "OPTIONAL_UNIT [0x10] = core._copy(OPTIONAL_UNIT [0x14])"))); FrameLayout layout = std::move(layout_builder).Build(); RootEvaluationContext ctx(&layout); EXPECT_OK(executable_expr->InitializeLiterals(&ctx)); ctx.Set(x_slot, 1.0f); ctx.Set(y_slot, 10.0f); EXPECT_THAT(executable_expr->Execute(&ctx), IsOk()); EXPECT_THAT(executable_expr->named_output_slots(), IsEmpty()); ASSERT_OK_AND_ASSIGN(auto output_slot, executable_expr->output_slot().ToSlot<OptionalUnit>()); EXPECT_EQ(ctx.Get(output_slot), kPresent); EXPECT_EQ(ctx.Get(x_slot), 1.0f); EXPECT_EQ(ctx.Get(y_slot), 10.0f); } TEST_P(EvalVisitorParameterizedTest, EvalWithNamedOutput) { DynamicEvaluationEngineOptions options; options.collect_op_descriptions = true; ASSERT_OK_AND_ASSIGN( auto expr, CallOp("math.add", {WithExportAnnotation( CallOp("math.add", {Leaf("x"), 
Leaf("y")}), "x+y"), Leaf("z")})); ASSERT_OK_AND_ASSIGN((auto [stripped_expr, side_outputs]), ExtractSideOutputs(expr)); FrameLayout::Builder layout_builder; auto x_slot = layout_builder.AddSlot<float>(); auto y_slot = layout_builder.AddSlot<float>(); auto z_slot = layout_builder.AddSlot<float>(); const QTypePtr f32 = GetQType<float>(); ASSERT_OK_AND_ASSIGN(auto compiled_expr, CompileForDynamicEvaluation( options, stripped_expr, {{"x", f32}, {"y", f32}, {"z", f32}}, side_outputs)); EXPECT_EQ(compiled_expr->output_type(), f32); EXPECT_THAT(compiled_expr->named_output_types(), UnorderedElementsAre(Pair("x+y", f32))); auto typed_output_slot = AddSlot(compiled_expr->output_type(), &layout_builder); ASSERT_OK_AND_ASSIGN(auto executable_expr, compiled_expr->Bind(&layout_builder, {{"x", TypedSlot::FromSlot(x_slot)}, {"y", TypedSlot::FromSlot(y_slot)}, {"z", TypedSlot::FromSlot(z_slot)}}, typed_output_slot)); EXPECT_THAT( executable_expr, AllOf(InitOperationsAre(), EvalOperationsAre( "FLOAT32 [0x10] = math.add(FLOAT32 [0x00], FLOAT32 [0x04])", "FLOAT32 [0x0C] = math.add(FLOAT32 [0x10], FLOAT32 [0x08])"))); FrameLayout layout = std::move(layout_builder).Build(); EXPECT_EQ(layout.AllocSize(), sizeof(float) * 5) << "Side outputs shouldn't create any extra overhead"; RootEvaluationContext ctx(&layout); EXPECT_OK(executable_expr->InitializeLiterals(&ctx)); ctx.Set(x_slot, 1.0f); ctx.Set(y_slot, 10.0f); ctx.Set(z_slot, 100.0f); EXPECT_THAT(executable_expr->Execute(&ctx), IsOk()); ASSERT_OK_AND_ASSIGN(auto output_slot, typed_output_slot.ToSlot<float>()); ASSERT_THAT(executable_expr->named_output_slots(), UnorderedElementsAre(Pair("x+y", _))); ASSERT_OK_AND_ASSIGN( auto xpy_slot, executable_expr->named_output_slots().at("x+y").ToSlot<float>()); EXPECT_EQ(ctx.Get(output_slot), 111.0f); EXPECT_EQ(ctx.Get(xpy_slot), 11.0f); } TEST_P(EvalVisitorParameterizedTest, EvalWithSideOutput) { DynamicEvaluationEngineOptions options; options.collect_op_descriptions = true; ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {Leaf("x"), Leaf("y")})); ASSERT_OK_AND_ASSIGN(auto side_output_expr, CallOp("math.multiply", {Leaf("y"), Leaf("z")})); FrameLayout::Builder layout_builder; auto x_slot = layout_builder.AddSlot<float>(); auto y_slot = layout_builder.AddSlot<float>(); auto z_slot = layout_builder.AddSlot<float>(); ASSERT_OK_AND_ASSIGN(auto executable_expr, CompileAndBindForDynamicEvaluation( options, &layout_builder, expr, {{"x", TypedSlot::FromSlot(x_slot)}, {"y", TypedSlot::FromSlot(y_slot)}, {"z", TypedSlot::FromSlot(z_slot)}}, std::nullopt, {{"y*z", side_output_expr}})); EXPECT_THAT( executable_expr, AllOf(InitOperationsAre(), EvalOperationsAre( "FLOAT32 [0x0C] = math.add(FLOAT32 [0x00], FLOAT32 [0x04])", "FLOAT32 [0x10] = math.multiply(FLOAT32 [0x04], FLOAT32 " "[0x08])"))); FrameLayout layout = std::move(layout_builder).Build(); RootEvaluationContext ctx(&layout); EXPECT_OK(executable_expr->InitializeLiterals(&ctx)); ctx.Set(x_slot, 1.0f); ctx.Set(y_slot, 10.0f); ctx.Set(z_slot, 100.0f); EXPECT_THAT(executable_expr->Execute(&ctx), IsOk()); ASSERT_OK_AND_ASSIGN(auto output_slot, executable_expr->output_slot().ToSlot<float>()); ASSERT_THAT(executable_expr->named_output_slots(), UnorderedElementsAre(Pair("y*z", _))); ASSERT_OK_AND_ASSIGN( auto side_output_slot, executable_expr->named_output_slots().at("y*z").ToSlot<float>()); EXPECT_EQ(ctx.Get(output_slot), 11.0f); EXPECT_EQ(ctx.Get(side_output_slot), 1000.0f); } TEST_P(EvalVisitorParameterizedTest, EvalWithShortCircuit) { ASSERT_OK_AND_ASSIGN( auto expr, 
CallOp("core.where", {Leaf("do_divide"), CallOp("math.multiply", {Leaf("x"), Leaf("y")}), CallOp("math.floordiv", {Leaf("x"), Leaf("y")})})); FrameLayout::Builder layout_builder; auto x_slot = layout_builder.AddSlot<OptionalValue<int>>(); auto y_slot = layout_builder.AddSlot<int>(); auto do_divide_slot = layout_builder.AddSlot<OptionalUnit>(); ASSERT_OK_AND_ASSIGN( auto executable_expr, CompileAndBindForDynamicEvaluation( options_, &layout_builder, expr, {{"x", TypedSlot::FromSlot(x_slot)}, {"y", TypedSlot::FromSlot(y_slot)}, {"do_divide", TypedSlot::FromSlot(do_divide_slot)}})); if (GetParam().use_default_optimizer) { EXPECT_THAT( executable_expr, AllOf(InitOperationsAre(), EvalOperationsAre( "OPTIONAL_INT32 [0x18] = core.to_optional._scalar(INT32 " "[0x08])", "jump_if_not<+2>(OPTIONAL_UNIT [0x0C])", "OPTIONAL_INT32 [0x10] = math.multiply(OPTIONAL_INT32 " "[0x00], OPTIONAL_INT32 [0x18])", "jump<+1>()", "OPTIONAL_INT32 [0x10] = math.floordiv(OPTIONAL_INT32 " "[0x00], OPTIONAL_INT32 [0x18])"))); } else { EXPECT_THAT( executable_expr, AllOf(InitOperationsAre(), EvalOperationsAre( "OPTIONAL_INT32 [0x18] = core.to_optional._scalar(INT32 " "[0x08])", "OPTIONAL_INT32 [0x20] = math.multiply(OPTIONAL_INT32 " "[0x00], OPTIONAL_INT32 [0x18])", "OPTIONAL_INT32 [0x28] = math.floordiv(OPTIONAL_INT32 " "[0x00], OPTIONAL_INT32 [0x18])", "OPTIONAL_INT32 [0x10] = core.where(OPTIONAL_UNIT [0x0C], " "OPTIONAL_INT32 [0x20], OPTIONAL_INT32 [0x28])"))); } FrameLayout layout = std::move(layout_builder).Build(); RootEvaluationContext ctx(&layout); EXPECT_OK(executable_expr->InitializeLiterals(&ctx)); ctx.Set(x_slot, 1); ctx.Set(y_slot, 0); ctx.Set(do_divide_slot, kPresent); if (GetParam().use_default_optimizer) { EXPECT_THAT(executable_expr->Execute(&ctx), IsOk()); ASSERT_OK_AND_ASSIGN( auto output_slot, executable_expr->output_slot().ToSlot<OptionalValue<int>>()); EXPECT_EQ(ctx.Get(output_slot), 0); } else { EXPECT_THAT(executable_expr->Execute(&ctx), StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("division by zero; during evaluation of " "operator math.floordiv"))); } } TEST_P(EvalVisitorParameterizedTest, EvalWithNamedOutputUnusedButExported) { DynamicEvaluationEngineOptions options; options.collect_op_descriptions = true; ASSERT_OK_AND_ASSIGN( auto first_op, MakeLambdaOperator(ExprOperatorSignature::Make("p0, _px, _py"), Placeholder("p0"))); ASSERT_OK_AND_ASSIGN( auto expr, CallOp(first_op, {CallOp("math.add", {Leaf("x"), Leaf("z")}), WithExportAnnotation(CallOp("math.add", {Leaf("x"), Leaf("y")}), "x+y"), WithExportAnnotation( CallOp("math.multiply", {Leaf("y"), Leaf("z")}), "y*z")})); ASSERT_OK_AND_ASSIGN((auto [stripped_expr, side_outputs]), ExtractSideOutputs(expr)); FrameLayout::Builder layout_builder; auto x_slot = layout_builder.AddSlot<float>(); auto y_slot = layout_builder.AddSlot<float>(); auto z_slot = layout_builder.AddSlot<float>(); ASSERT_OK_AND_ASSIGN(auto executable_expr, CompileAndBindForDynamicEvaluation( options, &layout_builder, stripped_expr, {{"x", TypedSlot::FromSlot(x_slot)}, {"y", TypedSlot::FromSlot(y_slot)}, {"z", TypedSlot::FromSlot(z_slot)}}, std::nullopt, side_outputs)); EXPECT_THAT( executable_expr, AllOf(InitOperationsAre(), EvalOperationsAre( "FLOAT32 [0x0C] = math.add(FLOAT32 [0x00], FLOAT32 [0x08])", "FLOAT32 [0x10] = math.add(FLOAT32 [0x00], FLOAT32 [0x04])", "FLOAT32 [0x14] = math.multiply(FLOAT32 [0x04], FLOAT32 " "[0x08])"))); FrameLayout layout = std::move(layout_builder).Build(); EXPECT_EQ(layout.AllocSize(), sizeof(float) * 6) << "Side outputs used outside of 
main expression require " "extra slots"; RootEvaluationContext ctx(&layout); EXPECT_OK(executable_expr->InitializeLiterals(&ctx)); ctx.Set(x_slot, 1.0f); ctx.Set(y_slot, 10.0f); ctx.Set(z_slot, 100.0f); EXPECT_THAT(executable_expr->Execute(&ctx), IsOk()); ASSERT_OK_AND_ASSIGN(auto output_slot, executable_expr->output_slot().ToSlot<float>()); EXPECT_EQ(ctx.Get(output_slot), 101.0f); ASSERT_THAT(executable_expr->named_output_slots(), UnorderedElementsAre(Pair("x+y", _), Pair("y*z", _))); ASSERT_OK_AND_ASSIGN( auto xpy_slot, executable_expr->named_output_slots().at("x+y").ToSlot<float>()); EXPECT_EQ(ctx.Get(xpy_slot), 11.0f); ASSERT_OK_AND_ASSIGN( auto xtz_slot, executable_expr->named_output_slots().at("y*z").ToSlot<float>()); EXPECT_EQ(ctx.Get(xtz_slot), 1000.0f); } TEST_P(EvalVisitorParameterizedTest, EvalWithExportAnnotation) { DynamicEvaluationEngineOptions options; options.collect_op_descriptions = true; ASSERT_OK_AND_ASSIGN( auto expr, CallOp("math.add", {WithExportAnnotation( CallOp("math.add", {Leaf("x"), Leaf("y")}), "x+y"), Leaf("z")})); ASSERT_OK_AND_ASSIGN((auto [stripped_expr, side_outputs]), ExtractSideOutputs(expr)); FrameLayout::Builder layout_builder; auto x_slot = layout_builder.AddSlot<float>(); auto y_slot = layout_builder.AddSlot<float>(); auto z_slot = layout_builder.AddSlot<float>(); ASSERT_OK_AND_ASSIGN(auto executable_expr, CompileAndBindForDynamicEvaluation( options, &layout_builder, stripped_expr, {{"x", TypedSlot::FromSlot(x_slot)}, {"y", TypedSlot::FromSlot(y_slot)}, {"z", TypedSlot::FromSlot(z_slot)}}, std::nullopt, side_outputs)); EXPECT_THAT( executable_expr, AllOf(InitOperationsAre(), EvalOperationsAre( "FLOAT32 [0x10] = math.add(FLOAT32 [0x00], FLOAT32 [0x04])", "FLOAT32 [0x0C] = math.add(FLOAT32 [0x10], FLOAT32 [0x08])"))); FrameLayout layout = std::move(layout_builder).Build(); RootEvaluationContext ctx(&layout); EXPECT_OK(executable_expr->InitializeLiterals(&ctx)); ctx.Set(x_slot, 1.0f); ctx.Set(y_slot, 10.0f); ctx.Set(z_slot, 100.0f); EXPECT_THAT(executable_expr->Execute(&ctx), IsOk()); ASSERT_OK_AND_ASSIGN(auto output_slot, executable_expr->output_slot().ToSlot<float>()); ASSERT_THAT(executable_expr->named_output_slots(), UnorderedElementsAre(Pair("x+y", _))); ASSERT_OK_AND_ASSIGN( auto xpy_slot, executable_expr->named_output_slots().at("x+y").ToSlot<float>()); EXPECT_EQ(ctx.Get(output_slot), 111.0f); EXPECT_EQ(ctx.Get(xpy_slot), 11.0f); } TEST_P(EvalVisitorParameterizedTest, EvalWithExportAnnotation_AllLiterals) { DynamicEvaluationEngineOptions options; options.collect_op_descriptions = true; ASSERT_OK_AND_ASSIGN( auto expr, CallOp("math.add", {Literal(1.f), WithExportAnnotation(Literal(10.f), "out_y")})); ASSERT_OK_AND_ASSIGN((auto [stripped_expr, side_outputs]), ExtractSideOutputs(expr)); FrameLayout::Builder layout_builder; ASSERT_OK_AND_ASSIGN(auto executable_expr, CompileAndBindForDynamicEvaluation( options, &layout_builder, stripped_expr, {}, std::nullopt, side_outputs)); EXPECT_THAT( executable_expr, AllOf(InitOperationsAre("FLOAT32 [0x04] = 11.\n" "FLOAT32 [0x08] = 10."), EvalOperationsAre("FLOAT32 [0x00] = core._copy(FLOAT32 [0x04])"))); FrameLayout layout = std::move(layout_builder).Build(); RootEvaluationContext ctx(&layout); EXPECT_OK(executable_expr->InitializeLiterals(&ctx)); EXPECT_THAT(executable_expr->Execute(&ctx), IsOk()); ASSERT_OK_AND_ASSIGN(auto output_slot, executable_expr->output_slot().ToSlot<float>()); ASSERT_THAT(executable_expr->named_output_slots(), UnorderedElementsAre(Pair("out_y", _))); ASSERT_OK_AND_ASSIGN( auto 
out_y_slot, executable_expr->named_output_slots().at("out_y").ToSlot<float>()); EXPECT_EQ(ctx.Get(output_slot), 11.0f); EXPECT_EQ(ctx.Get(out_y_slot), 10.0f); } TEST_P(EvalVisitorParameterizedTest, EvalWithLiteral) { ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {Leaf("x"), Literal(1.f)})); FrameLayout::Builder layout_builder; auto x_slot = layout_builder.AddSlot<float>(); ASSERT_OK_AND_ASSIGN( auto executable_expr, CompileAndBindForDynamicEvaluation(options_, &layout_builder, expr, {{"x", TypedSlot::FromSlot(x_slot)}})); EXPECT_THAT( executable_expr, AllOf(InitOperationsAre("FLOAT32 [0x08] = 1."), EvalOperationsAre( "FLOAT32 [0x04] = math.add(FLOAT32 [0x00], FLOAT32 [0x08])"))); FrameLayout layout = std::move(layout_builder).Build(); RootEvaluationContext ctx(&layout); EXPECT_OK(executable_expr->InitializeLiterals(&ctx)); ctx.Set(x_slot, 2.0f); EXPECT_THAT(executable_expr->Execute(&ctx), IsOk()); ASSERT_OK_AND_ASSIGN(auto output_slot, executable_expr->output_slot().ToSlot<float>()); EXPECT_THAT(ctx.Get(output_slot), Eq(3.0f)); } TEST_P(EvalVisitorParameterizedTest, EvalSingleLeaf) { auto expr = Leaf("x"); FrameLayout::Builder layout_builder; auto x_slot = layout_builder.AddSlot<float>(); auto output_slot = layout_builder.AddSlot<float>(); ASSERT_OK_AND_ASSIGN( auto executable_expr, CompileAndBindForDynamicEvaluation(options_, &layout_builder, expr, {{"x", TypedSlot::FromSlot(x_slot)}}, TypedSlot::FromSlot(output_slot))); EXPECT_THAT( executable_expr, AllOf(InitOperationsAre(), EvalOperationsAre("FLOAT32 [0x04] = core._copy(FLOAT32 [0x00])"))); FrameLayout layout = std::move(layout_builder).Build(); RootEvaluationContext ctx(&layout); EXPECT_OK(executable_expr->InitializeLiterals(&ctx)); ctx.Set(x_slot, 2.0f); EXPECT_THAT(executable_expr->Execute(&ctx), IsOk()); EXPECT_THAT(ctx.Get(output_slot), Eq(2.0f)); } TEST_P(EvalVisitorParameterizedTest, EvalOnlyLiterals) { auto x = Literal(2.f); auto y = Literal(1.f); ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {x, y})); FrameLayout::Builder layout_builder; ASSERT_OK_AND_ASSIGN( auto executable_expr, CompileAndBindForDynamicEvaluation(options_, &layout_builder, expr, {})); EXPECT_THAT( executable_expr, AllOf(InitOperationsAre("FLOAT32 [0x04] = 3."), EvalOperationsAre("FLOAT32 [0x00] = core._copy(FLOAT32 [0x04])"))); FrameLayout layout = std::move(layout_builder).Build(); RootEvaluationContext ctx(&layout); ASSERT_OK_AND_ASSIGN(auto output_slot, executable_expr->output_slot().ToSlot<float>()); ctx.Set(output_slot, 57.0f); EXPECT_OK(executable_expr->InitializeLiterals(&ctx)); EXPECT_THAT(ctx.Get(output_slot), Eq(57.0f)); EXPECT_THAT(executable_expr->Execute(&ctx), IsOk()); EXPECT_THAT(ctx.Get(output_slot), Eq(3.0f)); } TEST_P(EvalVisitorParameterizedTest, EvalUnboundLeafError) { ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {Leaf("x"), Leaf("y")})); EXPECT_THAT( CompileForDynamicEvaluation(options_, expr, {{"y", GetQType<float>()}}), StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("missing QType information for inputs {x}"))); EXPECT_THAT( CompileForDynamicEvaluation(options_, expr, {}), StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("missing QType information for inputs {x, y}"))); ASSERT_OK_AND_ASSIGN(auto compiled_model, CompileForDynamicEvaluation(options_, expr, {{"x", GetQType<float>()}, {"y", GetQType<float>()}})); FrameLayout::Builder layout_builder; EXPECT_THAT(compiled_model->Bind( &layout_builder, {{"y", TypedSlot::FromSlot(layout_builder.AddSlot<float>())}}, 
TypedSlot::FromSlot(layout_builder.AddSlot<float>())), StatusIs(absl::StatusCode::kFailedPrecondition, HasSubstr("missed slots: x"))); EXPECT_THAT(compiled_model->Bind( &layout_builder, {}, TypedSlot::FromSlot(layout_builder.AddSlot<float>())), StatusIs(absl::StatusCode::kFailedPrecondition, HasSubstr("missed slots: x,y"))); } TEST_P(EvalVisitorParameterizedTest, EvalPlaceholderError) { auto x = Literal(2.f); ASSERT_OK_AND_ASSIGN( auto y, WithQTypeAnnotation(Placeholder("y"), GetQType<float>())); ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {x, y})); EXPECT_THAT( CompileForDynamicEvaluation(options_, expr, {{"y", GetQType<float>()}}), StatusIs(absl::StatusCode::kFailedPrecondition, HasSubstr( "placeholders should be substituted before evaluation: y"))); } TEST_P(EvalVisitorParameterizedTest, EvalOperatorTakingSameNodeTwice) { auto x = Leaf("x"); ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {x, x})); FrameLayout::Builder layout_builder; auto x_slot = layout_builder.AddSlot<float>(); ASSERT_OK_AND_ASSIGN( auto executable_expr, CompileAndBindForDynamicEvaluation(options_, &layout_builder, expr, {{"x", TypedSlot::FromSlot(x_slot)}})); EXPECT_THAT( executable_expr, AllOf(InitOperationsAre(), EvalOperationsAre( "FLOAT32 [0x04] = math.add(FLOAT32 [0x00], FLOAT32 [0x00])"))); FrameLayout layout = std::move(layout_builder).Build(); RootEvaluationContext ctx(&layout); EXPECT_OK(executable_expr->InitializeLiterals(&ctx)); ctx.Set(x_slot, 2.0f); EXPECT_THAT(executable_expr->Execute(&ctx), IsOk()); ASSERT_OK_AND_ASSIGN(auto output_slot, executable_expr->output_slot().ToSlot<float>()); EXPECT_THAT(ctx.Get(output_slot), Eq(4.0f)); } TEST_P(EvalVisitorParameterizedTest, EvalOperatorTakingTwoEqualNodes) { auto x = Leaf("x"); auto y = Leaf("x"); ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {x, y})); FrameLayout::Builder layout_builder; auto x_slot = layout_builder.AddSlot<float>(); ASSERT_OK_AND_ASSIGN( auto executable_expr, CompileAndBindForDynamicEvaluation(options_, &layout_builder, expr, {{"x", TypedSlot::FromSlot(x_slot)}})); EXPECT_THAT( executable_expr, AllOf(InitOperationsAre(), EvalOperationsAre( "FLOAT32 [0x04] = math.add(FLOAT32 [0x00], FLOAT32 [0x00])"))); FrameLayout layout = std::move(layout_builder).Build(); RootEvaluationContext ctx(&layout); EXPECT_OK(executable_expr->InitializeLiterals(&ctx)); ctx.Set(x_slot, 2.0f); EXPECT_THAT(executable_expr->Execute(&ctx), IsOk()); ASSERT_OK_AND_ASSIGN(auto output_slot, executable_expr->output_slot().ToSlot<float>()); EXPECT_THAT(ctx.Get(output_slot), Eq(4.0f)); } TEST_P(EvalVisitorParameterizedTest, EvalOperatorWithUnusedInputs) { ASSERT_OK_AND_ASSIGN( auto op_with_unused_input, MakeLambdaOperator(ExprOperatorSignature{{"unused_input"}}, Literal<int32_t>(1))); ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op_with_unused_input, {Leaf("x")})); FrameLayout::Builder layout_builder; auto x_slot = layout_builder.AddSlot<float>(); ASSERT_OK_AND_ASSIGN( auto executable_expr, CompileAndBindForDynamicEvaluation(options_, &layout_builder, expr, {{"x", TypedSlot::FromSlot(x_slot)}})); EXPECT_THAT( executable_expr, AllOf(InitOperationsAre("INT32 [0x08] = 1"), EvalOperationsAre("INT32 [0x04] = core._copy(INT32 [0x08])"))); } TEST_P(EvalVisitorParameterizedTest, GetNth) { const auto x = Literal<float>(2.f); const auto y = Literal<int64_t>(3); ASSERT_OK_AND_ASSIGN(const auto tuple, CallOp("core.make_tuple", {x, y})); ASSERT_OK_AND_ASSIGN(const auto first, CallOp("core.get_first", {tuple})); ASSERT_OK_AND_ASSIGN(const auto second, CallOp("core.get_second", 
{tuple})); ASSERT_OK_AND_ASSIGN(const auto second_by_index, CallOp(std::make_shared<GetNthOperator>(1), {tuple})); ASSERT_OK_AND_ASSIGN(auto executable_first, CompileForDynamicEvaluation(options_, first)); ASSERT_OK_AND_ASSIGN(auto executable_second, CompileForDynamicEvaluation(options_, second)); ASSERT_OK_AND_ASSIGN(auto executable_second_by_index, CompileForDynamicEvaluation(options_, second_by_index)); FrameLayout::Builder layout_builder; ASSERT_OK_AND_ASSIGN(auto bound_executable_first, executable_first->Bind(&layout_builder)); EXPECT_THAT( bound_executable_first, AllOf(InitOperationsAre("FLOAT32 [0x04] = 2."), EvalOperationsAre("FLOAT32 [0x00] = core._copy(FLOAT32 [0x04])"))); ASSERT_OK_AND_ASSIGN(auto bound_executable_second, executable_second->Bind(&layout_builder)); EXPECT_THAT( bound_executable_second, AllOf(InitOperationsAre("INT64 [0x10] = int64{3}"), EvalOperationsAre("INT64 [0x08] = core._copy(INT64 [0x10])"))); ASSERT_OK_AND_ASSIGN(auto bound_executable_second_by_index, executable_second_by_index->Bind(&layout_builder)); EXPECT_THAT( bound_executable_second_by_index, AllOf(InitOperationsAre("INT64 [0x20] = int64{3}"), EvalOperationsAre("INT64 [0x18] = core._copy(INT64 [0x20])"))); FrameLayout layout = std::move(layout_builder).Build(); RootEvaluationContext ctx(&layout); ASSERT_OK_AND_ASSIGN(auto output_first, bound_executable_first->output_slot().ToSlot<float>()); EXPECT_OK(bound_executable_first->InitializeLiterals(&ctx)); EXPECT_OK(bound_executable_first->Execute(&ctx)); EXPECT_THAT(ctx.Get(output_first), FloatEq(2.0f)); ASSERT_OK_AND_ASSIGN( auto output_second, bound_executable_second->output_slot().ToSlot<int64_t>()); EXPECT_OK(bound_executable_second->InitializeLiterals(&ctx)); EXPECT_OK(bound_executable_second->Execute(&ctx)); EXPECT_THAT(ctx.Get(output_second), Eq(3)); ASSERT_OK_AND_ASSIGN( auto output_second_by_index, bound_executable_second_by_index->output_slot().ToSlot<int64_t>()); EXPECT_OK(bound_executable_second_by_index->InitializeLiterals(&ctx)); EXPECT_OK(bound_executable_second_by_index->Execute(&ctx)); EXPECT_THAT(ctx.Get(output_second_by_index), Eq(3)); } TEST_P(EvalVisitorParameterizedTest, OptimizedHas) { auto ten_times_has = Leaf("x"); for (int i = 0; i < 10; ++i) { ASSERT_OK_AND_ASSIGN(ten_times_has, CallOp("core.has", {ten_times_has})); } FrameLayout::Builder layout_builder; auto x_slot = layout_builder.AddSlot<OptionalValue<float>>(); EXPECT_THAT( CompileAndBindForDynamicEvaluation(options_, &layout_builder, ten_times_has, {{"x", TypedSlot::FromSlot(x_slot)}}), IsOkAndHolds(AllOf( InitOperationsAre(), EvalOperationsAre( "OPTIONAL_UNIT [0x08] = core._copy(OPTIONAL_UNIT [0x00])")))); } class IdentityAnnotation final : public AnnotationExprOperatorTag, public ExprOperatorWithFixedSignature { public: IdentityAnnotation() : ExprOperatorWithFixedSignature( "id", ExprOperatorSignature::MakeArgsN(1), "", FingerprintHasher("arolla::expr::IdentityAnnotation").Finish()) {} absl::StatusOr<ExprAttributes> InferAttributes( absl::Span<const ExprAttributes> inputs) const final { return inputs[0]; } }; TEST_P(EvalVisitorParameterizedTest, EvalAnnotation) { auto x = Leaf("x"); const auto with_annotation = std::make_shared<IdentityAnnotation>(); ASSERT_OK_AND_ASSIGN(auto expr, CallOp(with_annotation, {x})); EXPECT_THAT(Invoke(expr, {{"x", TypedValue::FromValue(2.0f)}}), IsOkAndHolds(TypedValueWith<float>(2.0f))); } TEST_P(EvalVisitorParameterizedTest, SlotRecycling) { ASSERT_OK_AND_ASSIGN(auto float_sum, CallOp("math.maximum", {Leaf("x"), Literal<float>(57)}));
ASSERT_OK_AND_ASSIGN(float_sum, CallOp("math.maximum", {float_sum, Leaf("x")})); ASSERT_OK_AND_ASSIGN(auto float_sum_4, CallOp("math.maximum", {float_sum, Leaf("x")})); ASSERT_OK_AND_ASSIGN(float_sum, CallOp("math.maximum", {float_sum_4, Leaf("x")})); ASSERT_OK_AND_ASSIGN(float_sum, CallOp("math.maximum", {float_sum, Leaf("x")})); ASSERT_OK_AND_ASSIGN(float_sum, CallOp("math.maximum", {float_sum, Leaf("x")})); ASSERT_OK_AND_ASSIGN(auto float_sum_8, CallOp("math.maximum", {float_sum, Leaf("x")})); { FrameLayout::Builder layout_builder; auto x_slot = layout_builder.AddSlot<float>(); EXPECT_THAT( CompileAndBindForDynamicEvaluation( options_, &layout_builder, float_sum_8, {{"x", TypedSlot::FromSlot(x_slot)}}), IsOkAndHolds(AllOf( InitOperationsAre("FLOAT32 [0x08] = 57."), EvalOperationsAre( "FLOAT32 [0x0C] = math.maximum(FLOAT32 [0x00], FLOAT32 [0x08])", "FLOAT32 [0x10] = math.maximum(FLOAT32 [0x0C], FLOAT32 [0x00])", "FLOAT32 [0x0C] = math.maximum(FLOAT32 [0x10], FLOAT32 [0x00])", "FLOAT32 [0x10] = math.maximum(FLOAT32 [0x0C], FLOAT32 [0x00])", "FLOAT32 [0x0C] = math.maximum(FLOAT32 [0x10], FLOAT32 [0x00])", "FLOAT32 [0x10] = math.maximum(FLOAT32 [0x0C], FLOAT32 [0x00])", "FLOAT32 [0x04] = math.maximum(FLOAT32 [0x10], FLOAT32 " "[0x00])")))); } { FrameLayout::Builder layout_builder; auto x_slot = layout_builder.AddSlot<float>(); EXPECT_THAT( CompileAndBindForDynamicEvaluation( options_, &layout_builder, float_sum_8, {{"x", TypedSlot::FromSlot(x_slot)}}, {}, {{"sum_of_four", float_sum_4}}), IsOkAndHolds(AllOf( InitOperationsAre("FLOAT32 [0x08] = 57."), EvalOperationsAre( "FLOAT32 [0x0C] = math.maximum(FLOAT32 [0x00], FLOAT32 [0x08])", "FLOAT32 [0x10] = math.maximum(FLOAT32 [0x0C], FLOAT32 [0x00])", "FLOAT32 [0x0C] = math.maximum(FLOAT32 [0x10], FLOAT32 [0x00])", "FLOAT32 [0x10] = math.maximum(FLOAT32 [0x0C], FLOAT32 [0x00])", "FLOAT32 [0x14] = math.maximum(FLOAT32 [0x10], FLOAT32 [0x00])", "FLOAT32 [0x10] = math.maximum(FLOAT32 [0x14], FLOAT32 [0x00])", "FLOAT32 [0x04] = math.maximum(FLOAT32 [0x10], FLOAT32 " "[0x00])"), Pointee(Property(&BoundExpr::named_output_slots, UnorderedElementsAre(Pair( "sum_of_four", Property(&TypedSlot::byte_offset, 0x0C)))))))); } { ASSERT_OK_AND_ASSIGN( auto int_sum, CallOp("math.maximum", {Leaf("y"), Literal<int32_t>(57)})); ASSERT_OK_AND_ASSIGN(int_sum, CallOp("math.maximum", {int_sum, Leaf("y")})); ASSERT_OK_AND_ASSIGN(int_sum, CallOp("math.maximum", {int_sum, Leaf("y")})); ASSERT_OK_AND_ASSIGN(int_sum, CallOp("math.maximum", {int_sum, Leaf("y")})); ASSERT_OK_AND_ASSIGN(int_sum, CallOp("math.maximum", {int_sum, Leaf("y")})); ASSERT_OK_AND_ASSIGN(int_sum, CallOp("math.maximum", {int_sum, Leaf("y")})); ASSERT_OK_AND_ASSIGN(auto int_sum_8, CallOp("math.maximum", {int_sum, Leaf("y")})); ASSERT_OK_AND_ASSIGN(auto sums_pair, CallOp("core.make_tuple", {int_sum_8, float_sum_8})); FrameLayout::Builder layout_builder; auto x_slot = layout_builder.AddSlot<float>(); auto y_slot = layout_builder.AddSlot<int32_t>(); EXPECT_THAT( CompileAndBindForDynamicEvaluation( options_, &layout_builder, sums_pair, {{"x", TypedSlot::FromSlot(x_slot)}, {"y", TypedSlot::FromSlot(y_slot)}}), IsOkAndHolds(AllOf( InitOperationsAre("INT32 [0x10] = 57\n" "FLOAT32 [0x1C] = 57."), EvalOperationsAre( "INT32 [0x14] = math.maximum(INT32 [0x04], INT32 [0x10])", "INT32 [0x18] = math.maximum(INT32 [0x14], INT32 [0x04])", "INT32 [0x14] = math.maximum(INT32 [0x18], INT32 [0x04])", "INT32 [0x18] = math.maximum(INT32 [0x14], INT32 [0x04])", "INT32 [0x14] = math.maximum(INT32 [0x18], INT32 [0x04])", "INT32 
[0x18] = math.maximum(INT32 [0x14], INT32 [0x04])", "INT32 [0x14] = math.maximum(INT32 [0x18], INT32 [0x04])", "FLOAT32 [0x20] = math.maximum(FLOAT32 [0x00], FLOAT32 [0x1C])", "FLOAT32 [0x24] = math.maximum(FLOAT32 [0x20], FLOAT32 [0x00])", "FLOAT32 [0x20] = math.maximum(FLOAT32 [0x24], FLOAT32 [0x00])", "FLOAT32 [0x24] = math.maximum(FLOAT32 [0x20], FLOAT32 [0x00])", "FLOAT32 [0x20] = math.maximum(FLOAT32 [0x24], FLOAT32 [0x00])", "FLOAT32 [0x24] = math.maximum(FLOAT32 [0x20], FLOAT32 [0x00])", "FLOAT32 [0x20] = math.maximum(FLOAT32 [0x24], FLOAT32 [0x00])", "tuple<INT32,FLOAT32> [0x08] = core.make_tuple(INT32 [0x14], " "FLOAT32 [0x20])")))); } } TEST_P(EvalVisitorParameterizedTest, TupleSubslotsNotRecycled) { ASSERT_OK_AND_ASSIGN(auto xy_tuple, CallOp("core.make_tuple", {Leaf("x"), Leaf("y")})); ASSERT_OK_AND_ASSIGN(auto xyz_tuple, CallOp("core.make_tuple", {xy_tuple, Leaf("z")})); ASSERT_OK_AND_ASSIGN( auto x_plus_z, CallOp("math.maximum", {CallOp("core.get_first", {CallOp("core.get_first", {xyz_tuple})}), CallOp("core.get_second", {xyz_tuple})})); ASSERT_OK_AND_ASSIGN(auto x_plus_z_2, CallOp("math.maximum", {x_plus_z, x_plus_z})); ASSERT_OK_AND_ASSIGN( auto x_plus_z_again, CallOp("core.get_first", {CallOp("core.make_tuple", {x_plus_z, x_plus_z_2})})); FrameLayout::Builder layout_builder; auto x_slot = layout_builder.AddSlot<float>(); auto y_slot = layout_builder.AddSlot<float>(); auto z_slot = layout_builder.AddSlot<float>(); ASSERT_OK_AND_ASSIGN(auto bound_expr, CompileAndBindForDynamicEvaluation( options_, &layout_builder, x_plus_z_again, {{"x", TypedSlot::FromSlot(x_slot)}, {"y", TypedSlot::FromSlot(y_slot)}, {"z", TypedSlot::FromSlot(z_slot)}})); if (GetParam().use_default_optimizer) { EXPECT_THAT(bound_expr, AllOf(InitOperationsAre(), EvalOperationsAre("FLOAT32 [0x0C] = math.maximum(FLOAT32 " "[0x00], FLOAT32 [0x08])"))); } else { EXPECT_THAT( bound_expr, AllOf( InitOperationsAre(), EvalOperationsAre( "tuple<FLOAT32,FLOAT32> [0x10]" " = core.make_tuple(FLOAT32 [0x00], FLOAT32 [0x04])", "tuple<tuple<FLOAT32,FLOAT32>,FLOAT32> [0x18]" " = core.make_tuple(tuple<FLOAT32,FLOAT32> [0x10], FLOAT32 " "[0x08])", "FLOAT32 [0x24] = math.maximum(FLOAT32 [0x18], FLOAT32 [0x20])", "FLOAT32 [0x28] = math.maximum(FLOAT32 [0x24], FLOAT32 [0x24])", "tuple<FLOAT32,FLOAT32> [0x10]" " = core.make_tuple(FLOAT32 [0x24], FLOAT32 [0x28])", "FLOAT32 [0x0C] = core._copy(FLOAT32 [0x10])"))); } } struct Point3D { float x; float y; float z; }; TEST_P(EvalVisitorParameterizedTest, TestWithInputLoader) { auto x = Leaf("x"); auto y = Leaf("y"); auto z = Leaf("z"); ASSERT_OK_AND_ASSIGN(auto xy, CallOp("math.add", {x, y})); ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {xy, z})); FrameLayout::Builder layout_builder; ASSERT_OK_AND_ASSIGN(auto loader, CreateAccessorsInputLoader<Point3D>( "x", [](const Point3D& p) { return p.x; }, "y", [](const Point3D& p) { return p.y; }, "z", [](const Point3D& p) { return p.z; })); ASSERT_OK_AND_ASSIGN(auto output_types, GetInputLoaderQTypes(*loader, GetLeafKeys(expr))); auto input_slots = AddSlotsMap(output_types, &layout_builder); ASSERT_OK_AND_ASSIGN(auto bound_loader, loader->Bind(input_slots)); ASSERT_OK_AND_ASSIGN(auto executable_expr, CompileAndBindForDynamicEvaluation( options_, &layout_builder, expr, input_slots)); ASSERT_OK_AND_ASSIGN(auto output, executable_expr->output_slot().ToSlot<float>()); FrameLayout layout = std::move(layout_builder).Build(); RootEvaluationContext ctx(&layout); EXPECT_OK(executable_expr->InitializeLiterals(&ctx)); ASSERT_OK(bound_loader({1.0f, 
10.0f, 100.0f}, ctx.frame())); EXPECT_THAT(executable_expr->Execute(&ctx), IsOk()); EXPECT_THAT(ctx.Get(output), Eq(111.0f)); } TEST_P(EvalVisitorParameterizedTest, DetailedStackTrace) { ASSERT_OK_AND_ASSIGN( auto sum_of_4_lambda, MakeLambdaOperator( "sum_of_4", ExprOperatorSignature{{"x"}}, CallOp("math.sum", {Placeholder("x"), CallOp("edge.from_sizes", {CallOp("math.multiply", {Literal(CreateDenseArray<int64_t>({1, 1})), Literal(2)})})}))); ASSERT_OK_AND_ASSIGN(auto expr, CallOp(sum_of_4_lambda, {Leaf("x")})); auto options = DynamicEvaluationEngineOptions{.enable_expr_stack_trace = true}; FrameLayout::Builder layout_builder; auto x_slot = layout_builder.AddSlot<DenseArray<int64_t>>(); auto result_slot = layout_builder.AddSlot<DenseArray<int64_t>>(); ASSERT_OK_AND_ASSIGN( auto executable_expr, CompileAndBindForDynamicEvaluation(options, &layout_builder, expr, {{"x", TypedSlot::FromSlot(x_slot)}}, TypedSlot::FromSlot(result_slot))); auto layout = std::move(layout_builder).Build(); MemoryAllocation alloc(&layout); EvaluationContext ctx; executable_expr->InitializeLiterals(&ctx, alloc.frame()); executable_expr->Execute(&ctx, alloc.frame()); EXPECT_THAT( ctx.status(), StatusIs( absl::StatusCode::kInvalidArgument, HasSubstr("argument sizes mismatch: (4, 0); " "during evaluation of operator math._sum\n" "ORIGINAL NODE: sum_of_4(L.x)\n" "COMPILED NODE: M.math._sum(L.x, dense_array_edge(" "split_points=dense_array([int64{0}, int64{2}, int64{4}]))" ", optional_int64{0})"))); } TEST_P(EvalVisitorParameterizedTest, OperatorWithoutProxy) { FrameLayout::Builder layout_builder; ASSERT_OK_AND_ASSIGN( auto node, CallOp(std::make_shared<::arolla::expr::testing::DummyOp>( "test.Dummy", ExprOperatorSignature::MakeVariadicArgs()), {})); EXPECT_THAT( CompileAndBindForDynamicEvaluation(options_, &layout_builder, node, {}), StatusIs( absl::StatusCode::kInvalidArgument, HasSubstr("test.Dummy is not a builtin or backend ExprOperator; " "while compiling node test.Dummy():INT32; the expression " "is likely not fully compiled and is using derived " "operators that are not supported in the backend"))); } TEST_P(EvalVisitorParameterizedTest, DenseArrayStringReplace) { EXPECT_THAT(InvokeExprOperator<DenseArray<Text>>( "strings.replace", CreateDenseArray<Text>({Text("Fuzzy"), Text("Wuzzy")}), Text("zz"), Text("zzz")), IsOkAndHolds(::testing::ElementsAre( absl::string_view("Fuzzzy"), absl::string_view("Wuzzzy")))); } TEST_P(EvalVisitorParameterizedTest, VectorPrintf) { DenseArray<Text> format_spec = CreateConstDenseArray<Text>(3, "%s's atomic weight is %.4f"); DenseArray<Text> elements = CreateDenseArray<Text>( {Text("Hydrogen"), Text("Helium"), Text("Lithium")}); DenseArray<float> weights = CreateDenseArray<float>({1.0079f, 4.0026, 6.9410}); EXPECT_THAT(InvokeExprOperator<DenseArray<Text>>( "strings.printf", format_spec, elements, weights), IsOkAndHolds(ElementsAre("Hydrogen's atomic weight is 1.0079", "Helium's atomic weight is 4.0026", "Lithium's atomic weight is 6.9410"))); } TEST_P(EvalVisitorParameterizedTest, CompileAndBindExprOperator) { ASSERT_OK_AND_ASSIGN( auto x_plus_y_plus_1_op, MakeLambdaOperator( ExprOperatorSignature::Make("x, y"), CallOp("math.add", {Placeholder("x"), CallOp("math.add", {Placeholder("y"), Literal<int64_t>(1)})}))); FrameLayout::Builder layout_builder; auto x_slot = layout_builder.AddSlot<int64_t>(); auto y_slot = layout_builder.AddSlot<int64_t>(); auto result_slot = layout_builder.AddSlot<int64_t>(); ASSERT_OK_AND_ASSIGN( std::shared_ptr<BoundExpr> executable, CompileAndBindExprOperator( 
options_, &layout_builder, x_plus_y_plus_1_op, {TypedSlot::FromSlot(x_slot), TypedSlot::FromSlot(y_slot)}, TypedSlot::FromSlot(result_slot))); FrameLayout layout = std::move(layout_builder).Build(); RootEvaluationContext ctx(&layout); ctx.Set(x_slot, 10); ctx.Set(y_slot, 100); ASSERT_OK(executable->InitializeLiterals(&ctx)); ASSERT_OK(executable->Execute(&ctx)); EXPECT_THAT(ctx.Get(result_slot), Eq(111)); } class HigherLevelTestOperator final : public BasicExprOperator { public: HigherLevelTestOperator() : BasicExprOperator( "test.higher_level_test_op", ExprOperatorSignature::MakeArgsN(1), "", FingerprintHasher( "arolla::expr::eval_internal::HigherLevelTestOperator") .Finish()) {} absl::StatusOr<QTypePtr> GetOutputQType( absl::Span<const QTypePtr> input_qtypes) const final { return GetQType<float>(); } }; class LowerLevelTestOperator final : public BasicExprOperator, public BuiltinExprOperatorTag { public: LowerLevelTestOperator() : BasicExprOperator( "test.lower_level_test_op", ExprOperatorSignature::MakeArgsN(1), "", FingerprintHasher( "arolla::expr::eval_internal::LowerLevelTestOperator") .Finish()) {} absl::StatusOr<QTypePtr> GetOutputQType( absl::Span<const QTypePtr> input_qtypes) const final { return GetQType<float>(); } }; TEST_P(EvalVisitorParameterizedTest, Extensions) { eval_internal::NodeTransformationFn lower_transformation = [](const DynamicEvaluationEngineOptions&, ExprNodePtr node) -> absl::StatusOr<ExprNodePtr> { if (node->is_op() && fast_dynamic_downcast_final<const HigherLevelTestOperator*>( node->op().get()) != nullptr) { return BindOp(std::make_shared<LowerLevelTestOperator>(), node->node_deps(), {}); } return node; }; eval_internal::CompilerExtensionRegistry::GetInstance() .RegisterNodeTransformationFn(lower_transformation); eval_internal::CompileOperatorFn compile_test_op = [](eval_internal::CompileOperatorFnArgs args) -> std::optional<absl::Status> { if (fast_dynamic_downcast_final<const LowerLevelTestOperator*>( args.op.get()) == nullptr) { return std::nullopt; } ASSIGN_OR_RETURN(auto output_slot, args.output_slot.ToSlot<float>()); args.executable_builder->AddEvalOp( MakeBoundOperator( [output_slot](EvaluationContext* ctx, FramePtr frame) { frame.Set(output_slot, 57); }), eval_internal::FormatOperatorCall("lower level test operator", {}, {args.output_slot}), "lower level test operator"); return absl::OkStatus(); }; eval_internal::CompilerExtensionRegistry::GetInstance() .RegisterCompileOperatorFn(compile_test_op); ASSERT_OK_AND_ASSIGN( auto expr, CallOp(std::make_shared<HigherLevelTestOperator>(), {Leaf("x")})); FrameLayout::Builder layout_builder; auto x_slot = TypedSlot::FromSlot(layout_builder.AddSlot<float>()); ASSERT_OK_AND_ASSIGN(auto bound_expr, CompileAndBindForDynamicEvaluation( options_, &layout_builder, expr, {{"x", x_slot}})); EXPECT_THAT( bound_expr, AllOf(InitOperationsAre(), EvalOperationsAre("FLOAT32 [0x04] = lower level test operator()"))); } class OperatorThatFailsBind : public QExprOperator { public: OperatorThatFailsBind() : QExprOperator(QExprOperatorSignature::Get({GetQType<float>()}, GetQType<float>())) {} absl::StatusOr<std::unique_ptr<BoundOperator>> DoBind( absl::Span<const TypedSlot> input_slots, TypedSlot output_slot) const final { return absl::InternalError("test error"); } }; TEST_P(EvalVisitorParameterizedTest, OperatorThatFailsBind) { OperatorRegistry qexpr_registry; ASSERT_OK(qexpr_registry.RegisterOperator( "test.operator_that_fails_bind", std::make_unique<OperatorThatFailsBind>())); ExprOperatorPtr op = 
std::make_shared<BackendWrappingOperator>( "test.operator_that_fails_bind", ExprOperatorSignature::MakeVariadicArgs(), [](absl::Span<const QTypePtr> input_qtypes) -> absl::StatusOr<QTypePtr> { return GetQType<float>(); }, ""); ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Leaf("x")})); FrameLayout::Builder layout_builder; auto x_slot = TypedSlot::FromSlot(layout_builder.AddSlot<float>()); DynamicEvaluationEngineOptions options(options_); options.operator_directory = &qexpr_registry; EXPECT_THAT( CompileAndBindForDynamicEvaluation(options, &layout_builder, expr, {{"x", x_slot}}), StatusIs(absl::StatusCode::kInternal, HasSubstr("test error; while binding operator " "test.operator_that_fails_bind; while compiling node " "test.operator_that_fails_bind(L.x)"))); } } }
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/eval.cc
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/eval_test.cc
1ca990dbeca224035efdabffecc7f3738df6b52c
d13a71f9-9641-49d4-9959-d13d8a3bdbf0
cpp
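For one-off evaluations, the test file above also relies on arolla::expr::Invoke() (from arolla/expr/eval/invoke.h), which compiles and executes an expression against a map of leaf values in a single call. A minimal hedged sketch (AddOneShot is a hypothetical helper name; the Invoke call shape matches the EvalAnnotation test above):

#include "absl/status/statusor.h"
#include "arolla/expr/eval/invoke.h"
#include "arolla/expr/expr.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/status_macros_backport.h"

// Hedged sketch: evaluates L.x + L.y without manual FrameLayout management.
absl::StatusOr<arolla::TypedValue> AddOneShot(float x, float y) {
  ASSIGN_OR_RETURN(auto expr,
                   arolla::expr::CallOp("math.add", {arolla::expr::Leaf("x"),
                                                     arolla::expr::Leaf("y")}));
  return arolla::expr::Invoke(
      expr, {{"x", arolla::TypedValue::FromValue(x)},
             {"y", arolla::TypedValue::FromValue(y)}});
}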
google/tensorstore
bfloat16
tensorstore/util/bfloat16.h
tensorstore/util/bfloat16_test.cc
#ifndef TENSORSTORE_UTIL_BFLOAT16_H_ #define TENSORSTORE_UTIL_BFLOAT16_H_ #include <cassert> #include <cmath> #include <cstdint> #include <cstring> #include <limits> #include <type_traits> #include "absl/base/casts.h" #include <nlohmann/json_fwd.hpp> namespace tensorstore { class BFloat16; } namespace std { template <> struct numeric_limits<::tensorstore::BFloat16>; } namespace tensorstore { namespace internal { BFloat16 NumericFloat32ToBfloat16RoundNearestEven(float v); BFloat16 Float32ToBfloat16RoundNearestEven(float v); float Bfloat16ToFloat(BFloat16 v); } class BFloat16 { public: constexpr BFloat16() : rep_(0) {} template <typename T, typename = std::enable_if_t<std::is_convertible_v<T, float>>> explicit BFloat16(T x) { if constexpr (std::is_same_v<T, bool>) { rep_ = static_cast<uint16_t>(x) * 0x3f80; } else if constexpr (std::numeric_limits<T>::is_integer) { *this = internal::NumericFloat32ToBfloat16RoundNearestEven( static_cast<float>(x)); } else { *this = internal::Float32ToBfloat16RoundNearestEven(static_cast<float>(x)); } } operator float() const { return internal::Bfloat16ToFloat(*this); } BFloat16& operator=(float v) { return *this = static_cast<BFloat16>(v); } BFloat16& operator=(bool v) { return *this = static_cast<BFloat16>(v); } template <typename T> std::enable_if_t<std::numeric_limits<T>::is_integer, BFloat16&> operator=( T v) { return *this = static_cast<BFloat16>(v); } #define TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_OP(OP) \ friend BFloat16 operator OP(BFloat16 a, BFloat16 b) { \ return BFloat16(static_cast<float>(a) OP static_cast<float>(b)); \ } \ template <typename T> \ friend std::enable_if_t<std::numeric_limits<T>::is_integer, BFloat16> \ operator OP(BFloat16 a, T b) { \ return BFloat16(static_cast<float>(a) OP b); \ } \ template <typename T> \ friend std::enable_if_t<std::numeric_limits<T>::is_integer, BFloat16> \ operator OP(T a, BFloat16 b) { \ return BFloat16(a OP static_cast<float>(b)); \ } \ #define TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_ASSIGN_OP(OP) \ friend BFloat16& operator OP##=(BFloat16& a, BFloat16 b) { \ return a = BFloat16(static_cast<float>(a) OP static_cast<float>(b)); \ } \ template <typename T> \ friend std::enable_if_t<std::numeric_limits<T>::is_integer, BFloat16&> \ operator OP##=(BFloat16& a, T b) { \ return a = BFloat16(static_cast<float>(a) OP b); \ } \ TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_OP(+) TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_ASSIGN_OP(+) TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_OP(-) TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_ASSIGN_OP(-) TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_OP(*) TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_ASSIGN_OP(*) TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_OP(/) TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_ASSIGN_OP(/) #undef TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_OP #undef TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_ASSIGN_OP friend BFloat16 operator-(BFloat16 a) { BFloat16 result; result.rep_ = a.rep_ ^ 0x8000; return result; } friend BFloat16 operator+(BFloat16 a) { return a; } friend BFloat16 operator++(BFloat16& a) { a += BFloat16(1); return a; } friend BFloat16 operator--(BFloat16& a) { a -= BFloat16(1); return a; } friend BFloat16 operator++(BFloat16& a, int) { BFloat16 original_value = a; ++a; return original_value; } friend BFloat16 operator--(BFloat16& a, int) { BFloat16 original_value = a; --a; return original_value; } template <template <typename U, typename V, typename... Args> class ObjectType , template <typename U, typename... 
Args> class ArrayType , class StringType , class BooleanType , class NumberIntegerType , class NumberUnsignedType , class NumberFloatType , template <typename U> class AllocatorType , template <typename T, typename SFINAE = void> class JSONSerializer , class BinaryType > friend void to_json( ::nlohmann::basic_json<ObjectType, ArrayType, StringType, BooleanType, NumberIntegerType, NumberUnsignedType, NumberFloatType, AllocatorType, JSONSerializer, BinaryType>& j, BFloat16 v) { j = static_cast<NumberFloatType>(v); } struct bitcast_construct_t {}; explicit constexpr BFloat16(bitcast_construct_t, uint16_t rep) : rep_(rep) {} uint16_t rep_; }; inline bool isinf(BFloat16 x) { return std::isinf(static_cast<float>(x)); } inline bool signbit(BFloat16 x) { return std::signbit(static_cast<float>(x)); } inline bool isnan(BFloat16 x) { return std::isnan(static_cast<float>(x)); } inline bool isfinite(BFloat16 x) { return std::isfinite(static_cast<float>(x)); } inline BFloat16 abs(BFloat16 x) { x.rep_ &= 0x7fff; return x; } inline BFloat16 exp(BFloat16 x) { return BFloat16(std::exp(static_cast<float>(x))); } inline BFloat16 exp2(BFloat16 x) { return BFloat16(std::exp2(static_cast<float>(x))); } inline BFloat16 expm1(BFloat16 x) { return BFloat16(std::expm1(static_cast<float>(x))); } inline BFloat16 log(BFloat16 x) { return BFloat16(std::log(static_cast<float>(x))); } inline BFloat16 log1p(BFloat16 x) { return BFloat16(std::log1p(static_cast<float>(x))); } inline BFloat16 log10(BFloat16 x) { return BFloat16(std::log10(static_cast<float>(x))); } inline BFloat16 log2(BFloat16 x) { return BFloat16(std::log2(static_cast<float>(x))); } inline BFloat16 sqrt(BFloat16 x) { return BFloat16(std::sqrt(static_cast<float>(x))); } inline BFloat16 pow(BFloat16 x, BFloat16 y) { return BFloat16(std::pow(static_cast<float>(x), static_cast<float>(y))); } inline BFloat16 sin(BFloat16 x) { return BFloat16(std::sin(static_cast<float>(x))); } inline BFloat16 cos(BFloat16 x) { return BFloat16(std::cos(static_cast<float>(x))); } inline BFloat16 tan(BFloat16 x) { return BFloat16(std::tan(static_cast<float>(x))); } inline BFloat16 asin(BFloat16 x) { return BFloat16(std::asin(static_cast<float>(x))); } inline BFloat16 acos(BFloat16 x) { return BFloat16(std::acos(static_cast<float>(x))); } inline BFloat16 atan(BFloat16 x) { return BFloat16(std::atan(static_cast<float>(x))); } inline BFloat16 sinh(BFloat16 x) { return BFloat16(std::sinh(static_cast<float>(x))); } inline BFloat16 cosh(BFloat16 x) { return BFloat16(std::cosh(static_cast<float>(x))); } inline BFloat16 tanh(BFloat16 x) { return BFloat16(std::tanh(static_cast<float>(x))); } inline BFloat16 asinh(BFloat16 x) { return BFloat16(std::asinh(static_cast<float>(x))); } inline BFloat16 acosh(BFloat16 x) { return BFloat16(std::acosh(static_cast<float>(x))); } inline BFloat16 atanh(BFloat16 x) { return BFloat16(std::atanh(static_cast<float>(x))); } inline BFloat16 floor(BFloat16 x) { return BFloat16(std::floor(static_cast<float>(x))); } inline BFloat16 trunc(BFloat16 x) { return BFloat16(std::trunc(static_cast<float>(x))); } inline BFloat16 rint(BFloat16 x) { return BFloat16(std::rint(static_cast<float>(x))); } inline BFloat16 ceil(BFloat16 x) { return BFloat16(std::ceil(static_cast<float>(x))); } inline BFloat16 fmod(BFloat16 x, BFloat16 y) { return BFloat16(std::fmod(static_cast<float>(x), static_cast<float>(y))); } inline BFloat16 fmin(BFloat16 a, BFloat16 b) { return BFloat16(std::fmin(static_cast<float>(a), static_cast<float>(b))); } inline BFloat16 fmax(BFloat16 a, BFloat16 b) 
{ return BFloat16(std::fmax(static_cast<float>(a), static_cast<float>(b))); } inline BFloat16 nextafter(BFloat16 from, BFloat16 to) { const uint16_t from_as_int = absl::bit_cast<uint16_t>(from), to_as_int = absl::bit_cast<uint16_t>(to); const uint16_t sign_mask = 1 << 15; float from_as_float(from), to_as_float(to); if (std::isnan(from_as_float) || std::isnan(to_as_float)) { return BFloat16(std::numeric_limits<float>::quiet_NaN()); } if (from_as_int == to_as_int) { return to; } if (from_as_float == 0) { if (to_as_float == 0) { return to; } else { return absl::bit_cast<BFloat16, uint16_t>((to_as_int & sign_mask) | 1); } } uint16_t from_sign = from_as_int & sign_mask; uint16_t to_sign = to_as_int & sign_mask; uint16_t from_abs = from_as_int & ~sign_mask; uint16_t to_abs = to_as_int & ~sign_mask; uint16_t magnitude_adjustment = (from_abs > to_abs || from_sign != to_sign) ? 0xFFFF : 0x0001; return absl::bit_cast<BFloat16, uint16_t>(from_as_int + magnitude_adjustment); } namespace internal { inline uint16_t GetFloat32High16(float v) { return static_cast<uint16_t>(absl::bit_cast<uint32_t>(v) >> 16); } inline BFloat16 Float32ToBfloat16Truncate(float v) { uint32_t bits = absl::bit_cast<uint32_t>(v); if (std::isnan(v)) { bits |= (static_cast<uint32_t>(1) << 21); } return absl::bit_cast<BFloat16, uint16_t>(bits >> 16); } inline BFloat16 NumericFloat32ToBfloat16RoundNearestEven(float v) { assert(!std::isnan(v)); uint32_t input = absl::bit_cast<uint32_t>(v); const uint32_t lsb = (input >> 16) & 1; const uint32_t rounding_bias = 0x7fff + lsb; input += rounding_bias; return absl::bit_cast<BFloat16, uint16_t>(input >> 16); } inline BFloat16 Float32ToBfloat16RoundNearestEven(float v) { if (std::isnan(v)) { return tensorstore::BFloat16( tensorstore::BFloat16::bitcast_construct_t{}, static_cast<uint16_t>((absl::bit_cast<uint32_t>(v) | 0x00200000u) >> 16)); } return NumericFloat32ToBfloat16RoundNearestEven(v); } inline float Bfloat16ToFloat(BFloat16 v) { return absl::bit_cast<float>( static_cast<uint32_t>(absl::bit_cast<uint16_t>(v)) << 16); } } } namespace std { template <> struct numeric_limits<tensorstore::BFloat16> { static constexpr bool is_specialized = true; static constexpr bool is_signed = true; static constexpr bool is_integer = false; static constexpr bool is_exact = false; static constexpr bool has_infinity = true; static constexpr bool has_quiet_NaN = true; static constexpr bool has_signaling_NaN = true; static constexpr float_denorm_style has_denorm = std::denorm_present; static constexpr bool has_denorm_loss = false; static constexpr std::float_round_style round_style = numeric_limits<float>::round_style; static constexpr bool is_iec559 = false; static constexpr bool is_bounded = true; static constexpr bool is_modulo = false; static constexpr int digits = 8; static constexpr int digits10 = 2; static constexpr int max_digits10 = 4; static constexpr int radix = 2; static constexpr int min_exponent = numeric_limits<float>::min_exponent; static constexpr int min_exponent10 = numeric_limits<float>::min_exponent10; static constexpr int max_exponent = numeric_limits<float>::max_exponent; static constexpr int max_exponent10 = numeric_limits<float>::max_exponent10; static constexpr bool traps = numeric_limits<float>::traps; static constexpr bool tinyness_before = numeric_limits<float>::tinyness_before; static constexpr tensorstore::BFloat16 min() { return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{}, static_cast<uint16_t>(0x0080)); } static constexpr tensorstore::BFloat16 lowest() 
{ return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{}, static_cast<uint16_t>(0xff7f)); } static constexpr tensorstore::BFloat16 max() { return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{}, static_cast<uint16_t>(0x7f7f)); } static constexpr tensorstore::BFloat16 epsilon() { return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{}, static_cast<uint16_t>(0x3c00)); } static constexpr tensorstore::BFloat16 round_error() { return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{}, static_cast<uint16_t>(0x3f00)); } static constexpr tensorstore::BFloat16 infinity() { return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{}, static_cast<uint16_t>(0x7f80)); } static constexpr tensorstore::BFloat16 quiet_NaN() { return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{}, static_cast<uint16_t>(0x7fc0)); } static constexpr tensorstore::BFloat16 signaling_NaN() { return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{}, static_cast<uint16_t>(0x7f81)); } static constexpr tensorstore::BFloat16 denorm_min() { return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{}, static_cast<uint16_t>(0x0001)); } }; } #endif
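The narrowing direction above uses the standard round-to-nearest-even bias trick: add 0x7fff plus the low bit of the would-be result to the float's bit pattern, then keep the high 16 bits; the addition carries into bit 16 exactly when rounding up is required. A standalone sketch of that trick for non-NaN inputs (round_bf16 is a hypothetical name; std::bit_cast stands in for absl::bit_cast):

#include <bit>
#include <cstdint>

// Round-to-nearest-even float -> bfloat16 bit pattern, non-NaN inputs only
// (mirrors NumericFloat32ToBfloat16RoundNearestEven above).
uint16_t round_bf16(float v) {
  uint32_t bits = std::bit_cast<uint32_t>(v);
  const uint32_t lsb = (bits >> 16) & 1;  // low bit of the truncated result
  bits += 0x7fffu + lsb;                  // bias; ties go to the even result
  return static_cast<uint16_t>(bits >> 16);
}

This yields round_bf16(1.0f) == 0x3f80 and round_bf16(0.33333f) == 0x3eab, the values asserted by ConversionFromFloat in the test file below.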
#include "tensorstore/util/bfloat16.h" #include <cmath> #include <cstdint> #include <cstring> #include <limits> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/base/casts.h" #include "tensorstore/internal/json_gtest.h" namespace { using ::tensorstore::internal::Float32ToBfloat16RoundNearestEven; using ::tensorstore::internal::Float32ToBfloat16Truncate; using bfloat16_t = tensorstore::BFloat16; ::testing::Matcher<bfloat16_t> MatchesBits(uint16_t bits) { return ::testing::ResultOf( [](bfloat16_t y) { return absl::bit_cast<uint16_t>(y); }, ::testing::Eq(bits)); } ::testing::Matcher<float> NearFloat(float x, float relative_error = 1e-3) { return ::testing::FloatNear(x, std::abs(x) * relative_error); } float BinaryToFloat(uint32_t sign, uint32_t exponent, uint32_t high_mantissa, uint32_t low_mantissa) { float dest; uint32_t src = (sign << 31) + (exponent << 23) + (high_mantissa << 16) + low_mantissa; memcpy(static_cast<void*>(&dest), static_cast<const void*>(&src), sizeof(dest)); return dest; } void TestTruncate(float input, float expected_truncation, float expected_rounding) { bfloat16_t truncated = Float32ToBfloat16Truncate(input); bfloat16_t rounded = Float32ToBfloat16RoundNearestEven(input); if (std::isnan(input)) { EXPECT_TRUE(std::isnan(truncated)); EXPECT_TRUE(std::isnan(rounded)); return; } EXPECT_EQ(expected_truncation, static_cast<float>(truncated)); EXPECT_EQ(expected_rounding, static_cast<float>(rounded)); } template <typename T> void TestRoundtrips() { for (T value : { -std::numeric_limits<T>::infinity(), std::numeric_limits<T>::infinity(), T(-1.0), T(-0.5), T(-0.0), T(1.0), T(0.5), T(0.0), }) { EXPECT_EQ(value, static_cast<T>(static_cast<bfloat16_t>(value))); } } TEST(Bfloat16Test, FloatRoundtrips) { TestRoundtrips<float>(); } TEST(Bfloat16Test, DoubleRoundtrips) { TestRoundtrips<double>(); } TEST(Bfloat16Test, Float16Roundtrips) { TestRoundtrips<bfloat16_t>(); } TEST(Bfloat16Test, ConversionFromFloat) { EXPECT_THAT(bfloat16_t(1.0f), MatchesBits(0x3f80)); EXPECT_THAT(bfloat16_t(0.5f), MatchesBits(0x3f00)); EXPECT_THAT(bfloat16_t(0.33333f), MatchesBits(0x3eab)); EXPECT_THAT(bfloat16_t(3.38e38f), MatchesBits(0x7f7e)); EXPECT_THAT(bfloat16_t(3.40e38f), MatchesBits(0x7f80)); } TEST(Bfloat16Test, RoundToNearestEven) { float val1 = static_cast<float>(absl::bit_cast<bfloat16_t>(uint16_t{0x3c00})); float val2 = static_cast<float>(absl::bit_cast<bfloat16_t>(uint16_t{0x3c01})); float val3 = static_cast<float>(absl::bit_cast<bfloat16_t>(uint16_t{0x3c02})); EXPECT_THAT(bfloat16_t(0.5f * (val1 + val2)), MatchesBits(0x3c00)); EXPECT_THAT(bfloat16_t(0.5f * (val2 + val3)), MatchesBits(0x3c02)); } TEST(Bfloat16Test, ConversionFromInt) { EXPECT_THAT(bfloat16_t(-1), MatchesBits(0xbf80)); EXPECT_THAT(bfloat16_t(0), MatchesBits(0x0000)); EXPECT_THAT(bfloat16_t(1), MatchesBits(0x3f80)); EXPECT_THAT(bfloat16_t(2), MatchesBits(0x4000)); EXPECT_THAT(bfloat16_t(3), MatchesBits(0x4040)); EXPECT_THAT(bfloat16_t(12), MatchesBits(0x4140)); } TEST(Bfloat16Test, ConversionFromBool) { EXPECT_THAT(bfloat16_t(false), MatchesBits(0x0000)); EXPECT_THAT(bfloat16_t(true), MatchesBits(0x3f80)); } TEST(Bfloat16Test, ConversionToBool) { EXPECT_EQ(static_cast<bool>(bfloat16_t(3)), true); EXPECT_EQ(static_cast<bool>(bfloat16_t(0.33333f)), true); EXPECT_EQ(bfloat16_t(-0.0), false); EXPECT_EQ(static_cast<bool>(bfloat16_t(0.0)), false); } TEST(Bfloat16Test, ExplicitConversionToFloat) { EXPECT_EQ(static_cast<float>(absl::bit_cast<bfloat16_t, uint16_t>(0x0000)), 0.0f); 
EXPECT_EQ(static_cast<float>(absl::bit_cast<bfloat16_t, uint16_t>(0x3f80)), 1.0f); } TEST(Bfloat16Test, ImplicitConversionToFloat) { EXPECT_EQ((absl::bit_cast<bfloat16_t, uint16_t>(0x0000)), 0.0f); EXPECT_EQ((absl::bit_cast<bfloat16_t, uint16_t>(0x3f80)), 1.0f); } TEST(Bfloat16Test, Zero) { EXPECT_EQ(bfloat16_t(0.0f), bfloat16_t(0.0f)); EXPECT_EQ(bfloat16_t(-0.0f), bfloat16_t(0.0f)); EXPECT_EQ(bfloat16_t(-0.0f), bfloat16_t(-0.0f)); EXPECT_THAT(bfloat16_t(0.0f), MatchesBits(0x0000)); EXPECT_THAT(bfloat16_t(-0.0f), MatchesBits(0x8000)); } TEST(Bfloat16Test, DefaultConstruct) { EXPECT_EQ(static_cast<float>(bfloat16_t()), 0.0f); } TEST(Bfloat16Test, Truncate0) { TestTruncate(BinaryToFloat(0, 0x80, 0x48, 0xf5c3), BinaryToFloat(0, 0x80, 0x48, 0x0000), BinaryToFloat(0, 0x80, 0x49, 0x0000)); } TEST(Bfloat16Test, Truncate1) { TestTruncate(BinaryToFloat(1, 0x80, 0x48, 0xf5c3), BinaryToFloat(1, 0x80, 0x48, 0x0000), BinaryToFloat(1, 0x80, 0x49, 0x0000)); } TEST(Bfloat16Test, Truncate2) { TestTruncate(BinaryToFloat(0, 0x80, 0x48, 0x8000), BinaryToFloat(0, 0x80, 0x48, 0x0000), BinaryToFloat(0, 0x80, 0x48, 0x0000)); } TEST(Bfloat16Test, Truncate3) { TestTruncate(BinaryToFloat(0, 0xff, 0x00, 0x0001), BinaryToFloat(0, 0xff, 0x40, 0x0000), BinaryToFloat(0, 0xff, 0x40, 0x0000)); } TEST(Bfloat16Test, Truncate4) { TestTruncate(BinaryToFloat(0, 0xff, 0x7f, 0xffff), BinaryToFloat(0, 0xff, 0x40, 0x0000), BinaryToFloat(0, 0xff, 0x40, 0x0000)); } TEST(Bfloat16Test, Truncate5) { TestTruncate(BinaryToFloat(1, 0x80, 0x48, 0xc000), BinaryToFloat(1, 0x80, 0x48, 0x0000), BinaryToFloat(1, 0x80, 0x49, 0x0000)); } TEST(Bfloat16Test, Truncate6) { TestTruncate(BinaryToFloat(0, 0x80, 0x48, 0x0000), BinaryToFloat(0, 0x80, 0x48, 0x0000), BinaryToFloat(0, 0x80, 0x48, 0x0000)); } TEST(Bfloat16Test, Truncate7) { TestTruncate(BinaryToFloat(0, 0x80, 0x48, 0x4000), BinaryToFloat(0, 0x80, 0x48, 0x0000), BinaryToFloat(0, 0x80, 0x48, 0x0000)); } TEST(Bfloat16Test, Truncate8) { TestTruncate(BinaryToFloat(0, 0x80, 0x48, 0x8000), BinaryToFloat(0, 0x80, 0x48, 0x0000), BinaryToFloat(0, 0x80, 0x48, 0x0000)); } TEST(Bfloat16Test, Truncate9) { TestTruncate(BinaryToFloat(0, 0x00, 0x48, 0x8000), BinaryToFloat(0, 0x00, 0x48, 0x0000), BinaryToFloat(0, 0x00, 0x48, 0x0000)); } TEST(Bfloat16Test, Truncate10) { TestTruncate(BinaryToFloat(0, 0x00, 0x7f, 0xc000), BinaryToFloat(0, 0x00, 0x7f, 0x0000), BinaryToFloat(0, 0x00, 0x80, 0x0000)); } TEST(Bfloat16Test, Conversion) { for (int i = 0; i < 100; ++i) { float a = i + 1.25; bfloat16_t b = static_cast<bfloat16_t>(a); float c = static_cast<float>(b); EXPECT_LE(std::abs(c - a), a / 128); } } TEST(Bfloat16Test, Epsilon) { EXPECT_LE(1.0f, static_cast<float>(std::numeric_limits<bfloat16_t>::epsilon() + bfloat16_t(1.0f))); EXPECT_EQ(1.0f, static_cast<float>(std::numeric_limits<bfloat16_t>::epsilon() / bfloat16_t(2.0f) + bfloat16_t(1.0f))); } TEST(Bfloat16Test, NextAfter) { const bfloat16_t one(1), two(2), zero(0), nan = std::numeric_limits<bfloat16_t>::quiet_NaN(), epsilon = std::numeric_limits<bfloat16_t>::epsilon(), denorm_min = std::numeric_limits<bfloat16_t>::denorm_min(); EXPECT_EQ(epsilon, nextafter(one, two) - one); EXPECT_EQ(-epsilon / 2, nextafter(one, zero) - one); EXPECT_EQ(one, nextafter(one, one)); EXPECT_EQ(denorm_min, nextafter(zero, one)); EXPECT_EQ(-denorm_min, nextafter(zero, -one)); const bfloat16_t values[] = {zero, -zero, nan}; for (int i = 0; i < 3; ++i) { auto a = values[i]; for (int j = 0; j < 3; ++j) { if (i == j) continue; auto b = values[j]; auto next_float = 
std::nextafter(static_cast<float>(a), static_cast<float>(b)); auto next_bfloat16 = nextafter(a, b); EXPECT_EQ(std::isnan(next_float), isnan(next_bfloat16)); if (!std::isnan(next_float)) { EXPECT_EQ(next_float, next_bfloat16); } } } EXPECT_EQ(std::numeric_limits<bfloat16_t>::infinity(), nextafter(std::numeric_limits<bfloat16_t>::max(), std::numeric_limits<bfloat16_t>::infinity())); } TEST(Bfloat16Test, Negate) { EXPECT_EQ(static_cast<float>(-bfloat16_t(3.0f)), -3.0f); EXPECT_EQ(static_cast<float>(-bfloat16_t(-4.5f)), 4.5f); } #ifndef _MSC_VER TEST(Bfloat16Test, DivisionByZero) { EXPECT_TRUE(std::isnan(static_cast<float>(bfloat16_t(0.0 / 0.0)))); EXPECT_TRUE(std::isinf(static_cast<float>(bfloat16_t(1.0 / 0.0)))); EXPECT_TRUE(std::isinf(static_cast<float>(bfloat16_t(-1.0 / 0.0)))); EXPECT_TRUE(std::isnan(bfloat16_t(0.0 / 0.0))); EXPECT_TRUE(std::isinf(bfloat16_t(1.0 / 0.0))); EXPECT_TRUE(std::isinf(bfloat16_t(-1.0 / 0.0))); } #endif TEST(Bfloat16Test, NonFinite) { EXPECT_FALSE(std::isinf( static_cast<float>(bfloat16_t(3.38e38f)))); EXPECT_FALSE(std::isnan(static_cast<float>(bfloat16_t(0.0f)))); EXPECT_TRUE(std::isinf( static_cast<float>(absl::bit_cast<bfloat16_t, uint16_t>(0xff80)))); EXPECT_TRUE(std::isnan( static_cast<float>(absl::bit_cast<bfloat16_t, uint16_t>(0xffc0)))); EXPECT_TRUE(std::isinf( static_cast<float>(absl::bit_cast<bfloat16_t, uint16_t>(0x7f80)))); EXPECT_TRUE(std::isnan( static_cast<float>(absl::bit_cast<bfloat16_t, uint16_t>(0x7fc0)))); EXPECT_FALSE(isinf(absl::bit_cast<bfloat16_t, uint16_t>(0x7bff))); EXPECT_FALSE(isnan(absl::bit_cast<bfloat16_t, uint16_t>(0x0000))); EXPECT_TRUE(isinf(absl::bit_cast<bfloat16_t, uint16_t>(0xff80))); EXPECT_TRUE(isnan(absl::bit_cast<bfloat16_t, uint16_t>(0xffc0))); EXPECT_TRUE(isinf(absl::bit_cast<bfloat16_t, uint16_t>(0x7f80))); EXPECT_TRUE(isnan(absl::bit_cast<bfloat16_t, uint16_t>(0x7fc0))); EXPECT_THAT(bfloat16_t(BinaryToFloat(0x0, 0xff, 0x40, 0x0)), MatchesBits(0x7fe0)); EXPECT_THAT(bfloat16_t(BinaryToFloat(0x1, 0xff, 0x40, 0x0)), MatchesBits(0xffe0)); EXPECT_THAT( Float32ToBfloat16Truncate(BinaryToFloat(0x0, 0xff, 0x40, 0x0)), MatchesBits(0x7fe0)); EXPECT_THAT( Float32ToBfloat16Truncate(BinaryToFloat(0x1, 0xff, 0x40, 0x0)), MatchesBits(0xffe0)); } TEST(Bfloat16Test, NumericLimits) { static_assert(std::numeric_limits<bfloat16_t>::is_signed); EXPECT_EQ( absl::bit_cast<uint16_t>(std::numeric_limits<bfloat16_t>::infinity()), absl::bit_cast<uint16_t>( bfloat16_t(std::numeric_limits<float>::infinity()))); constexpr uint16_t BFLOAT16_QUIET_BIT = 0x0040; EXPECT_TRUE(isnan(std::numeric_limits<bfloat16_t>::quiet_NaN())); EXPECT_TRUE(isnan(bfloat16_t(std::numeric_limits<float>::quiet_NaN()))); EXPECT_GT( (absl::bit_cast<uint16_t>(std::numeric_limits<bfloat16_t>::quiet_NaN()) & BFLOAT16_QUIET_BIT), 0); EXPECT_GT((absl::bit_cast<uint16_t>( bfloat16_t(std::numeric_limits<float>::quiet_NaN())) & BFLOAT16_QUIET_BIT), 0); EXPECT_TRUE(isnan(std::numeric_limits<bfloat16_t>::signaling_NaN())); EXPECT_TRUE(isnan(bfloat16_t(std::numeric_limits<float>::signaling_NaN()))); EXPECT_EQ(0, (absl::bit_cast<uint16_t>( std::numeric_limits<bfloat16_t>::signaling_NaN()) & BFLOAT16_QUIET_BIT)); #ifndef _MSC_VER EXPECT_EQ(0, (absl::bit_cast<uint16_t>( bfloat16_t(std::numeric_limits<float>::signaling_NaN())) & BFLOAT16_QUIET_BIT)); #endif EXPECT_GT(std::numeric_limits<bfloat16_t>::min(), bfloat16_t(0.f)); EXPECT_GT(std::numeric_limits<bfloat16_t>::denorm_min(), bfloat16_t(0.f)); EXPECT_EQ(std::numeric_limits<bfloat16_t>::denorm_min() / bfloat16_t(2), bfloat16_t(0.f)); } 
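// bfloat16 stores the high half of an IEEE binary32: 1 sign bit, 8 exponent
// bits, 7 explicit mantissa bits. BFLOAT16_QUIET_BIT (0x0040) above is the
// mantissa MSB, so the quiet NaN pattern 0x7fc0 has it set while the
// signaling NaN pattern 0x7f81 does not.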
TEST(Bfloat16Test, Arithmetic) { EXPECT_EQ(static_cast<float>(bfloat16_t(2) + bfloat16_t(2)), 4); EXPECT_EQ(static_cast<float>(bfloat16_t(2) + bfloat16_t(-2)), 0); EXPECT_THAT(static_cast<float>(bfloat16_t(0.33333f) + bfloat16_t(0.66667f)), NearFloat(1.0f)); EXPECT_EQ(static_cast<float>(bfloat16_t(2.0f) * bfloat16_t(-5.5f)), -11.0f); EXPECT_THAT(static_cast<float>(bfloat16_t(1.0f) / bfloat16_t(3.0f)), NearFloat(0.3339f)); EXPECT_EQ(static_cast<float>(-bfloat16_t(4096.0f)), -4096.0f); EXPECT_EQ(static_cast<float>(-bfloat16_t(-4096.0f)), 4096.0f); } TEST(Bfloat16Test, Comparison) { EXPECT_TRUE(bfloat16_t(1.0f) > bfloat16_t(0.5f)); EXPECT_TRUE(bfloat16_t(0.5f) < bfloat16_t(1.0f)); EXPECT_FALSE((bfloat16_t(1.0f) < bfloat16_t(0.5f))); EXPECT_FALSE((bfloat16_t(0.5f) > bfloat16_t(1.0f))); EXPECT_FALSE((bfloat16_t(4.0f) > bfloat16_t(4.0f))); EXPECT_FALSE((bfloat16_t(4.0f) < bfloat16_t(4.0f))); EXPECT_FALSE((bfloat16_t(0.0f) < bfloat16_t(-0.0f))); EXPECT_FALSE((bfloat16_t(-0.0f) < bfloat16_t(0.0f))); EXPECT_FALSE((bfloat16_t(0.0f) > bfloat16_t(-0.0f))); EXPECT_FALSE((bfloat16_t(-0.0f) > bfloat16_t(0.0f))); EXPECT_TRUE(bfloat16_t(0.2f) > bfloat16_t(-1.0f)); EXPECT_TRUE(bfloat16_t(-1.0f) < bfloat16_t(0.2f)); EXPECT_TRUE(bfloat16_t(-16.0f) < bfloat16_t(-15.0f)); EXPECT_TRUE(bfloat16_t(1.0f) == bfloat16_t(1.0f)); EXPECT_TRUE(bfloat16_t(1.0f) != bfloat16_t(2.0f)); #ifndef _MSC_VER EXPECT_FALSE((bfloat16_t(0.0 / 0.0) == bfloat16_t(0.0 / 0.0))); EXPECT_TRUE(bfloat16_t(0.0 / 0.0) != bfloat16_t(0.0 / 0.0)); EXPECT_FALSE((bfloat16_t(1.0) == bfloat16_t(0.0 / 0.0))); EXPECT_FALSE((bfloat16_t(1.0) < bfloat16_t(0.0 / 0.0))); EXPECT_FALSE((bfloat16_t(1.0) > bfloat16_t(0.0 / 0.0))); EXPECT_TRUE(bfloat16_t(1.0) != bfloat16_t(0.0 / 0.0)); EXPECT_TRUE(bfloat16_t(1.0) < bfloat16_t(1.0 / 0.0)); EXPECT_TRUE(bfloat16_t(1.0) > bfloat16_t(-1.0 / 0.0)); #endif } constexpr float PI = 3.14159265358979323846f; TEST(Bfloat16Test, BasicFunctions) { EXPECT_EQ(static_cast<float>(abs(bfloat16_t(3.5f))), 3.5f); EXPECT_EQ(static_cast<float>(abs(bfloat16_t(3.5f))), 3.5f); EXPECT_EQ(static_cast<float>(abs(bfloat16_t(-3.5f))), 3.5f); EXPECT_EQ(static_cast<float>(abs(bfloat16_t(-3.5f))), 3.5f); EXPECT_EQ(static_cast<float>(floor(bfloat16_t(3.5f))), 3.0f); EXPECT_EQ(static_cast<float>(floor(bfloat16_t(3.5f))), 3.0f); EXPECT_EQ(static_cast<float>(floor(bfloat16_t(-3.5f))), -4.0f); EXPECT_EQ(static_cast<float>(floor(bfloat16_t(-3.5f))), -4.0f); EXPECT_EQ(static_cast<float>(ceil(bfloat16_t(3.5f))), 4.0f); EXPECT_EQ(static_cast<float>(ceil(bfloat16_t(3.5f))), 4.0f); EXPECT_EQ(static_cast<float>(ceil(bfloat16_t(-3.5f))), -3.0f); EXPECT_EQ(static_cast<float>(ceil(bfloat16_t(-3.5f))), -3.0f); EXPECT_FLOAT_EQ(static_cast<float>(sqrt(bfloat16_t(0.0f))), 0.0f); EXPECT_FLOAT_EQ(static_cast<float>(sqrt(bfloat16_t(0.0f))), 0.0f); EXPECT_FLOAT_EQ(static_cast<float>(sqrt(bfloat16_t(4.0f))), 2.0f); EXPECT_FLOAT_EQ(static_cast<float>(sqrt(bfloat16_t(4.0f))), 2.0f); EXPECT_FLOAT_EQ(static_cast<float>(pow(bfloat16_t(0.0f), bfloat16_t(1.0f))), 0.0f); EXPECT_FLOAT_EQ(static_cast<float>(pow(bfloat16_t(0.0f), bfloat16_t(1.0f))), 0.0f); EXPECT_FLOAT_EQ(static_cast<float>(pow(bfloat16_t(2.0f), bfloat16_t(2.0f))), 4.0f); EXPECT_FLOAT_EQ(static_cast<float>(pow(bfloat16_t(2.0f), bfloat16_t(2.0f))), 4.0f); EXPECT_EQ(static_cast<float>(exp(bfloat16_t(0.0f))), 1.0f); EXPECT_EQ(static_cast<float>(exp(bfloat16_t(0.0f))), 1.0f); EXPECT_THAT(static_cast<float>(exp(bfloat16_t(PI))), NearFloat(20.f + static_cast<float>(PI))); 
EXPECT_THAT(static_cast<float>(exp(bfloat16_t(PI))), NearFloat(20.f + static_cast<float>(PI))); EXPECT_EQ(static_cast<float>(expm1(bfloat16_t(0.0f))), 0.0f); EXPECT_EQ(static_cast<float>(expm1(bfloat16_t(0.0f))), 0.0f); EXPECT_THAT(static_cast<float>(expm1(bfloat16_t(2.0f))), NearFloat(6.375f)); EXPECT_THAT(static_cast<float>(expm1(bfloat16_t(2.0f))), NearFloat(6.375f)); EXPECT_EQ(static_cast<float>(log(bfloat16_t(1.0f))), 0.0f); EXPECT_EQ(static_cast<float>(log(bfloat16_t(1.0f))), 0.0f); EXPECT_THAT(static_cast<float>(log(bfloat16_t(10.0f))), NearFloat(2.296875f)); EXPECT_THAT(static_cast<float>(log(bfloat16_t(10.0f))), NearFloat(2.296875f)); EXPECT_EQ(static_cast<float>(log1p(bfloat16_t(0.0f))), 0.0f); EXPECT_EQ(static_cast<float>(log1p(bfloat16_t(0.0f))), 0.0f); EXPECT_THAT(static_cast<float>(log1p(bfloat16_t(10.0f))), NearFloat(2.390625f)); EXPECT_THAT(static_cast<float>(log1p(bfloat16_t(10.0f))), NearFloat(2.390625f)); } TEST(Bfloat16Test, TrigonometricFunctions) { EXPECT_THAT(cos(bfloat16_t(0.0f)), NearFloat(bfloat16_t(std::cos(0.0f)))); EXPECT_THAT(cos(bfloat16_t(0.0f)), NearFloat(bfloat16_t(std::cos(0.0f)))); EXPECT_FLOAT_EQ(cos(bfloat16_t(PI)), bfloat16_t(std::cos(PI))); EXPECT_NEAR(cos(bfloat16_t(PI / 2)), bfloat16_t(std::cos(PI / 2)), 1e-3); EXPECT_NEAR(cos(bfloat16_t(3 * PI / 2)), bfloat16_t(std::cos(3 * PI / 2)), 1e-2); EXPECT_THAT(cos(bfloat16_t(3.5f)), NearFloat(bfloat16_t(std::cos(3.5f)))); EXPECT_FLOAT_EQ(sin(bfloat16_t(0.0f)), bfloat16_t(std::sin(0.0f))); EXPECT_FLOAT_EQ(sin(bfloat16_t(0.0f)), bfloat16_t(std::sin(0.0f))); EXPECT_NEAR(sin(bfloat16_t(PI)), bfloat16_t(std::sin(PI)), 1e-3); EXPECT_THAT(sin(bfloat16_t(PI / 2)), NearFloat(bfloat16_t(std::sin(PI / 2)))); EXPECT_THAT(sin(bfloat16_t(3 * PI / 2)), NearFloat(bfloat16_t(std::sin(3 * PI / 2)))); EXPECT_THAT(sin(bfloat16_t(3.5f)), NearFloat(bfloat16_t(std::sin(3.5f)))); EXPECT_FLOAT_EQ(tan(bfloat16_t(0.0f)), bfloat16_t(std::tan(0.0f))); EXPECT_FLOAT_EQ(tan(bfloat16_t(0.0f)), bfloat16_t(std::tan(0.0f))); EXPECT_NEAR(tan(bfloat16_t(PI)), bfloat16_t(std::tan(PI)), 1e-3); EXPECT_THAT(tan(bfloat16_t(3.5f)), NearFloat(bfloat16_t(std::tan(3.5f)))); } TEST(Bfloat16Test, JsonConversion) { EXPECT_THAT(::nlohmann::json(bfloat16_t(1.5)), tensorstore::MatchesJson(1.5)); } }
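Because only 8 significand bits survive (digits == 8 in the numeric_limits specialization above), epsilon is 2^-7 = 0.0078125 and odd integers above 256 are no longer exactly representable. A small usage sketch, assuming the header above is on the include path:

#include <iostream>
#include <limits>
#include "tensorstore/util/bfloat16.h"

int main() {
  using tensorstore::BFloat16;
  // Bit pattern 0x3c00, i.e. 2^-7.
  std::cout << static_cast<float>(std::numeric_limits<BFloat16>::epsilon()) << "\n";
  // 257 lies halfway between the representable 256 and 258; round-to-nearest-
  // even picks 256, whose low mantissa bit is even.
  std::cout << static_cast<float>(BFloat16(257.0f)) << "\n";  // prints 256
  std::cout << static_cast<float>(BFloat16(258.0f)) << "\n";  // prints 258
}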
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/bfloat16.h
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/bfloat16_test.cc
4f887a6430414cd6088e1743555015b10f116d50
b9c06777-8747-4fb4-8b7a-eda275f5c353
cpp
tensorflow/tensorflow
hlo_graph_dumper
third_party/xla/xla/service/hlo_graph_dumper.cc
third_party/xla/xla/service/hlo_graph_dumper_test.cc
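The file below renders an HloComputation as Graphviz DOT: each instruction becomes a node, colors encode opcode classes, and a NodeFilter wrapping a std::function decides per instruction whether it is shown, hidden, or highlighted. A stripped-down sketch of that filter pattern (Node and FilterResult are illustrative stand-ins for HloInstruction and NodeFilterResult, not the file's types):

#include <cstdio>
#include <functional>

enum FilterResult { kNormal, kHide, kHighlight };
struct Node { int id; };

// A default-constructed filter shows everything, as in the file's NodeFilter.
struct Filter {
  std::function<FilterResult(const Node&)> fn =
      [](const Node&) { return kNormal; };
  bool Show(const Node& n) const { return fn(n) != kHide; }
  bool Highlight(const Node& n) const { return fn(n) == kHighlight; }
};

int main() {
  Filter f{[](const Node& n) { return n.id == 7 ? kHighlight : kNormal; }};
  std::printf("show 3: %d, highlight 7: %d\n",
              f.Show(Node{3}), f.Highlight(Node{7}));
}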
#include "xla/service/hlo_graph_dumper.h" #include <cstdint> #include <unordered_map> #include "absl/base/const_init.h" #include "absl/base/thread_annotations.h" #include "absl/hash/hash.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" #include "xla/comparison_util.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_sharding.h" #include "xla/shape.h" #include "tsl/platform/errors.h" #include "tsl/platform/file_system.h" #include "tsl/platform/statusor.h" #include "tsl/platform/thread_annotations.h" #ifndef _WIN32 #include <unistd.h> #endif #include <algorithm> #include <atomic> #include <deque> #include <functional> #include <map> #include <memory> #include <optional> #include <queue> #include <string> #include <tuple> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include "absl/strings/str_replace.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/layout_util.h" #include "xla/literal.h" #include "xla/primitive_util.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/pattern_matcher.h" #include "xla/shape_util.h" #include "xla/stream_executor/dnn.h" #include "xla/tsl/lib/gtl/map_util.h" #include "xla/tsl/lib/io/zlib_compression_options.h" #include "xla/tsl/lib/io/zlib_outputbuffer.h" #include "xla/types.h" #include "xla/util.h" #include "xla/window_util.h" #include "tsl/platform/base64.h" #include "tsl/platform/env.h" #include "tsl/platform/numbers.h" #include "tsl/platform/protobuf.h" #include "tsl/platform/regexp.h" #include "tsl/platform/status.h" namespace xla { namespace { using absl::StrAppend; using absl::StrCat; using absl::StrFormat; using absl::StrJoin; using std::nullopt; using std::optional; enum NodeFilterResult { kNormalNode, kHideNode, kHighlightNode, kSomeOperandsOmitted, kOmitNodeOperands, kSomeUsersOmitted, }; class NodeFilter { public: NodeFilter() : filter_([](const HloInstruction*) { return kNormalNode; }) {} explicit NodeFilter( std::function<NodeFilterResult(const HloInstruction* instr)> filter, std::optional<int> num_rendered = std::nullopt) : filter_(std::move(filter)), num_rendered_(num_rendered) {} bool Show(const HloInstruction* instr) const { return filter_(instr) != kHideNode; } bool Highlight(const HloInstruction* instr) const { return filter_(instr) == kHighlightNode; } bool OmitOperands(const HloInstruction* instr) const { return filter_(instr) == kOmitNodeOperands; } bool SomeOrAllOperandsOmitted(const HloInstruction* instr) const { auto result = filter_(instr); return result == kOmitNodeOperands || result == kSomeOperandsOmitted; } bool Deemphasized(const HloInstruction* instr) const { auto result = filter_(instr); return result == kOmitNodeOperands || result == kSomeOperandsOmitted || result == kSomeUsersOmitted; } std::optional<int> GetNumRendered() const { return num_rendered_; } private: std::function<NodeFilterResult(const HloInstruction* instr)> filter_; std::optional<int> num_rendered_; }; bool IsSmall(const HloInstruction* instr) { if 
(ShapeUtil::HasPrimitiveType(instr->shape(), OPAQUE_TYPE) || ShapeUtil::HasPrimitiveType(instr->shape(), TOKEN)) { return true; } return ShapeUtil::ElementsInRecursive(instr->shape()) < 4096; } enum ColorScheme { kBlue, kBrown, kDarkBlue, kDarkGreen, kDarkOrange, kDarkRed, kGray, kGreen, kOrange, kPurple, kRed, kWhite, kYellow, kDashedBorder, }; struct NodeColors { std::string style; std::string fill_color; std::string stroke_color; std::string font_color; }; NodeColors NodeColorsForScheme(ColorScheme color) { switch (color) { case kBlue: return NodeColors{"filled", "#bbdefb", "#8aacc8", "black"}; case kBrown: return NodeColors{"filled", "#bcaaa4", "#8c7b75", "black"}; case kDarkBlue: return NodeColors{"filled", "#1565c0", "#003c8f", "white"}; case kDarkGreen: return NodeColors{"filled", "#2e7d32", "#005005", "white"}; case kDarkOrange: return NodeColors{"filled", "#ffb74d", "#c88719", "black"}; case kDarkRed: return NodeColors{"filled", "#b71c1c", "#7f0000", "white"}; case kGray: return NodeColors{"filled", "#cfd8dc", "#9ea7aa", "black"}; case kGreen: return NodeColors{"filled", "#c8e6c9", "#97b498", "black"}; case kOrange: return NodeColors{"filled", "#ffe0b2", "#cbae82", "black"}; case kPurple: return NodeColors{"filled", "#e1bee7", "#af8eb5", "black"}; case kRed: return NodeColors{"filled", "#ffcdd2", "#cb9ca1", "black"}; case kWhite: return NodeColors{"filled", "white", "#9e9e9e", "black"}; case kYellow: return NodeColors{"filled", "#fff9c4", "#cbc693", "black"}; case kDashedBorder: return NodeColors{"filled,dashed", "white", "#757575", "#757575"}; } } std::string NodeFillColorForStatistic(const Statistic& statistic) { auto stat_val = statistic.stat_val(); if (stat_val == 0) { return "#f5f5f5"; } else if (stat_val < 10) { return "#f7d4cc"; } else if (stat_val < 20) { return "#f8b2a3"; } else if (stat_val < 30) { return "#f9a28f"; } else if (stat_val < 40) { return "#fa917b"; } else if (stat_val < 50) { return "#fb8066"; } else if (stat_val < 60) { return "#fc7052"; } else if (stat_val < 70) { return "#fd5f3d"; } else if (stat_val < 80) { return "#fd4e29"; } else if (stat_val < 90) { return "#fe3e14"; } else { return "#ff2d00"; } } std::string NodeFontColorForStatistic(const Statistic& statistic) { if (statistic.stat_val() < 60) { return "black"; } else { return "white"; } } std::string NodeColorAttributes(ColorScheme color) { NodeColors node_colors = NodeColorsForScheme(color); return StrFormat(R"(style="%s", fontcolor="%s", color="%s", fillcolor="%s")", node_colors.style, node_colors.font_color, node_colors.stroke_color, node_colors.fill_color); } std::string HtmlLikeStringSanitize(absl::string_view s) { return absl::StrReplaceAll(s, {{"<", "&lt;"}, {">", "&gt;"}, {"\"", "&quot;"}}); } bool IsFusedBroadcastOfConstantEffectiveScalar(const HloInstruction* instr) { namespace m = match; return instr->parent()->IsFusionComputation() && Match(instr, m::Broadcast(m::ConstantEffectiveScalar())); } optional<std::string> MatchTrivialComputation( const HloComputation* computation) { namespace m = match; if (computation->instruction_count() != 3) { return nullopt; } HloInstruction* root = computation->root_instruction(); const HloInstruction *param0, *param1; if (!Match(root, m::Op() .WithNumOperands(2) .WithShape(m::Shape().IsEffectiveScalar()) .WithBinaryOperandsAnyOrder( m::Parameter(&param0, 0) .WithShape(m::Shape().IsEffectiveScalar()), m::Parameter(&param1, 1) .WithShape(m::Shape().IsEffectiveScalar())))) { return nullopt; } if (root->operand(0) == param1) { CHECK_EQ(root->operand(1), 
param0); if (root->opcode() == HloOpcode::kCompare) { switch (root->comparison_direction()) { case ComparisonDirection::kLe: case ComparisonDirection::kGe: case ComparisonDirection::kGt: case ComparisonDirection::kLt: return nullopt; default: break; } } } switch (root->opcode()) { case HloOpcode::kAdd: return "add"; case HloOpcode::kMultiply: return "multiply"; case HloOpcode::kMinimum: return "min"; case HloOpcode::kMaximum: return "max"; case HloOpcode::kXor: return "xor"; case HloOpcode::kAnd: return "and"; case HloOpcode::kOr: return "or"; case HloOpcode::kCompare: { switch (root->comparison_direction()) { case ComparisonDirection::kLe: return "less-or-equal"; case ComparisonDirection::kGe: return "greater-or-equal"; case ComparisonDirection::kGt: return "greater-than"; case ComparisonDirection::kLt: return "less-than"; case ComparisonDirection::kEq: return "equal-to"; case ComparisonDirection::kNe: return "not-equal-to"; } } default: return nullopt; } } class HloDotDumper { public: HloDotDumper( const HloComputation* computation, absl::string_view label, const DebugOptions& debug_options, HloRenderOptions hlo_render_options, NodeFilter filter, std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>> color_map = std::nullopt) : computation_(computation), label_(label), debug_options_(debug_options), hlo_render_options_(hlo_render_options), filter_(std::move(filter)), color_map_(color_map) {} std::string Dump(); std::optional<std::string> CssIdForInstruction(const HloInstruction& instr) { if (instr.opcode() == HloOpcode::kFusion) { auto it = cluster_ids_.find(instr.called_computations()[0]); if (it == cluster_ids_.end()) { return std::nullopt; } return StrCat("#a_clust", it->second, " path"); } auto it = node_ids_.find(&instr); if (it == node_ids_.end()) { return std::nullopt; } return StrCat("#node", it->second, " polygon"); } private: std::string InstructionId(const HloInstruction* instruction) { return StrCat(reinterpret_cast<uint64_t>(instruction)); } std::string SubcomputationId(const HloComputation* computation) { return StrCat("cluster_", reinterpret_cast<uint64_t>(computation)); } std::string Header(); std::string Footer(); bool ShouldShowSubcomputation(const HloComputation* subcomp); bool ShouldShowFusionSubcomputation(const HloInstruction* instr); bool ShouldMergeIntoUsers(const HloInstruction* instr) const; std::string DumpSubcomputation(const HloComputation* subcomp, const HloInstruction* parent_instr); std::string DumpComputation(const HloComputation* comp); std::string DumpRootTag(); std::string DumpInstruction(const HloInstruction* instr); ColorScheme GetInstructionColor(const HloInstruction* instr); std::string GetInstructionNodeShape(const HloInstruction* instr); std::string GetInstructionNodeLabel(const HloInstruction* instr); std::string GetInstructionNodeMetadata(const HloInstruction* instr); std::string GetInstructionNodeBackendConfig(const HloInstruction* instr); std::string GetInstructionNodeExtraInfo(const HloInstruction* instr); std::string GetInstructionNodeInlinedOperands(const HloInstruction* instr); void AddInstructionIncomingEdges(const HloInstruction* instr); const HloInstruction* GetNodeForEdge(const HloInstruction* instr); std::string GetInstructionTrivialComputationStr(const HloInstruction* instr); const HloComputation* computation_; const std::string label_; const DebugOptions& debug_options_; const HloRenderOptions hlo_render_options_; const NodeFilter filter_; const std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>>
color_map_; int64_t next_node_id_ = 1; absl::flat_hash_map<const HloInstruction*, int64_t> node_ids_; int64_t root_node_id_; int64_t next_edge_id_ = 1; std::unordered_multimap< std::pair<const HloInstruction*, const HloInstruction*>, int64_t, absl::Hash<std::pair<const HloInstruction*, const HloInstruction*>>> edge_ids_; int64_t next_cluster_id_ = 1; absl::flat_hash_map<const HloComputation*, int64_t> cluster_ids_; std::vector<std::string> edges_; absl::flat_hash_map<HloSharding, ColorScheme> sharding_colors_; int64_t next_shard_color_ = 0; }; std::string HloDotDumper::Dump() { std::string body; StrAppend(&body, DumpComputation(computation_)); StrAppend(&body, DumpRootTag()); std::string g = Header(); StrAppend(&g, body); StrAppend(&g, Footer()); return g; } std::string HloDotDumper::Header() { constexpr char fmt[] = R"(digraph G { rankdir = TB; compound = true; label = <<b>%s</b>>; labelloc = t; tooltip = " "; stylesheet=< data:text/css, @import url(https://fonts.googleapis.com/css?family=Roboto:400,700); svg text { font-family: 'Roboto'; font-size: 12px; } %s > )"; VLOG(3) << "Generating Header"; std::string graph_label = StrCat(label_, "<br/>Computation ", computation_->name()); if (computation_->IsFusionComputation()) { StrAppend(&graph_label, " (in fusion instruction ", computation_->FusionInstruction()->name(), ")"); } std::vector<std::string> edge_css_rules; std::string kBlue = "#1976d2"; std::string kRed = "#d32f2f"; for (const auto& kv : edge_ids_) { const HloInstruction* from_node = kv.first.first; const HloInstruction* to_node = kv.first.second; int64_t edge_id = kv.second; auto add_hover_css_rule = [&](std::string elem_type, int64_t elem_id, std::string color) { edge_css_rules.push_back( StrFormat(" #%s%d:hover ~ #edge%d text { fill: %s; }\n" " #%s%d:hover ~ #edge%d path { " "stroke: %s; stroke-width: .2em; }\n" " #%s%d:hover ~ #edge%d polygon { " "fill: %s; stroke: %s; stroke-width: .2em; }\n", elem_type, elem_id, edge_id, color, elem_type, elem_id, edge_id, color, elem_type, elem_id, edge_id, color, color)); }; int64_t from_node_id = tsl::gtl::FindWithDefault(node_ids_, from_node, -1); if (from_node_id == -1) { LOG(FATAL) << from_node->name() << " was added to edges but not to nodes"; } int64_t to_node_id = to_node ?
tsl::gtl::FindWithDefault(node_ids_, to_node, -1) : root_node_id_; if (to_node != nullptr && to_node_id == -1) { LOG(FATAL) << to_node->name() << " was added to edges but not to nodes"; } add_hover_css_rule("node", from_node_id, kBlue); add_hover_css_rule("node", to_node_id, kRed); if (to_node) { VLOG(3) << "Adding css for edge " << edge_id << " from node " << from_node->name() << " to node " << to_node->name(); } else { VLOG(3) << "Adding css for edge " << edge_id << " from node " << from_node->name() << " to root tag"; } if (to_node) { if (from_node->IsFused() && from_node->parent()->root_instruction() == from_node) { int64_t cluster_id = cluster_ids_.at(from_node->parent()); add_hover_css_rule("clust", cluster_id, kBlue); } if (to_node->IsFused() && to_node->opcode() == HloOpcode::kParameter) { int64_t cluster_id = cluster_ids_.at(to_node->parent()); add_hover_css_rule("clust", cluster_id, kRed); } } } return StrFormat( fmt, graph_label, absl::StrReplaceAll(StrJoin(edge_css_rules, "\n"), {{"#", "%23"}})); } std::string HloDotDumper::Footer() { return StrCat(StrJoin(edges_, "\n"), "\n}"); } bool HloDotDumper::ShouldShowFusionSubcomputation(const HloInstruction* instr) { CHECK_EQ(instr->opcode(), HloOpcode::kFusion); return ShouldShowSubcomputation(instr->fused_instructions_computation()); } bool HloDotDumper::ShouldShowSubcomputation(const HloComputation* subcomp) { if (subcomp->IsFusionComputation()) { const HloInstruction* fusion = subcomp->FusionInstruction(); if (!filter_.Show(fusion) || filter_.SomeOrAllOperandsOmitted(fusion) || !hlo_render_options_.show_fusion_subcomputations) { return false; } } if (!subcomp->IsFusionComputation() && MatchTrivialComputation(subcomp)) { return false; } if (subcomp->WhileCallInstruction() != nullptr && !hlo_render_options_.show_while_subcomputations) { return false; } return absl::c_any_of( subcomp->instructions(), [&](const HloInstruction* instr) { return filter_.Show(instr); }); } std::string HloDotDumper::DumpSubcomputation( const HloComputation* subcomp, const HloInstruction* parent_instr) { VLOG(2) << "Dumping subcomputation " << subcomp->name(); if (parent_instr->opcode() != HloOpcode::kFusion) { const HloInstruction* from = GetNodeForEdge(subcomp->root_instruction()); VLOG(2) << "Edge: from " << from->name() << " to " << parent_instr->name() << " as " << next_edge_id_; edge_ids_.insert({{from, parent_instr}, next_edge_id_++}); constexpr char edge_fmt[] = R"(%s -> %s [ltail="%s", style="dashed" tooltip="%s -> %s"];)"; edges_.push_back(StrFormat( edge_fmt, InstructionId(from), InstructionId(parent_instr), SubcomputationId(subcomp), subcomp->name(), parent_instr->name())); } if (cluster_ids_.find(subcomp) != cluster_ids_.end()) { return ""; } cluster_ids_[subcomp] = next_cluster_id_++; std::string id = SubcomputationId(subcomp); std::string subcomp_label, style; if (parent_instr->opcode() == HloOpcode::kFusion) { subcomp_label = StrFormat("Fused expression for <b>%s</b><br/>%s", HtmlLikeStringSanitize(parent_instr->name()), HtmlLikeStringSanitize(parent_instr->ToCategory())); std::string extra_info = GetInstructionNodeExtraInfo(parent_instr); if (!extra_info.empty()) { StrAppend(&subcomp_label, "<br/>", extra_info); } std::string node_backend_config = GetInstructionNodeBackendConfig(parent_instr); if (!node_backend_config.empty()) { StrAppend(&subcomp_label, "<br/>", node_backend_config); } bool highlight = filter_.Highlight(parent_instr); std::string fillcolor; std::string strokecolor; if (!highlight && (parent_instr->module_has_statistics() 
|| parent_instr->has_statistics())) { fillcolor = parent_instr->has_statistics() ? NodeFillColorForStatistic( parent_instr->statistic_to_visualize()) : "#f5f5f5"; strokecolor = "#c2c2c2"; } else if (debug_options_.xla_hlo_graph_sharding_color() && !highlight) { NodeColors node_colors = NodeColorsForScheme(GetInstructionColor(parent_instr)); fillcolor = node_colors.fill_color; strokecolor = node_colors.stroke_color; } else { fillcolor = highlight ? "#ffcdd2" : "#f5f5f5"; strokecolor = highlight ? "#b71c1c" : "#c2c2c2"; } style = StrFormat(R"(style="rounded,filled,bold"; fillcolor="%s"; color="%s";)", fillcolor, strokecolor); } else { subcomp_label = StrFormat("Subcomputation for <b>%s</b><br/>%s", HtmlLikeStringSanitize(parent_instr->name()), HtmlLikeStringSanitize(subcomp->name())); style = "style=rounded; color=black;"; } std::string comp_body = DumpComputation(subcomp); constexpr char computation_fmt[] = R"(subgraph %s { %s label = <%s>; labelloc = t; tooltip = " "; %s } // %s )"; return StrFormat(computation_fmt, id, style, subcomp_label, comp_body, id); } std::string HloDotDumper::DumpComputation(const HloComputation* comp) { std::string g; for (const auto* instr : comp->instructions()) { if (!filter_.Show(instr)) { continue; } for (const HloComputation* subcomp : instr->called_computations()) { if (ShouldShowSubcomputation(subcomp)) { StrAppend(&g, DumpSubcomputation(subcomp, instr)); } } StrAppend(&g, DumpInstruction(instr)); } return g; } std::string HloDotDumper::DumpRootTag() { const HloInstruction* from = GetNodeForEdge(computation_->root_instruction()); if (!filter_.Show(from) || from->opcode() == HloOpcode::kConstant || IsFusedBroadcastOfConstantEffectiveScalar(from)) { return ""; } auto from_id = InstructionId(from); HloInstruction* to = nullptr; auto to_id = SubcomputationId(computation_); std::string node_body = "ROOT"; std::string node_shape = "circle"; ColorScheme color = kBrown; VLOG(2) << "Adding root tag as node " << next_node_id_; root_node_id_ = next_node_id_++; VLOG(2) << "Adding edge from " << from->name() << " to root tag as " << next_edge_id_; edge_ids_.insert({{from, to}, next_edge_id_++}); edges_.push_back(StrFormat(R"(%s -> %s [tooltip=" "];)", from_id, to_id)); return StrFormat(R"(%s [label=<%s>, shape=%s, tooltip=" ", %s];)" "\n", to_id, node_body, node_shape, NodeColorAttributes(color)); } static const HloConstantInstruction* TryGetFusionParameterConstant( const HloInstruction* instr) { if (instr->opcode() != HloOpcode::kParameter || !instr->IsFused()) { return nullptr; } const HloInstruction* fusion = instr->parent()->FusionInstruction(); const HloInstruction* operand = fusion->operand(instr->parameter_number()); return DynCast<HloConstantInstruction>(operand); } bool HloDotDumper::ShouldMergeIntoUsers(const HloInstruction* instr) const { if ((instr->opcode() == HloOpcode::kGetTupleElement && instr != instr->parent()->root_instruction()) || TryGetFusionParameterConstant(instr) != nullptr) { return true; } const int kMinUsersToOmit = 3; return instr->opcode() == HloOpcode::kParameter && instr->shape().IsTuple() && !instr->IsFused() && absl::c_count_if(instr->users(), [&](const HloInstruction* user) { return filter_.Show(user); }) > kMinUsersToOmit && absl::c_all_of(instr->users(), [&](const HloInstruction* user) { return !filter_.Show(user) || user->opcode() == HloOpcode::kGetTupleElement; }); } std::string HloDotDumper::DumpInstruction(const HloInstruction* instr) { if ((instr->opcode() == HloOpcode::kConstant || IsFusedBroadcastOfConstantEffectiveScalar(instr))
&& instr != instr->parent()->root_instruction()) { return ""; } if (ShouldMergeIntoUsers(instr)) { return ""; } if (instr->opcode() == HloOpcode::kFusion && ShouldShowFusionSubcomputation(instr)) { return ""; } VLOG(2) << "Adding node " << instr->name() << " as " << next_node_id_; node_ids_[instr] = next_node_id_++; std::string node_shape = GetInstructionNodeShape(instr); std::string node_label = GetInstructionNodeLabel(instr); std::string node_metadata = GetInstructionNodeMetadata(instr); std::string node_backend_config = GetInstructionNodeBackendConfig(instr); std::string extra_info = GetInstructionNodeExtraInfo(instr); std::string inlined_constants = GetInstructionNodeInlinedOperands(instr); std::string trivial_subcomputation = GetInstructionTrivialComputationStr(instr); AddInstructionIncomingEdges(instr); NodeColors node_colors; std::string node_style; std::string node_attributes; if (hlo_render_options_.override_node_colors && color_map_.has_value()) { if (color_map_->contains(instr)) { node_colors.fill_color = color_map_->at(instr).color; node_attributes = color_map_->at(instr).stats; } else { VLOG(2) << "color_map_ for instruction:" << instr->name() << "is empty" << "\n"; node_colors.fill_color = "#808080"; } node_colors.style = "filled"; node_colors.font_color = "black"; node_colors.stroke_color = "#c2c2c2"; node_style = StrFormat(R"(style="%s", fontcolor="%s", color="%s", fillcolor="%s")", node_colors.style, node_colors.font_color, node_colors.stroke_color, node_colors.fill_color); } else { ColorScheme color = GetInstructionColor(instr); if (!debug_options_.xla_hlo_graph_sharding_color()) { if (filter_.Deemphasized(instr)) { color = kDashedBorder; } if (filter_.Highlight(instr)) { node_shape = "diamond"; color = kDarkRed; } } node_colors = NodeColorsForScheme(color); if (instr->has_statistics()) { const auto& statistic_to_visualize = instr->statistic_to_visualize(); node_colors.fill_color = NodeFillColorForStatistic(statistic_to_visualize); node_colors.stroke_color = "#c2c2c2"; node_colors.font_color = NodeFontColorForStatistic(statistic_to_visualize); } else if (instr->module_has_statistics()) { node_colors.fill_color = "#f5f5f5"; node_colors.stroke_color = "#c2c2c2"; node_colors.font_color = "black"; } node_style = StrFormat(R"(style="%s", fontcolor="%s", color="%s", fillcolor="%s")", node_colors.style, node_colors.font_color, node_colors.stroke_color, node_colors.fill_color); } std::string node_body = node_label; for (const std::string& s : {trivial_subcomputation, extra_info, inlined_constants, node_backend_config, node_attributes}) { if (!s.empty()) { StrAppend(&node_body, "<br/>", s); } } return StrFormat(R"(%s [label=<%s>, shape=%s, tooltip="%s", %s];)" "\n", InstructionId(instr), node_body, node_shape, node_metadata, node_style); } std::string HloDotDumper::GetInstructionNodeInlinedOperands( const HloInstruction* instr) { auto stringify_constant = [](const HloConstantInstruction* constant, const Shape& shape) { if (ShapeUtil::IsZeroElementArray(shape)) { return StrFormat("{} (%s)", ShapeUtil::HumanString(constant->shape())); } optional<int64_t> elem_count; if (shape.IsArray()) { elem_count = ShapeUtil::ElementsIn(constant->shape()); } if (elem_count.has_value() && *elem_count <= 8 && constant->HasLiteral()) { std::string literal_str = constant->literal().ToStringWithoutShape(); if (literal_str.size() <= 64) { return StrFormat("%s %s", shape.ToString(), literal_str); } } std::string constant_name; if (absl::StartsWith(constant->name(), "constant")) { constant_name = 
std::string(constant->name()); } else { constant_name = StrCat("constant ", constant->name()); } return StrFormat("%s %s", constant_name, ShapeUtil::HumanString(shape)); }; std::vector<std::string> lines; constexpr int64_t kMaxOperandsShown = 32; for (int64_t i = 0; i < instr->operand_count(); ++i) { const HloInstruction* operand = instr->operand(i); optional<std::string> operand_str; if (const auto* constant_operand = DynCast<HloConstantInstruction>(operand)) { operand_str = stringify_constant(constant_operand, constant_operand->shape()); } else if (IsFusedBroadcastOfConstantEffectiveScalar(operand)) { operand_str = stringify_constant( Cast<HloConstantInstruction>(operand->operand(0)), operand->shape()); } else if (ShouldMergeIntoUsers(operand)) { if (operand->opcode() == HloOpcode::kParameter) { if (const HloConstantInstruction* constant = TryGetFusionParameterConstant(operand)) { operand_str = stringify_constant(constant, constant->shape()); } else { operand_str = StrFormat("Parameter %d", operand->parameter_number()); } } else if (operand->opcode() == HloOpcode::kGetTupleElement) { operand_str = StrFormat("tuple-element %d of %s %s", operand->tuple_index(), operand->operand(0)->name(), ShapeUtil::HumanStringWithLayout(operand->shape())); } else { operand_str = std::string(operand->name()); } } if (operand_str) { if (instr->operand_count() > 1) { lines.push_back(StrFormat("<b>operand %d</b> = %s", i, *operand_str)); } else { lines.push_back(StrFormat("<b>operand</b> = %s", *operand_str)); } } if (lines.size() == kMaxOperandsShown && i < instr->operand_count() - 1) { lines.push_back("..."); break; } } if (instr->opcode() == HloOpcode::kParameter && instr->IsFused()) { const HloInstruction* param_input = instr->parent()->FusionInstruction()->operand( instr->parameter_number()); if (param_input->opcode() == HloOpcode::kGetTupleElement) { lines.push_back( StrFormat("tuple-element %d of %s %s", param_input->tuple_index(), param_input->operand(0)->name(), ShapeUtil::HumanStringWithLayout(param_input->shape()))); } } return StrJoin(lines, "<br/>"); } ColorScheme HloDotDumper::GetInstructionColor(const HloInstruction* instr) { if (debug_options_.xla_hlo_graph_sharding_color()) { if (!instr->has_sharding()) { return kDashedBorder; } auto it = sharding_colors_.find(instr->sharding()); if (it != sharding_colors_.end()) { return it->second; } ColorScheme color = static_cast<ColorScheme>( kBlue + (next_shard_color_++ % (kDashedBorder - kBlue))); sharding_colors_.emplace(instr->sharding(), color); return color; } auto parameter_color = IsSmall(instr) ? 
kOrange : kDarkOrange; if (absl::c_any_of(instr->operands(), [&](const HloInstruction* operand) { return operand->opcode() == HloOpcode::kParameter && ShouldMergeIntoUsers(operand) && TryGetFusionParameterConstant(operand) == nullptr; })) { return parameter_color; } switch (instr->opcode()) { case HloOpcode::kAbs: case HloOpcode::kAdd: case HloOpcode::kAnd: case HloOpcode::kAtan2: case HloOpcode::kBitcastConvert: case HloOpcode::kCeil: case HloOpcode::kClamp: case HloOpcode::kClz: case HloOpcode::kCompare: case HloOpcode::kComplex: case HloOpcode::kConvert: case HloOpcode::kCos: case HloOpcode::kDivide: case HloOpcode::kErf: case HloOpcode::kExp: case HloOpcode::kExpm1: case HloOpcode::kFloor: case HloOpcode::kImag: case HloOpcode::kIota: case HloOpcode::kIsFinite: case HloOpcode::kLog: case HloOpcode::kLog1p: case HloOpcode::kMaximum: case HloOpcode::kMinimum: case HloOpcode::kMultiply: case HloOpcode::kNegate: case HloOpcode::kNot: case HloOpcode::kPopulationCount: case HloOpcode::kOr: case HloOpcode::kXor: case HloOpcode::kPower: case HloOpcode::kReal: case HloOpcode::kReducePrecision: case HloOpcode::kRemainder: case HloOpcode::kRng: case HloOpcode::kRngGetAndUpdateState: case HloOpcode::kRngBitGenerator: case HloOpcode::kRoundNearestAfz: case HloOpcode::kRoundNearestEven: case HloOpcode::kRsqrt: case HloOpcode::kSelect: case HloOpcode::kShiftLeft: case HloOpcode::kShiftRightArithmetic: case HloOpcode::kShiftRightLogical: case HloOpcode::kStochasticConvert: case HloOpcode::kLogistic: case HloOpcode::kSign: case HloOpcode::kSin: case HloOpcode::kSlice: case HloOpcode::kSort: case HloOpcode::kTopK: case HloOpcode::kSqrt: case HloOpcode::kCbrt: case HloOpcode::kSubtract: case HloOpcode::kTan: case HloOpcode::kTanh: return kWhite; case HloOpcode::kAddDependency: case HloOpcode::kAfterAll: case HloOpcode::kGetTupleElement: case HloOpcode::kOptimizationBarrier: case HloOpcode::kPad: case HloOpcode::kTuple: return kWhite; case HloOpcode::kConstant: return kWhite; case HloOpcode::kBroadcast: case HloOpcode::kDynamicUpdateSlice: return kYellow; case HloOpcode::kConcatenate: case HloOpcode::kDynamicSlice: case HloOpcode::kReshape: case HloOpcode::kDynamicReshape: case HloOpcode::kReverse: case HloOpcode::kTranspose: return kGreen; case HloOpcode::kCopy: case HloOpcode::kCopyStart: case HloOpcode::kCopyDone: return kGreen; case HloOpcode::kBitcast: if (!instr->IsFused()) { return kWhite; } return kGreen; case HloOpcode::kAsyncStart: case HloOpcode::kAsyncUpdate: case HloOpcode::kAsyncDone: return GetInstructionColor(instr->async_wrapped_instruction()); case HloOpcode::kConvolution: case HloOpcode::kDot: case HloOpcode::kFft: case HloOpcode::kTriangularSolve: case HloOpcode::kCholesky: return kDarkBlue; case HloOpcode::kParameter: return parameter_color; case HloOpcode::kBatchNormGrad: case HloOpcode::kBatchNormInference: case HloOpcode::kBatchNormTraining: case HloOpcode::kReduce: case HloOpcode::kReduceWindow: case HloOpcode::kScatter: case HloOpcode::kSelectAndScatter: case HloOpcode::kGather: return kPurple; case HloOpcode::kDomain: case HloOpcode::kFusion: case HloOpcode::kMap: case HloOpcode::kGetDimensionSize: case HloOpcode::kSetDimensionSize: return kGray; case HloOpcode::kAllGather: case HloOpcode::kAllGatherStart: case HloOpcode::kAllGatherDone: case HloOpcode::kAllReduce: case HloOpcode::kReduceScatter: case HloOpcode::kAllReduceStart: case HloOpcode::kAllReduceDone: case HloOpcode::kAllToAll: case HloOpcode::kCollectiveBroadcast: case HloOpcode::kCollectivePermute: case 
HloOpcode::kCollectivePermuteStart: case HloOpcode::kCollectivePermuteDone: case HloOpcode::kInfeed: case HloOpcode::kOutfeed: case HloOpcode::kPartitionId: case HloOpcode::kRecv: case HloOpcode::kRecvDone: case HloOpcode::kSend: case HloOpcode::kSendDone: case HloOpcode::kReplicaId: return kBrown; case HloOpcode::kCall: case HloOpcode::kConditional: case HloOpcode::kCustomCall: case HloOpcode::kWhile: return kDarkGreen; } } std::string HloDotDumper::GetInstructionNodeShape(const HloInstruction* instr) { switch (instr->opcode()) { case HloOpcode::kWhile: return "ellipse"; default: return "rect"; } } std::string HloDotDumper::GetInstructionNodeLabel(const HloInstruction* instr) { if (instr->opcode() == HloOpcode::kParameter) { return StrFormat("<b>Parameter %d</b>", instr->parameter_number()); } if (absl::StartsWith(instr->name(), HloOpcodeString(instr->opcode()))) { return StrFormat("<b>%s</b>", HtmlLikeStringSanitize(instr->name())); } std::string extended_opcode = StrCat(HloOpcodeString(instr->opcode()), instr->opcode() != HloOpcode::kFusion ? "" : StrCat(":", xla::ToString(instr->fusion_kind()))); return StrFormat("<b>%s</b><br/>%s", HtmlLikeStringSanitize(instr->name()), HtmlLikeStringSanitize(extended_opcode)); } std::string HloDotDumper::GetInstructionNodeMetadata( const HloInstruction* instr) { std::vector<std::string> lines; if (!instr->metadata().op_name().empty()) { lines.push_back(HtmlLikeStringSanitize(instr->metadata().op_name())); } if (!instr->metadata().op_type().empty()) { lines.push_back(StrFormat( "op_type: %s", HtmlLikeStringSanitize(instr->metadata().op_type()))); } if (!instr->metadata().source_file().empty() && instr->metadata().source_line() != 0) { lines.push_back(StrFormat("source: %s:%d", instr->metadata().source_file(), instr->metadata().source_line())); } if (instr->metadata().stack_frame_id() != 0) { auto hlo_module = instr->parent()->parent(); int frame_id = instr->metadata().stack_frame_id(); while (frame_id != 0) { HloModule::StackFrame frame = hlo_module->get_stack_frame(frame_id); if (frame.empty()) { break; } frame_id = frame.parent_frame_id; lines.push_back(StrFormat( "%s:%s:%d%s", frame.file_name, frame.function_name, frame.line, frame.column == 0 ? "" : StrFormat(":%d", frame.column))); } } return StrJoin(lines, "\n"); } static std::vector<std::pair<std::string, std::string>> ExtractCudnnConvBackendConfigProps(const gpu::CudnnConvBackendConfig& config) { std::vector<std::pair<std::string, std::string>> props; if (config.conv_result_scale() != 1) { props.emplace_back("conv_result_scale", StrCat(config.conv_result_scale())); } if (config.side_input_scale() != 0 && config.side_input_scale() != 1) { props.emplace_back("side_input_scale", StrCat(config.side_input_scale())); } if (config.activation_mode() == se::dnn::ActivationMode::kLeakyRelu) { props.emplace_back("leakyrelu_alpha", StrCat(config.leakyrelu_alpha())); } props.emplace_back( "activation_mode", se::dnn::ActivationModeString( static_cast<se::dnn::ActivationMode>(config.activation_mode()))); props.emplace_back("algo", se::dnn::AlgorithmDesc(config.algorithm()).ToString()); return props; } static std::vector<std::pair<std::string, std::string>> ExtractGemmBackendConfigProps(const gpu::GemmBackendConfig& config, const HloInstruction* instr) { std::vector<std::pair<std::string, std::string>> props; if (primitive_util::IsComplexType(instr->shape().element_type())) { if (config.alpha_real() != 1 || config.alpha_imag() != 1) { props.emplace_back("alpha_real", StrCat(config.alpha_real())); props.emplace_back("alpha_imag", StrCat(config.alpha_imag())); } } else { if (config.alpha_real() != 1) { props.emplace_back("alpha", StrCat(config.alpha_real())); } } if (config.beta() != 0 && config.beta() != 1) { props.emplace_back("beta", StrCat(config.beta())); } props.emplace_back( "", absl::StrReplaceAll( DotDimensionNumbersToString(config.dot_dimension_numbers()), {{", ", "<br/>"}})); if (config.algorithm_case() == gpu::GemmBackendConfig::kSelectedAlgorithm) { props.emplace_back("algorithm", StrCat(config.selected_algorithm())); } if (config.epilogue() != gpu::GemmBackendConfig::DEFAULT) { props.emplace_back( "epilogue", gpu::GemmBackendConfig::Epilogue_Name(config.epilogue())); } return props; } std::string HloDotDumper::GetInstructionNodeBackendConfig( const HloInstruction* instr) { std::vector<std::pair<std::string, std::string>> props; if (gpu::IsCustomCallToDnnConvolution(*instr)) { absl::StatusOr<gpu::GpuBackendConfig> config = instr->backend_config<gpu::GpuBackendConfig>(); if (config.ok()) { props = ExtractCudnnConvBackendConfigProps( config->cudnn_conv_backend_config()); } } else if (gpu::IsCublasGemm(*instr)) { absl::StatusOr<gpu::GpuBackendConfig> config = instr->backend_config<gpu::GpuBackendConfig>(); if (config.ok()) { props = ExtractGemmBackendConfigProps(config->gemm_backend_config(), instr); } } if (!props.empty()) { return StrCat((props.size() > 1 ? 
"<br/>" : ""), StrJoin(props, "<br/>", [](std::string* out, const std::pair<std::string, std::string>& kv) { if (!kv.first.empty()) { return StrAppend(out, kv.first, "=", kv.second); } StrAppend(out, kv.second); })); } if (!hlo_render_options_.show_backend_config || instr->raw_backend_config_string().empty()) { return ""; } return StrCat("backend_config=\"", instr->raw_backend_config_string(), "\""); } std::string HloDotDumper::GetInstructionNodeExtraInfo( const HloInstruction* instr) { std::vector<std::string> lines; for (const auto& line : instr->ExtraAttributesToString( HloPrintOptions().set_print_subcomputation_mode( HloPrintOptions::PrintSubcomputationMode::kOff))) { constexpr int kMaxDeviceIdFieldLen = 128; if ((absl::StartsWith(line, "replica_groups=") || absl::StartsWith(line, "source_target_pairs=") || absl::StartsWith(line, "control-predecessors=")) && line.length() > kMaxDeviceIdFieldLen) { lines.push_back(HtmlLikeStringSanitize( StrCat(line.substr(0, kMaxDeviceIdFieldLen - 3), "..."))); } else if (absl::StartsWith(line, "feature_group_count=")) { lines.push_back(StrFormat("<b>%s</b>", HtmlLikeStringSanitize(line))); } else { lines.push_back(HtmlLikeStringSanitize(line)); } } if (instr->opcode() != HloOpcode::kFusion || !ShouldShowFusionSubcomputation(instr)) { bool shape_is_multidim = false; ShapeUtil::ForEachSubshape(instr->shape(), [&](const Shape& s, const ShapeIndex&) { shape_is_multidim |= s.dimensions_size() > 1; }); std::string instr_shape; if (instr->opcode() != HloOpcode::kTuple && shape_is_multidim) { instr_shape = ShapeUtil::HumanStringWithLayout(instr->shape()); } else { instr_shape = ShapeUtil::HumanString(instr->shape()); } constexpr int kMaxShapeLen = 64; if (instr_shape.length() > kMaxShapeLen) { instr_shape = StrCat( absl::string_view(instr_shape).substr(0, kMaxShapeLen - 3), "..."); } lines.push_back(HtmlLikeStringSanitize(instr_shape)); } if (debug_options_.xla_hlo_graph_addresses()) { lines.push_back(StrFormat("[%p]", instr)); } return StrJoin(lines, "<br/>"); } void HloDotDumper::AddInstructionIncomingEdges(const HloInstruction* instr) { constexpr int kMaxEdgesBetweenTwoNodes = 64; auto add_edge = [&](const HloInstruction* from, const HloInstruction* to, int64_t operand_num, bool control_edge = false) { if (edge_ids_.count({from, to}) > kMaxEdgesBetweenTwoNodes) { return; } from = GetNodeForEdge(from); if (!filter_.Show(from) || from->opcode() == HloOpcode::kConstant || IsFusedBroadcastOfConstantEffectiveScalar(from) || ShouldMergeIntoUsers(from)) { return; } VLOG(2) << "Adding edge from " << from->name() << " to " << to->name() << " as " << next_edge_id_; edge_ids_.insert({{from, to}, next_edge_id_++}); std::string edge_label; if (control_edge) { edge_label = "style=\"dotted\" color=\"gray\" label=\"ctrl\""; } else if (instr->operand_count() > 1) { edge_label = StrFormat(R"( headlabel="%d", labeldistance=2)", operand_num); } constexpr char kEdgeFmt[] = R"(%s -> %s [arrowhead=%s tooltip="%s -> %s" %s];)"; edges_.push_back(StrFormat(kEdgeFmt, InstructionId(from), InstructionId(to), (IsSmall(from) ? 
"empty" : "normal"), from->name(), to->name(), edge_label)); }; if (instr->opcode() == HloOpcode::kParameter && instr->IsFused()) { if (instr->parent() != computation_) { const HloInstruction* fusion = instr->parent()->FusionInstruction(); add_edge(fusion->operand(instr->parameter_number()), instr, 0); } } else { for (int64_t i = 0; i < instr->operand_count(); ++i) { add_edge(instr->operand(i), instr, i); } for (const HloInstruction* pred : instr->control_predecessors()) { add_edge(pred, instr, 0, true); } } } std::string HloDotDumper::GetInstructionTrivialComputationStr( const HloInstruction* instr) { if (instr->opcode() == HloOpcode::kFusion) { return ""; } std::vector<std::string> lines; for (int64_t i = 0; i < instr->called_computations().size(); ++i) { optional<std::string> computation_type = MatchTrivialComputation(instr->called_computations()[i]); if (!computation_type) { continue; } if (instr->called_computations().size() == 1) { lines.push_back(StrFormat("Subcomputation: <b>%s</b>", HtmlLikeStringSanitize(*computation_type))); } else { lines.push_back(StrFormat("Subcomputation %d: <b>%s</b>", i, HtmlLikeStringSanitize(*computation_type))); } } return StrJoin(lines, "<br/>"); } const HloInstruction* HloDotDumper::GetNodeForEdge( const HloInstruction* instr) { if (instr->opcode() == HloOpcode::kGetTupleElement) { instr = instr->operand(0); } while (instr->opcode() == HloOpcode::kFusion && ShouldShowFusionSubcomputation(instr)) { instr = instr->fused_expression_root(); } return instr; } NodeFilter MakeNodeRadiusAroundFilter( const HloInstruction* root, int64_t radius, const absl::flat_hash_set<const HloInstruction*>& boundary) { absl::flat_hash_map<const HloInstruction*, NodeFilterResult> nodes; std::deque<std::pair<const HloInstruction*, int64_t>> worklist; worklist.push_back({root, 0}); while (!worklist.empty()) { const HloInstruction* instr; int64_t depth; std::tie(instr, depth) = worklist.front(); worklist.pop_front(); nodes[instr] = kNormalNode; if (depth == radius) { continue; } if (boundary.contains(instr)) { continue; } if (instr == root || instr->opcode() != HloOpcode::kTuple) { for (const HloInstruction* operand : instr->operands()) { if (!nodes.contains(operand)) { int new_depth = (operand->opcode() == HloOpcode::kBitcast || instr->opcode() == HloOpcode::kBitcast) ? 
depth : depth + 1; worklist.push_back({operand, new_depth}); } } } for (const HloComputation* computation : instr->called_computations()) { worklist.push_back({computation->root_instruction(), depth + 1}); } if (instr->opcode() == HloOpcode::kConstant) { continue; } constexpr int kMaxUsersToRender = 16; if (instr->user_count() > kMaxUsersToRender) { nodes[instr] = kSomeUsersOmitted; continue; } for (const HloInstruction* user : instr->users()) { if (!nodes.contains(user)) { worklist.push_back({user, depth + 1}); } } } auto is_displayed = [&](const HloInstruction* instr) { return nodes.contains(instr) || instr->opcode() == HloOpcode::kConstant || instr->parent() != root->parent(); }; for (auto& kv : nodes) { const HloInstruction* instr = kv.first; NodeFilterResult& filter_result = kv.second; const auto& operands = instr->operands(); if (absl::c_any_of(operands, is_displayed) && !absl::c_all_of(operands, is_displayed)) { filter_result = kSomeOperandsOmitted; } else if (!operands.empty() && absl::c_none_of(operands, is_displayed)) { filter_result = kOmitNodeOperands; } if (filter_result == kSomeUsersOmitted && absl::c_all_of(instr->users(), is_displayed)) { filter_result = kNormalNode; } } nodes[root] = kHighlightNode; return NodeFilter( [=](const HloInstruction* instr) { auto it = nodes.find(instr); if (it != nodes.end()) { return it->second; } if (instr->parent() != root->parent()) { return kNormalNode; } return kHideNode; }, nodes.size()); } NodeFilter MakeNodeFromToFilter(const HloInstruction* from, const HloInstruction* to, int64_t max_nodes, bool* hit_limit) { *hit_limit = false; std::deque<std::vector<const HloInstruction*>> queue; queue.push_front({from}); absl::flat_hash_set<const HloInstruction*> visited; absl::flat_hash_set<const HloInstruction*> to_display = {from, to}; while (!queue.empty() && to_display.size() < max_nodes) { std::vector<const HloInstruction*> path = std::move(queue.front()); queue.pop_front(); if (!visited.insert(path.back()).second) { continue; } for (const auto* user : path.back()->users()) { if (user == to) { auto it = path.begin(); for (; it != path.end() && to_display.size() < max_nodes; ++it) { to_display.insert(*it); } if (it != path.end()) { *hit_limit = true; } } else if (!visited.count(user)) { auto new_path = path; new_path.push_back(user); queue.push_back(std::move(new_path)); } } } return NodeFilter([=](const HloInstruction* instr) { if (instr == from || instr == to) { return kHighlightNode; } return to_display.count(instr) ? 
kNormalNode : kHideNode; }); } absl::Mutex url_renderer_mu(absl::kConstInit); std::function<absl::StatusOr<std::string>(absl::string_view)>* url_renderer ABSL_GUARDED_BY(url_renderer_mu) = nullptr; absl::Mutex fusion_visualizer_state_mu(absl::kConstInit); namespace { struct FusionVisualizerProgress { void AddState(absl::string_view dot, absl::string_view explanation, std::optional<std::string> to_highlight) { if (dot_graphs.empty() || dot_graphs.back() != dot) { dot_graphs.push_back(std::string(dot)); } frames.push_back({static_cast<int>(dot_graphs.size() - 1), std::string(explanation), to_highlight.value_or("")}); } std::vector<std::string> dot_graphs; struct FusionFrame { int dot_graph; std::string label; std::string to_highlight; }; std::vector<FusionFrame> frames; }; } static auto& fusion_visualizer_states TF_GUARDED_BY(fusion_visualizer_state_mu) = *new absl::flat_hash_map< std::pair<int64_t, int64_t>, FusionVisualizerProgress>(); static std::pair<int, int> FusionVisualizerStateKey( const HloComputation& computation) { return std::make_pair(computation.parent()->unique_id(), computation.unique_id()); } } static absl::StatusOr<std::string> CompressAndEncode(absl::string_view input) { class WritableStringFile : public tsl::WritableFile { public: explicit WritableStringFile(std::string* data) : data_(data){}; ~WritableStringFile() override = default; absl::Status Append(absl::string_view data) override { absl::StrAppend(data_, data); return absl::OkStatus(); } absl::Status Close() override { return absl::OkStatus(); } absl::Status Flush() override { return absl::OkStatus(); } absl::Status Sync() override { return absl::OkStatus(); } private: std::string* data_; }; std::string compressed; WritableStringFile f(&compressed); auto gz_opts = tsl::io::ZlibCompressionOptions::GZIP(); tsl::io::ZlibOutputBuffer gz_file(&f, gz_opts.input_buffer_size, gz_opts.output_buffer_size, gz_opts); TF_RETURN_IF_ERROR(gz_file.Init()); TF_RETURN_IF_ERROR(gz_file.Append(input)); TF_RETURN_IF_ERROR(gz_file.Close()); std::string encoded; TF_RETURN_IF_ERROR(tsl::Base64Encode(compressed, &encoded)); return absl::StrReplaceAll(encoded, {{"_", "/"}, {"-", "+"}}); } static std::string EscapeJSONString(absl::string_view raw) { return absl::StrCat( "\"", absl::StrReplaceAll(raw, {{"\n", "\\n"}, {"\"", "\\\""}, {"\\", "\\\\"}}), "\""); } absl::StatusOr<std::string> WrapFusionExplorer( const FusionVisualizerProgress& visualizer_progress, absl::string_view graph_title) { if (visualizer_progress.frames.empty()) { return Internal("Empty"); } std::string dot_graphs = StrFormat("[%s]", StrJoin(visualizer_progress.dot_graphs, ", ", [&](std::string* out, const std::string& dot) { StrAppend(out, EscapeJSONString(dot)); })); std::string frames = StrJoin( visualizer_progress.frames, ", ", [&](std::string* out, const auto& p) { StrAppend(out, StrFormat("[%d, %s, %s]", p.dot_graph, EscapeJSONString(p.label), EscapeJSONString(p.to_highlight))); }); TF_ASSIGN_OR_RETURN(std::string dot_graphs_compressed, CompressAndEncode(dot_graphs)); return absl::StrReplaceAll( R"wrapper( <!DOCTYPE html> <html> <head> <meta charset="utf-8"> <style> html, body {height: 100%; text-align: center;} #rendered {height: 70%; width: 80%; border:1px solid black; margin: auto; } #label {width: 80%; margin: auto;} #performance_note { font-size: small; color: gray; } #frames_list { list-style: none; text-align: left; height: 20%; overflow: scroll; } #frames_list li { padding: 0.2em; margin: 0.2em; } .selected { background-color: #e0e0e0; } .selected a { color: 
black; text-decoration: none; } #rendered svg { height: 100% !important; width: 100% !important; } </style> </head> <body> <script src="https: integrity="sha384-LigJPbR3TOfU/Xbb+PjiN1dGJYPweLk7kiGnaMgmxnUmKWaCFKbb5tH6iLlyVhPZ" crossorigin="anonymous"></script> <script src="https: </script> <title>Fusion Explorer: $TITLE</title> <div id='rendered'><center>Loading...</center></div> <ul id='frames_list'></ul> <p>Use j/k for keyboard navigation.</p> <p id='performance_note'>Loading data...</p> <script> <!-- const renderCache = {}; const cssregex = new RegExp('stylesheet=<([^]*)\n>\n', 'gm'); const hpccWasm = window["@hpcc-js/wasm"]; const getIdFromHash = () => { let hash = window.location.hash; if (hash.indexOf('frame') == -1) { return 0; } return parseInt(window.location.hash.substring('#frame'.length, window.location.hash.length)); } const renderCurrentFrame = () => { if (!window.loaded) { return; } const frames_list = document.getElementById('frames_list'); const currId = getIdFromHash(); for (let selected of frames_list.getElementsByClassName('selected')) { selected.classList.remove('selected'); } const selected = frames_list.children[currId]; selected.classList.add('selected'); selected.scrollIntoView(); const frame = frames[currId]; const dot_ptr = frame[0]; let dot_txt = window.dots[dot_ptr]; const label = frame[1]; document.getElementById('performance_note').innerText = "Rendering..."; const results = cssregex.exec(dot_txt) let css_data = '' if (results !== null) { css_data = results[1].replace(/\s*data:.*\s*,/,''); css_data = unescape(css_data); dot_txt = dot_txt.replace(cssregex, ''); } let render_start = performance.now(); const render_callback = svg => { renderCache[dot_ptr] = svg; var area = document.getElementById('rendered'); area.innerHTML = `${svg}<style>${css_data}</style>`; var panzoom = svgPanZoom(area.children[0], { zoomEnabled: true, controlIconsEnabled: true, maxZoom: 200, }); var to_highlight = frame[2].length ? 
document.querySelector(`${frame[2]}`) : null; if (to_highlight) { to_highlight.style.setProperty('fill', 'red'); } document.getElementById('performance_note').innerText = `Rendering took ${(performance.now() - render_start).toFixed(2)}ms`; let text_nodes = document.getElementsByTagName("text"); for (var el of text_nodes) { if (title_to_id.has(el.innerHTML)) { el.style.cursor = "pointer"; } } }; if (renderCache[dot_ptr]) { render_callback(renderCache[dot_ptr]); } else { hpccWasm.graphviz.layout(dot_txt, "svg", "dot").then(render_callback); } }; const update = (delta) => { let currId = getIdFromHash(); currId = (currId + delta + frames.length) % frames.length; window.location.hash = `#frame${currId}` }; const renderFrameList = () => { const currId = getIdFromHash(); const frames_list = document.getElementById('frames_list'); for (let i=0; i<frames.length; i++) { const f = frames[i]; let frame_descr = f[1]; const rendered = document.createElement("li"); if (frame_descr == "") { frame_descr = "Unnamed state"; } rendered.innerHTML = `<a href="#frame${i}">${frame_descr}</a>`; if (i == currId) { rendered.classList.add('selected'); } frames_list.appendChild(rendered); } }; const decompress = async function(compressed) { const ds = new DecompressionStream('gzip'); const in_fetch = await fetch(`data:application/octet-stream;base64,${compressed}`); const in_blob = await in_fetch.blob(); const out_stream = in_blob.stream().pipeThrough(ds); const out_blob = await new Response(out_stream).blob(); return await out_blob.text(); } const dots_compressed = "$DOTS"; const frames = [$FRAMES]; let loaded = false; window.addEventListener('hashchange', () => { renderCurrentFrame(); }); window.addEventListener("keydown", (event) => { if (event.defaultPrevented) { return; } if (event.key == "j") { update(1); } else if (event.key == "k") { update(-1); } else { return; } event.preventDefault(); }, true); document.addEventListener("DOMContentLoaded", () => { decompress(dots_compressed).then(text => { window.dots = JSON.parse(text); window.loaded = true; renderFrameList(); renderCurrentFrame(); }); window.title_to_id = new Map(); for (let i=0; i < frames.length; i++) { title_to_id.set(frames[i][1], i); } document.addEventListener("click", (event) => { let txt = event.target.innerHTML; if (title_to_id.has(txt)) { let id = title_to_id.get(txt); window.location.hash = `#frame${id}`; } }); }); </script> </body> </html> )wrapper", {{"$DOTS", dot_graphs_compressed}, {"$FRAMES", frames}, {"$TITLE", graph_title}}); } static std::string GraphTitle(const HloComputation& computation) { return absl::StrCat(computation.parent()->name(), "_", computation.name()); } absl::StatusOr<std::string> WrapFusionExplorer( const HloComputation& computation) { absl::MutexLock lock(&fusion_visualizer_state_mu); const FusionVisualizerProgress& visualizer_progress = fusion_visualizer_states[FusionVisualizerStateKey(computation)]; return WrapFusionExplorer(visualizer_progress, GraphTitle(computation)); } static absl::StatusOr<std::string> WrapDotInHtml(absl::string_view dot, absl::string_view title) { FusionVisualizerProgress progress; progress.AddState(dot, title, std::nullopt); return WrapFusionExplorer(progress, title); } static absl::StatusOr<std::string> WrapDotInFormat( const HloComputation& computation, absl::string_view dot, RenderedGraphFormat format) ABSL_EXCLUSIVE_LOCKS_REQUIRED(url_renderer_mu) { switch (format) { case RenderedGraphFormat::kUrl: CHECK(url_renderer != nullptr) << "Should have checked url_renderer != null before 
calling."; return (*url_renderer)(dot); case RenderedGraphFormat::kHtml: return WrapDotInHtml(dot, GraphTitle(computation)); case RenderedGraphFormat::kDot: return std::string(dot); } } void RegisterGraphToURLRenderer( std::function<absl::StatusOr<std::string>(absl::string_view)> renderer) { absl::MutexLock lock(&url_renderer_mu); if (url_renderer != nullptr) { LOG(WARNING) << "Multiple calls to RegisterGraphToURLRenderer. Last call " "wins, but because order of initialization in C++ is " "nondeterministic, this may not be what you want."; } delete url_renderer; url_renderer = new std::function<absl::StatusOr<std::string>(absl::string_view)>( std::move(renderer)); } void RegisterFusionState(const HloComputation& computation, absl::string_view label, const HloInstruction& consumer, const HloInstruction* producer) { absl::MutexLock lock(&fusion_visualizer_state_mu); FusionVisualizerProgress& fusion_progress = fusion_visualizer_states[FusionVisualizerStateKey(computation)]; static constexpr int kRenderRadius = 4; absl::flat_hash_set<const HloInstruction*> render_boundary; for (const HloInstruction* user : consumer.users()) { render_boundary.insert(user); } HloDotDumper dumper( consumer.parent(), StrCat("Rendering of ", kRenderRadius, " nodes around fusion consumer"), consumer.GetModule()->config().debug_options(), {}, MakeNodeRadiusAroundFilter(&consumer, kRenderRadius, render_boundary)); std::string dot_txt = dumper.Dump(); std::optional<std::string> producer_to_highlight; if (producer) { producer_to_highlight = dumper.CssIdForInstruction(*producer); } fusion_progress.AddState(dot_txt, label, producer_to_highlight); } absl::StatusOr<std::string> RenderGraph( const HloComputation& computation, absl::string_view label, const DebugOptions& debug_options, RenderedGraphFormat format, HloRenderOptions hlo_render_options, std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>> color_map) { absl::MutexLock lock(&url_renderer_mu); if (format == RenderedGraphFormat::kUrl && url_renderer == nullptr) { return Unavailable("Can't render as URL; no URL renderer was registered."); } std::string rendered_dot = HloDotDumper(&computation, label, debug_options, hlo_render_options, NodeFilter(), color_map) .Dump(); return WrapDotInFormat(computation, rendered_dot, format); } absl::StatusOr<std::string> RenderAllComputationsToHtml( const HloModule& module) { FusionVisualizerProgress progress; std::vector<HloInstruction*> instrs = module.entry_computation()->MakeInstructionPostOrder(); absl::c_reverse(instrs); for (const HloInstruction* instr : instrs) { if (absl::c_linear_search( std::vector<HloOpcode>{HloOpcode::kConstant, HloOpcode::kGetTupleElement}, instr->opcode())) { continue; } HloRenderOptions opts; opts.show_fusion_subcomputations = true; opts.show_backend_config = true; opts.show_while_subcomputations = instr->opcode() == HloOpcode::kWhile; static constexpr int64_t max_nodes_to_render = 100; absl::flat_hash_set<const HloInstruction*> render_boundary; NodeFilter filter = MakeNodeRadiusAroundFilter(instr, 2, render_boundary); if (filter.GetNumRendered().value_or(1) > max_nodes_to_render) { filter = MakeNodeRadiusAroundFilter(instr, 1, render_boundary); } std::string dot = HloDotDumper(module.entry_computation(), instr->name(), module.config().debug_options(), opts, filter) .Dump(); progress.AddState(dot, instr->name(), std::nullopt); } return WrapFusionExplorer(progress, module.name()); } absl::StatusOr<std::string> RenderNeighborhoodAround( const HloInstruction& node, int radius, 
RenderedGraphFormat format, HloRenderOptions hlo_render_options, const absl::flat_hash_set<const HloInstruction*>& boundary, std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>> color_map) { absl::MutexLock lock(&url_renderer_mu); if (format == RenderedGraphFormat::kUrl && url_renderer == nullptr) { return FailedPrecondition( "Can't render as URL; no URL renderer was registered."); } std::string label = StrCat("Neighborhood of ", radius, " nodes around ", node.name()); std::string rendered_dot = HloDotDumper( node.parent(), label, node.GetModule()->config().debug_options(), hlo_render_options, MakeNodeRadiusAroundFilter(&node, radius, boundary), color_map) .Dump(); return WrapDotInFormat(*node.parent(), rendered_dot, format); } absl::StatusOr<std::string> RenderAllPathsFromTo( const HloInstruction& from, const HloInstruction& to, int64_t max_nodes, RenderedGraphFormat format, HloRenderOptions hlo_render_options) { absl::MutexLock lock(&url_renderer_mu); if (format == RenderedGraphFormat::kUrl && url_renderer == nullptr) { return FailedPrecondition( "Can't render as URL; no URL renderer was registered."); } CHECK_EQ(from.parent(), to.parent()) << "Nodes must be in same computation!"; auto debug_options = from.GetModule()->config().debug_options(); bool hit_limit = false; NodeFilter filter = MakeNodeFromToFilter(&from, &to, max_nodes, &hit_limit); std::string label; if (!hit_limit) { label = StrCat("All paths from ", from.name(), " to ", to.name()); } else { label = StrCat(max_nodes, " nodes on the shortest paths from ", from.name(), " to ", to.name(), "<br/><br/>***SHOWING ONLY A SUBSET OF ALL PATHS BETWEEN " "NODES***<br/><br/>"); } std::string rendered_dot = HloDotDumper(from.parent(), label, debug_options, hlo_render_options, filter) .Dump(); return WrapDotInFormat(*from.parent(), rendered_dot, format); } }
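The file above ends with its public rendering entry points. As orientation, here is a minimal usage sketch; it is not part of hlo_graph_dumper.cc. The DumpEntryAsHtml helper and the toy URL renderer are hypothetical, and only RenderGraph, RenderedGraphFormat, and RegisterGraphToURLRenderer are taken from the code above (the four-argument RenderGraph call relies on the same defaulted trailing parameters the tests below use).

// Hypothetical usage sketch; not part of hlo_graph_dumper.cc.
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_graph_dumper.h"

// Renders the entry computation as a self-contained HTML page. kHtml goes
// through WrapDotInHtml, so no URL renderer needs to be registered first.
absl::StatusOr<std::string> DumpEntryAsHtml(const xla::HloModule& module) {
  return xla::RenderGraph(*module.entry_computation(), /*label=*/module.name(),
                          module.config().debug_options(),
                          xla::RenderedGraphFormat::kHtml);
}

// For kUrl, a process-wide renderer must be registered first; as the
// LOG(WARNING) in RegisterGraphToURLRenderer notes, the last registration
// wins. The URL scheme here is made up for illustration.
void InstallToyUrlRenderer() {
  xla::RegisterGraphToURLRenderer(
      [](absl::string_view dot) -> absl::StatusOr<std::string> {
        return absl::StrCat("https://example.com/render?bytes=", dot.size());
      });
}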
#include "xla/service/hlo_graph_dumper.h" #include "absl/container/flat_hash_map.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/literal_util.h" #include "xla/test.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/test_utils.h" #include "xla/xla.pb.h" namespace xla { namespace { using absl::StrCat; using ::testing::HasSubstr; using HloGraphDumperTest = HloTestBase; std::string TestName() { return ::testing::UnitTest::GetInstance()->current_test_info()->name(); } TEST_F(HloGraphDumperTest, NestedFusion) { HloComputation::Builder b("b"); auto shape = ShapeUtil::MakeShape(F32, {10, 100}); std::vector<HloInstruction*> params; for (int i = 0; i <= 4; ++i) { params.push_back(b.AddInstruction( HloInstruction::CreateParameter(i, shape, StrCat("param", i)))); } std::vector<HloInstruction*> sums; sums.push_back(b.AddInstruction(HloInstruction::CreateBinary( shape, HloOpcode::kAdd, params[0], params[1]))); for (int i = 0; i <= 2; ++i) { sums.push_back(b.AddInstruction(HloInstruction::CreateBinary( shape, HloOpcode::kAdd, sums[i], params[i + 2]))); } HloModuleConfig config; HloModule m(TestName(), config); m.AddEntryComputation(b.Build()); HloComputation* root_computation = m.entry_computation(); auto* outer_fusion = root_computation->CreateFusionInstruction( {sums[3], sums[2], sums[1], sums[0]}, HloInstruction::FusionKind::kLoop); std::vector<HloInstruction*> fused_sums; for (auto* instr : outer_fusion->fused_instructions_computation() ->MakeInstructionPostOrder()) { if (instr->opcode() == HloOpcode::kAdd) { fused_sums.push_back(instr); } } auto* inner_fusion = outer_fusion->fused_instructions_computation()->CreateFusionInstruction( {fused_sums[1], fused_sums[0]}, HloInstruction::FusionKind::kLoop); TF_ASSERT_OK_AND_ASSIGN( std::string graph, RenderGraph(*root_computation, "", DebugOptions(), RenderedGraphFormat::kDot)); for (const HloComputation* computation : {root_computation, inner_fusion->fused_instructions_computation(), outer_fusion->fused_instructions_computation()}) { for (const HloInstruction* instruction : computation->instructions()) { EXPECT_THAT(graph, HasSubstr(instruction->name())); } } const HloInstruction* inner_sum = nullptr; for (const HloInstruction* instruction : inner_fusion->fused_instructions_computation()->instructions()) { if (instruction->opcode() == HloOpcode::kAdd) { inner_sum = instruction; break; } } ASSERT_NE(inner_sum, nullptr); TF_ASSERT_OK_AND_ASSIGN(std::string neighborhood_graph, RenderNeighborhoodAround(*inner_sum, 1, RenderedGraphFormat::kDot)); EXPECT_THAT(neighborhood_graph, HasSubstr(inner_sum->name())); } TEST_F(HloGraphDumperTest, Constant) { HloComputation::Builder b("b"); auto instruction = b.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(-42))); instruction->SetAndSanitizeName("i_am_a_constant_root_instruction"); HloModuleConfig config; HloModule m(TestName(), config); HloComputation* root_computation = m.AddEntryComputation(b.Build()); TF_ASSERT_OK_AND_ASSIGN( std::string graph, RenderGraph(*root_computation, "an_empty_graph", DebugOptions(), RenderedGraphFormat::kDot)); EXPECT_THAT(graph, HasSubstr("an_empty_graph")); } TEST_F(HloGraphDumperTest, TupleConstant) { Shape tuple_shape = ShapeUtil::MakeTupleShape( {ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(S32, {4, 5})}); HloComputation::Builder b("b"); auto constant 
= b.AddInstruction( HloInstruction::CreateConstant(Literal::CreateFromShape(tuple_shape))); auto gte = b.AddInstruction(HloInstruction::CreateGetTupleElement( ShapeUtil::MakeShape(F32, {3, 2}), constant, 0)); HloModuleConfig config; HloModule m(TestName(), config); HloComputation* root_computation = m.AddEntryComputation(b.Build(gte)); TF_ASSERT_OK_AND_ASSIGN( std::string graph, RenderGraph(*root_computation, "tuple_constant", DebugOptions(), RenderedGraphFormat::kDot)); EXPECT_THAT(graph, HasSubstr("tuple_constant")); EXPECT_THAT(graph, HasSubstr("constant (f32[3,2], s32[4,5])")); } TEST_F(HloGraphDumperTest, Compare) { const char* hlo_string = R"( HloModule comp ENTRY comp { param.0 = f32[10] parameter(0) param.1 = f32[10] parameter(1) ROOT lt = pred[10] compare(param.0, param.1), direction=LT })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( std::string graph, RenderGraph(*module->entry_computation(), "tuple_constant", DebugOptions(), RenderedGraphFormat::kDot)); EXPECT_THAT(graph, HasSubstr("direction=LT")); } TEST_F(HloGraphDumperTest, HasStatisticsViz) { const char* hlo_string = R"( HloModule comp ENTRY comp { param.0 = f32[10] parameter(0), statistics={visualizing_index=0,stat-0=0.5} param.1 = f32[10] parameter(1), statistics={visualizing_index=1,stat-0=55.5,stat-1=44.4} ROOT lt = pred[10] compare(param.0, param.1), direction=LT })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( std::string graph, RenderGraph(*module->entry_computation(), "tuple_constant", DebugOptions(), RenderedGraphFormat::kDot)); } TEST_F(HloGraphDumperTest, RootIsConstant) { const char* hlo_string = R"( HloModule indexed_conditional %then_branch (empty: ()) -> f32[] { %empty = () parameter(0) ROOT %then = f32[] constant(1) } %else_branch (empty.1: ()) -> f32[] { %empty.1 = () parameter(0) ROOT %else = f32[] constant(2) } ENTRY %conditional_select (constant: pred[]) -> (f32[]) { %constant = pred[] parameter(0) %emptytuple = () tuple() %conditional = f32[] conditional(pred[] %constant, () %emptytuple, () %emptytuple), true_computation=%then_branch, false_computation=%else_branch ROOT %t = (f32[]) tuple(f32[] %conditional) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( std::string graph, RenderGraph(*module->entry_computation(), "tuple_constant", DebugOptions(), RenderedGraphFormat::kDot)); } TEST_F(HloGraphDumperTest, OverrideColors) { const char* hlo_string = R"( HloModule comp ENTRY comp { param.0 = f32[10] parameter(0) param.1 = f32[10] parameter(1) ROOT lt = pred[10] compare(param.0, param.1), direction=LT })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); absl::flat_hash_map<const HloInstruction*, ColorStats> color_map; ColorStats color_stats_1; color_stats_1.color = "#A9C343"; color_stats_1.stats = absl::StrFormat("%.3f", 1.11); ColorStats color_stats_2; color_stats_2.color = "#BC8A3F"; color_stats_2.stats = absl::StrFormat("%.3f", 2.22); color_map[module->entry_computation()->GetInstructionWithName("param.0")] = color_stats_1; color_map[module->entry_computation()->GetInstructionWithName("param.1")] = color_stats_2; HloRenderOptions hlo_render_options; hlo_render_options.override_node_colors = true; TF_ASSERT_OK_AND_ASSIGN( std::string graph, RenderGraph(*module->entry_computation(), "tuple_constant", DebugOptions(), RenderedGraphFormat::kDot, hlo_render_options, color_map)); EXPECT_THAT(graph, 
HasSubstr("#A9C343")); EXPECT_THAT(graph, HasSubstr("1.110")); EXPECT_THAT(graph, HasSubstr("#BC8A3F")); EXPECT_THAT(graph, HasSubstr("2.220")); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_graph_dumper.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_graph_dumper_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
71217517-bd22-4e54-9891-4344e5585a1c
cpp
tensorflow/tensorflow
uniform_quantized_types
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc
#include "tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h" #include <cstdint> #include "llvm/ADT/STLExtras.h" #include "llvm/Support/Debug.h" #include "llvm/Support/MathExtras.h" #include "mlir/Dialect/Quant/IR/QuantTypes.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Location.h" #include "mlir/IR/MLIRContext.h" #include "mlir/IR/Operation.h" #include "mlir/IR/Types.h" #include "mlir/Support/LLVM.h" #define DEBUG_TYPE "uniform-quantized-types" namespace mlir { namespace quant { UniformQuantizedType CreateI8F32UniformQuantizedType(const Location loc, MLIRContext& context, const double scale, const int64_t zero_point, const bool narrow_range) { return UniformQuantizedType::getChecked( loc, QuantizationFlags::Signed, IntegerType::get(&context, 8), FloatType::getF32(&context), scale, zero_point, llvm::minIntN(8) + (narrow_range ? 1 : 0), llvm::maxIntN(8)); } UniformQuantizedType CreateI32F32UniformQuantizedType( const Location loc, MLIRContext& context, const double scale, const int64_t zero_point) { return UniformQuantizedType::getChecked( loc, QuantizationFlags::Signed, IntegerType::get(&context, 32), FloatType::getF32(&context), scale, zero_point, llvm::minIntN(32), llvm::maxIntN(32)); } UniformQuantizedPerAxisType CreateI8F32UniformQuantizedPerAxisType( const Location loc, MLIRContext& context, const ArrayRef<double> scales, const ArrayRef<int64_t> zero_points, const int quantization_dimension, const bool narrow_range) { return UniformQuantizedPerAxisType::getChecked( loc, QuantizationFlags::Signed, IntegerType::get(&context, 8), FloatType::getF32(&context), SmallVector<double>(scales), SmallVector<int64_t>(zero_points), quantization_dimension, llvm::minIntN(8) + (narrow_range ? 1 : 0), llvm::maxIntN(8)); } UniformQuantizedPerAxisType CreateI32F32UniformQuantizedPerAxisType( const Location loc, MLIRContext& context, const ArrayRef<double> scales, const ArrayRef<int64_t> zero_points, const int quantization_dimension) { return UniformQuantizedPerAxisType::getChecked( loc, QuantizationFlags::Signed, IntegerType::get(&context, 32), FloatType::getF32(&context), SmallVector<double>(scales), SmallVector<int64_t>(zero_points), quantization_dimension, llvm::minIntN(32), llvm::maxIntN(32)); } bool IsStorageTypeI8(const QuantizedType quantized_type) { const Type storage_type = quantized_type.getStorageType(); return storage_type.isInteger(8); } bool IsStorageTypeI32(const QuantizedType quantized_type) { const Type storage_type = quantized_type.getStorageType(); return storage_type.isInteger(32); } bool IsExpressedTypeF32(const QuantizedType quantized_type) { const Type expressed_type = quantized_type.getExpressedType(); return mlir::isa<Float32Type>(expressed_type); } bool IsI8F32UniformQuantizedType(const Type type) { const UniformQuantizedType quantized_type = mlir::dyn_cast_or_null<UniformQuantizedType>(type); if (!quantized_type) { LLVM_DEBUG(llvm::dbgs() << "Expected a uniform quantized type. Got: " << type << ".\n"); return false; } if (!IsStorageTypeI8(quantized_type)) { LLVM_DEBUG(llvm::dbgs() << "Expected an i8 storage type. Got: " << quantized_type << ".\n"); return false; } if (!IsExpressedTypeF32(quantized_type)) { LLVM_DEBUG(llvm::dbgs() << "Expected an f32 expressed type. 
Got: " << quantized_type << ".\n"); return false; } return true; } bool IsI8F32UniformQuantizedPerAxisType(const Type type) { const UniformQuantizedPerAxisType quantized_per_axis_type = mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(type); if (!quantized_per_axis_type) { LLVM_DEBUG(llvm::dbgs() << "Expected a uniform quantized type. Got: " << type << ".\n"); return false; } if (!IsStorageTypeI8(quantized_per_axis_type)) { LLVM_DEBUG(llvm::dbgs() << "Expected an i8 storage type. Got: " << quantized_per_axis_type << ".\n"); return false; } if (!IsExpressedTypeF32(quantized_per_axis_type)) { LLVM_DEBUG(llvm::dbgs() << "Expected an f32 expressed type. Got: " << quantized_per_axis_type << ".\n"); return false; } return true; } bool IsI32F32UniformQuantizedType(const Type type) { const UniformQuantizedType quantized_type = mlir::dyn_cast_or_null<UniformQuantizedType>(type); if (!quantized_type) { LLVM_DEBUG(llvm::dbgs() << "Expected a uniform quantized type. Got: " << type << ".\n"); return false; } if (!IsStorageTypeI32(quantized_type)) { LLVM_DEBUG(llvm::dbgs() << "Expected an i32 storage type. Got: " << quantized_type << ".\n"); return false; } if (!IsExpressedTypeF32(quantized_type)) { LLVM_DEBUG(llvm::dbgs() << "Expected an f32 expressed type. Got: " << quantized_type << ".\n"); return false; } return true; } bool IsI32F32UniformQuantizedPerAxisType(const Type type) { const UniformQuantizedPerAxisType quantized_per_axis_type = mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(type); if (!quantized_per_axis_type) { LLVM_DEBUG(llvm::dbgs() << "Expected a uniform quantized type. Got: " << type << ".\n"); return false; } if (!IsStorageTypeI32(quantized_per_axis_type)) { LLVM_DEBUG(llvm::dbgs() << "Expected an i32 storage type. Got: " << quantized_per_axis_type << ".\n"); return false; } if (!IsExpressedTypeF32(quantized_per_axis_type)) { LLVM_DEBUG(llvm::dbgs() << "Expected an f32 expressed type. Got: " << quantized_per_axis_type << ".\n"); return false; } return true; } bool IsSupportedByTfliteQuantizeOrDequantizeOps(IntegerType storage_type) { if (storage_type.getWidth() == 8 || (storage_type.isSigned() && storage_type.getWidth() == 16)) { return true; } LLVM_DEBUG(llvm::dbgs() << "Uniform quantize / dequantize op only supports ui8, i8 or " "i16 for the storage type of uniform quantized type. Got: " << storage_type << ".\n"); return false; } bool IsQuantizedTensorType(Type type) { if (!mlir::isa<TensorType>(type)) { return false; } Type element_type = mlir::cast<TensorType>(type).getElementType(); return mlir::isa<QuantizedType>(element_type); } bool IsOpFullyQuantized(Operation* op) { return llvm::all_of(op->getOperandTypes(), IsQuantizedTensorType) && llvm::all_of(op->getResultTypes(), IsQuantizedTensorType); } bool IsOpNotQuantized(Operation* op) { return !llvm::any_of(op->getOperandTypes(), IsQuantizedTensorType) && !llvm::any_of(op->getResultTypes(), IsQuantizedTensorType); } } }
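Before the tests, a short hedged sketch of how the factory and predicate helpers above compose. The SketchRoundTrip function is hypothetical; the dialect setup mirrors the test fixtures below, and only the Create*/Is* helpers come from the file above.

// Hypothetical sketch; not part of uniform_quantized_types.cc.
#include "mlir/Dialect/Quant/IR/Quant.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h"

bool SketchRoundTrip() {
  mlir::MLIRContext ctx;
  ctx.loadDialect<mlir::quant::QuantDialect>();
  // i8 storage, f32 expressed, scale 0.5, zero point 0, full [-128, 127]
  // storage range (narrow_range defaults to false in the header).
  auto type = mlir::quant::CreateI8F32UniformQuantizedType(
      mlir::UnknownLoc::get(&ctx), ctx, /*scale=*/0.5, /*zero_point=*/0);
  // The predicate takes any Type and re-checks both the storage and the
  // expressed type, so it is safe to call on arbitrary element types.
  return mlir::quant::IsI8F32UniformQuantizedType(type);
}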
#include "tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h" #include <cstdint> #include <limits> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/strings/string_view.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/Quant/IR/Quant.h" #include "mlir/Dialect/Quant/IR/QuantTypes.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/Location.h" #include "mlir/IR/MLIRContext.h" #include "mlir/IR/OwningOpRef.h" #include "mlir/IR/Value.h" #include "mlir/Support/LLVM.h" #include "stablehlo/dialect/StablehloOps.h" #include "tensorflow/compiler/mlir/quantization/common/test_base.h" namespace mlir { namespace quant { namespace { using ::testing::ElementsAreArray; using ::testing::IsNull; using ::testing::Ne; using ::testing::NotNull; using ::testing::Test; class CreateI8F32UniformQuantizedTypeTest : public Test { protected: CreateI8F32UniformQuantizedTypeTest() : ctx_() { ctx_.loadDialect<quant::QuantDialect>(); } MLIRContext ctx_; }; TEST_F(CreateI8F32UniformQuantizedTypeTest, I8StorageTypeSucceeds) { const UniformQuantizedType quantized_type = CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_, 1.0, 0); EXPECT_TRUE(quantized_type.getStorageType().isSignlessInteger(8)); } TEST_F(CreateI8F32UniformQuantizedTypeTest, F32ExpressedTypeSucceeds) { const UniformQuantizedType quantized_type = CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_, 1.0, 0); EXPECT_TRUE(quantized_type.getExpressedType().isF32()); } TEST_F(CreateI8F32UniformQuantizedTypeTest, SignedQuantizedTypeSucceeds) { const UniformQuantizedType quantized_type = CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_, 1.0, 0); EXPECT_TRUE(quantized_type.isSigned()); } TEST_F(CreateI8F32UniformQuantizedTypeTest, StorageTypeMinMaxEqualToI8MinMax) { const UniformQuantizedType quantized_type = CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_, 1.0, 0); EXPECT_EQ(quantized_type.getStorageTypeMin(), -128); EXPECT_EQ(quantized_type.getStorageTypeMax(), 127); } TEST_F(CreateI8F32UniformQuantizedTypeTest, StorageTypeMinMaxNarrowRange) { const UniformQuantizedType quantized_type = CreateI8F32UniformQuantizedType( UnknownLoc::get(&ctx_), ctx_, 1.0, 0, true); EXPECT_EQ(quantized_type.getStorageTypeMin(), -127); EXPECT_EQ(quantized_type.getStorageTypeMax(), 127); } TEST_F(CreateI8F32UniformQuantizedTypeTest, HasScaleAndZeroPointProperlySet) { const UniformQuantizedType quantized_type = CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_, 8.0, 99); EXPECT_EQ(quantized_type.getScale(), 8.0); EXPECT_EQ(quantized_type.getZeroPoint(), 99); } class CreateI32F32UniformQuantizedTypeTest : public Test { protected: CreateI32F32UniformQuantizedTypeTest() : ctx_() { ctx_.loadDialect<quant::QuantDialect>(); } MLIRContext ctx_; }; TEST_F(CreateI32F32UniformQuantizedTypeTest, I32StorageTypeSucceeds) { const UniformQuantizedType quantized_type = CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_, 1.0, 0); EXPECT_TRUE(quantized_type.getStorageType().isSignlessInteger(32)); } TEST_F(CreateI32F32UniformQuantizedTypeTest, F32ExpressedTypeSucceeds) { const UniformQuantizedType quantized_type = CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_, 1.0, 0); EXPECT_TRUE(quantized_type.getExpressedType().isF32()); } TEST_F(CreateI32F32UniformQuantizedTypeTest, SignedQuantizedTypeSucceeds) { const UniformQuantizedType quantized_type = CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_, 1.0, 0); 
EXPECT_TRUE(quantized_type.isSigned()); } TEST_F(CreateI32F32UniformQuantizedTypeTest, StorageTypeMinMaxEqualToI32MinMax) { const UniformQuantizedType quantized_type = CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_, 1.0, 0); EXPECT_EQ(quantized_type.getStorageTypeMin(), std::numeric_limits<int32_t>::min()); EXPECT_EQ(quantized_type.getStorageTypeMax(), std::numeric_limits<int32_t>::max()); } TEST_F(CreateI32F32UniformQuantizedTypeTest, HasScaleAndZeroPointProperlySet) { const UniformQuantizedType quantized_type = CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_, 8.0, 1111); EXPECT_EQ(quantized_type.getScale(), 8.0); EXPECT_EQ(quantized_type.getZeroPoint(), 1111); } class CreateI8F32UniformQuantizedPerAxisTypeTest : public Test { protected: CreateI8F32UniformQuantizedPerAxisTypeTest() : ctx_() { ctx_.loadDialect<quant::QuantDialect>(); } MLIRContext ctx_; }; TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest, I8StorageTypeSucceeds) { const UniformQuantizedPerAxisType quantized_type = CreateI8F32UniformQuantizedPerAxisType( UnknownLoc::get(&ctx_), ctx_, SmallVector<double, 2>{1.0, 1.0}, SmallVector<int64_t, 2>{0, 0}, 0); EXPECT_TRUE(quantized_type.getStorageType().isSignlessInteger(8)); } TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest, F32ExpressedTypeSucceeds) { const UniformQuantizedPerAxisType quantized_type = CreateI8F32UniformQuantizedPerAxisType( UnknownLoc::get(&ctx_), ctx_, SmallVector<double, 2>{1.0, 1.0}, SmallVector<int64_t, 2>{0, 0}, 0); EXPECT_TRUE(quantized_type.getExpressedType().isF32()); } TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest, SignedQuantizedTypeSucceeds) { const UniformQuantizedPerAxisType quantized_type = CreateI8F32UniformQuantizedPerAxisType( UnknownLoc::get(&ctx_), ctx_, SmallVector<double, 2>{1.0, 1.0}, SmallVector<int64_t, 2>{0, 0}, 0); EXPECT_TRUE(quantized_type.isSigned()); } TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest, StorageTypeMinMaxEqualToI8MinMax) { const UniformQuantizedPerAxisType quantized_type = CreateI8F32UniformQuantizedPerAxisType( UnknownLoc::get(&ctx_), ctx_, SmallVector<double, 2>{1.0, 1.0}, SmallVector<int64_t, 2>{0, 0}, 0); EXPECT_EQ(quantized_type.getStorageTypeMin(), -128); EXPECT_EQ(quantized_type.getStorageTypeMax(), 127); } TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest, StorageTypeMinMaxNarrowRange) { const UniformQuantizedPerAxisType quantized_type = CreateI8F32UniformQuantizedPerAxisType( UnknownLoc::get(&ctx_), ctx_, SmallVector<double, 2>{1.0, 1.0}, SmallVector<int64_t, 2>{0, 0}, 0, true); EXPECT_EQ(quantized_type.getStorageTypeMin(), -127); EXPECT_EQ(quantized_type.getStorageTypeMax(), 127); } TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest, HasQuantizationDimensionProperlySet) { const UniformQuantizedPerAxisType quantized_type = CreateI8F32UniformQuantizedPerAxisType( UnknownLoc::get(&ctx_), ctx_, SmallVector<double, 2>{1.0, 1.0}, SmallVector<int64_t, 2>{0, 0}, 3); EXPECT_EQ(quantized_type.getQuantizedDimension(), 3); } TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest, HasScaleAndZeroPointProperlySet) { const UniformQuantizedPerAxisType quantized_type = CreateI8F32UniformQuantizedPerAxisType( UnknownLoc::get(&ctx_), ctx_, SmallVector<double, 2>{8.0, 9.0}, SmallVector<int64_t, 2>{98, 99}, 0); EXPECT_THAT(quantized_type.getScales(), ElementsAreArray({8.0, 9.0})); EXPECT_THAT(quantized_type.getZeroPoints(), ElementsAreArray({98, 99})); } class CreateI32F32UniformQuantizedPerAxisTypeTest : public Test { protected: CreateI32F32UniformQuantizedPerAxisTypeTest() : ctx_() { 
ctx_.loadDialect<quant::QuantDialect>(); } MLIRContext ctx_; }; TEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest, I32StorageTypeSucceeds) { const UniformQuantizedPerAxisType quantized_type = CreateI32F32UniformQuantizedPerAxisType( UnknownLoc::get(&ctx_), ctx_, SmallVector<double, 2>{1.0, 1.0}, SmallVector<int64_t, 2>{0, 0}, 0); EXPECT_TRUE(quantized_type.getStorageType().isSignlessInteger(32)); } TEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest, F32ExpressedTypeSucceeds) { const UniformQuantizedPerAxisType quantized_type = CreateI32F32UniformQuantizedPerAxisType( UnknownLoc::get(&ctx_), ctx_, SmallVector<double, 2>{1.0, 1.0}, SmallVector<int64_t, 2>{0, 0}, 0); EXPECT_TRUE(quantized_type.getExpressedType().isF32()); } TEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest, StorageTypeMinMaxEqualToI32MinMax) { const UniformQuantizedPerAxisType quantized_type = CreateI32F32UniformQuantizedPerAxisType( UnknownLoc::get(&ctx_), ctx_, SmallVector<double, 2>{1.0, 1.0}, SmallVector<int64_t, 2>{0, 0}, 0); EXPECT_EQ(quantized_type.getStorageTypeMin(), std::numeric_limits<int32_t>::min()); EXPECT_EQ(quantized_type.getStorageTypeMax(), std::numeric_limits<int32_t>::max()); } TEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest, HasQuantizationDimensionProperlySet) { const UniformQuantizedPerAxisType quantized_type = CreateI32F32UniformQuantizedPerAxisType( UnknownLoc::get(&ctx_), ctx_, SmallVector<double, 2>{1.0, 1.0}, SmallVector<int64_t, 2>{0, 0}, 3); EXPECT_EQ(quantized_type.getQuantizedDimension(), 3); } TEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest, HasScaleAndZeroPointProperlySet) { const UniformQuantizedPerAxisType quantized_type = CreateI32F32UniformQuantizedPerAxisType( UnknownLoc::get(&ctx_), ctx_, SmallVector<double, 2>{8.0, 9.0}, SmallVector<int64_t, 2>{98, 99}, 0); EXPECT_THAT(quantized_type.getScales(), ElementsAreArray({8.0, 9.0})); EXPECT_THAT(quantized_type.getZeroPoints(), ElementsAreArray({98, 99})); } class IsI8F32UniformQuantizedTypeTest : public Test { protected: IsI8F32UniformQuantizedTypeTest() : builder_(&ctx_) { ctx_.loadDialect<quant::QuantDialect>(); } MLIRContext ctx_; OpBuilder builder_; }; TEST_F(IsI8F32UniformQuantizedTypeTest, I8F32UniformQuantizedTypeSucceeds) { const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get( QuantizationFlags::Signed, builder_.getI8Type(), builder_.getF32Type(), 1.0, 0, -128, 127); EXPECT_TRUE(IsI8F32UniformQuantizedType(qi8_type)); } TEST_F(IsI8F32UniformQuantizedTypeTest, UniformQuantizedTypeSucceeds) { const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get( QuantizationFlags::Signed, builder_.getI8Type(), builder_.getF32Type(), 1.0, 0, -128, 127); EXPECT_THAT(mlir::dyn_cast_or_null<UniformQuantizedType>(qi8_type), NotNull()); } TEST_F(IsI8F32UniformQuantizedTypeTest, StorageTypeI8Succeeds) { const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get( QuantizationFlags::Signed, builder_.getI8Type(), builder_.getF32Type(), 1.0, 0, -128, 127); EXPECT_TRUE(IsStorageTypeI8(qi8_type)); } TEST_F(IsI8F32UniformQuantizedTypeTest, ExpressedTypeF32Succeeds) { const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get( QuantizationFlags::Signed, builder_.getI8Type(), builder_.getF32Type(), 1.0, 0, -128, 127); EXPECT_TRUE(IsExpressedTypeF32(qi8_type)); } class IsI8F32UniformQuantizedPerAxisTypeTest : public Test { protected: IsI8F32UniformQuantizedPerAxisTypeTest() : builder_(&ctx_) { ctx_.loadDialect<quant::QuantDialect>(); } MLIRContext ctx_; OpBuilder builder_; }; 
TEST_F(IsI8F32UniformQuantizedPerAxisTypeTest, I8F32UniformQuantizedPerAxisTypeSucceeds) { const UniformQuantizedPerAxisType qi8_per_axis_type = quant::UniformQuantizedPerAxisType::get( QuantizationFlags::Signed, builder_.getI8Type(), builder_.getF32Type(), {1.0}, {0}, 0, -128, 127); EXPECT_TRUE(IsI8F32UniformQuantizedPerAxisType(qi8_per_axis_type)); EXPECT_FALSE(IsI8F32UniformQuantizedType(qi8_per_axis_type)); } TEST_F(IsI8F32UniformQuantizedTypeTest, UniformQuantizedPerAxisTypeSucceeds) { const UniformQuantizedPerAxisType qi8_per_axis_type = quant::UniformQuantizedPerAxisType::get( QuantizationFlags::Signed, builder_.getI8Type(), builder_.getF32Type(), {1.0}, {0}, 0, -128, 127); EXPECT_THAT( mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(qi8_per_axis_type), NotNull()); } TEST_F(IsI8F32UniformQuantizedPerAxisTypeTest, StorageTypeI8Succeeds) { const UniformQuantizedPerAxisType qi8_per_axis_type = quant::UniformQuantizedPerAxisType::get( QuantizationFlags::Signed, builder_.getI8Type(), builder_.getF32Type(), {1.0}, {0}, 0, -128, 127); EXPECT_TRUE(IsStorageTypeI8(qi8_per_axis_type)); } TEST_F(IsI8F32UniformQuantizedPerAxisTypeTest, ExpressedTypeF32Succeeds) { const UniformQuantizedPerAxisType qi8_per_axis_type = quant::UniformQuantizedPerAxisType::get( QuantizationFlags::Signed, builder_.getI8Type(), builder_.getF32Type(), {1.0}, {0}, 0, -128, 127); EXPECT_TRUE(IsExpressedTypeF32(qi8_per_axis_type)); } class IsI32F32UniformQuantizedTypeTest : public Test { protected: IsI32F32UniformQuantizedTypeTest() : builder_(&ctx_) { ctx_.loadDialect<quant::QuantDialect>(); } MLIRContext ctx_; OpBuilder builder_; }; TEST_F(IsI32F32UniformQuantizedTypeTest, I32F32UniformQuantizedTypeSucceeds) { const UniformQuantizedType qi32_type = quant::UniformQuantizedType::get( QuantizationFlags::Signed, builder_.getI32Type(), builder_.getF32Type(), 1.0, 0, -2147483647, 2147483646); EXPECT_TRUE(IsI32F32UniformQuantizedType(qi32_type)); } TEST_F(IsI32F32UniformQuantizedTypeTest, UniformQuantizedTypeSucceeds) { const UniformQuantizedType qi32_type = quant::UniformQuantizedType::get( QuantizationFlags::Signed, builder_.getI32Type(), builder_.getF32Type(), 1.0, 0, -2147483647, 2147483646); EXPECT_TRUE(IsI32F32UniformQuantizedType(qi32_type)); EXPECT_THAT(mlir::dyn_cast_or_null<UniformQuantizedType>(qi32_type), NotNull()); } TEST_F(IsI32F32UniformQuantizedTypeTest, StorageTypeI32Succeeds) { const UniformQuantizedType qi32_type = quant::UniformQuantizedType::get( QuantizationFlags::Signed, builder_.getI32Type(), builder_.getF32Type(), 1.0, 0, -2147483647, 2147483646); EXPECT_TRUE(IsI32F32UniformQuantizedType(qi32_type)); EXPECT_TRUE(IsStorageTypeI32(qi32_type)); } TEST_F(IsI32F32UniformQuantizedTypeTest, ExpressedTypeF32Succeeds) { const UniformQuantizedType qi32_per_axis_type = quant::UniformQuantizedType::get( QuantizationFlags::Signed, builder_.getI32Type(), builder_.getF32Type(), 1.0, 0, -2147483647, 2147483646); EXPECT_TRUE(IsExpressedTypeF32(qi32_per_axis_type)); } class IsI32F32UniformQuantizedPerAxisTypeTest : public Test { protected: IsI32F32UniformQuantizedPerAxisTypeTest() : builder_(&ctx_) { ctx_.loadDialect<quant::QuantDialect>(); } MLIRContext ctx_; OpBuilder builder_; }; TEST_F(IsI32F32UniformQuantizedPerAxisTypeTest, I32F32UniformQuantizedPerAxisTypeSucceeds) { const UniformQuantizedPerAxisType qi32_per_axis_type = quant::UniformQuantizedPerAxisType::get( QuantizationFlags::Signed, builder_.getI32Type(), builder_.getF32Type(), {1.0}, {0}, 0, -2147483647, 2147483646); 
EXPECT_TRUE(IsI32F32UniformQuantizedPerAxisType(qi32_per_axis_type)); EXPECT_FALSE(IsI32F32UniformQuantizedType(qi32_per_axis_type)); } TEST_F(IsI32F32UniformQuantizedPerAxisTypeTest, I8F32UniformQuantizedTypeFails) { const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get( QuantizationFlags::Signed, builder_.getI8Type(), builder_.getF32Type(), 1.0, 0, -128, 127); EXPECT_FALSE(IsI32F32UniformQuantizedPerAxisType(qi8_type)); EXPECT_FALSE(IsStorageTypeI32(qi8_type)); EXPECT_THAT(mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(qi8_type), IsNull()); } TEST_F(IsI32F32UniformQuantizedTypeTest, UniformQuantizedPerAxisTypeSucceeds) { const UniformQuantizedPerAxisType qi32_per_axis_type = quant::UniformQuantizedPerAxisType::get( QuantizationFlags::Signed, builder_.getI32Type(), builder_.getF32Type(), {1.0}, {0}, 0, -2147483647, 2147483646); EXPECT_THAT( mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(qi32_per_axis_type), NotNull()); } TEST_F(IsI32F32UniformQuantizedPerAxisTypeTest, StorageTypeI32Succeeds) { const UniformQuantizedPerAxisType qi32_per_axis_type = quant::UniformQuantizedPerAxisType::get( QuantizationFlags::Signed, builder_.getI32Type(), builder_.getF32Type(), {1.0}, {0}, 0, -2147483647, 2147483646); EXPECT_TRUE(IsStorageTypeI32(qi32_per_axis_type)); } TEST_F(IsI32F32UniformQuantizedPerAxisTypeTest, ExpressedTypeF32Succeeds) { const UniformQuantizedPerAxisType qi32_per_axis_type = quant::UniformQuantizedPerAxisType::get( QuantizationFlags::Signed, builder_.getI32Type(), builder_.getF32Type(), {1.0}, {0}, 0, -2147483647, 2147483646); EXPECT_TRUE(IsExpressedTypeF32(qi32_per_axis_type)); } class IsSupportedByTfliteQuantizeOrDequantizeOpsTest : public Test { protected: IsSupportedByTfliteQuantizeOrDequantizeOpsTest() : builder_(&ctx_) { ctx_.loadDialect<quant::QuantDialect>(); } MLIRContext ctx_; OpBuilder builder_; }; TEST_F(IsSupportedByTfliteQuantizeOrDequantizeOpsTest, StorageTypeI8Succeeds) { auto qi8_type = quant::UniformQuantizedType::get( QuantizationFlags::Signed, builder_.getI8Type(), builder_.getF32Type(), 1.0, 0, -128, 127); EXPECT_TRUE(IsSupportedByTfliteQuantizeOrDequantizeOps( dyn_cast_or_null<IntegerType>(qi8_type.getStorageType()))); } TEST_F(IsSupportedByTfliteQuantizeOrDequantizeOpsTest, StorageTypeI16Succeeds) { auto qi16_type = quant::UniformQuantizedType::get( QuantizationFlags::Signed, builder_.getI8Type(), builder_.getF32Type(), 1.0, 0, -128, 127); EXPECT_TRUE(IsSupportedByTfliteQuantizeOrDequantizeOps( dyn_cast_or_null<IntegerType>(qi16_type.getStorageType()))); } TEST_F(IsSupportedByTfliteQuantizeOrDequantizeOpsTest, StorageTypeUI8Succeeds) { auto qi8_type = quant::UniformQuantizedType::get( QuantizationFlags::Signed, builder_.getI8Type(), builder_.getF32Type(), 1.0, 0, -128, 127); EXPECT_TRUE(IsSupportedByTfliteQuantizeOrDequantizeOps( dyn_cast_or_null<IntegerType>(qi8_type.getStorageType()))); } using IsOpFullyQuantizedTest = QuantizationTestBase; TEST_F(IsOpFullyQuantizedTest, TrueIfOpFullyQuantized) { constexpr absl::string_view kFullyQuantizedAdd = R"mlir( func.func @fully_quantized_add(%arg0: tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> { %0 = stablehlo.add %arg0, %arg0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> return %0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> } )mlir"; OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kFullyQuantizedAdd); ASSERT_TRUE(module_op); auto func_op = module_op->lookupSymbol<func::FuncOp>("fully_quantized_add"); 
  ASSERT_THAT(func_op, NotNull());

  auto add_op_itr = func_op.getBody().op_begin<mlir::stablehlo::AddOp>();
  ASSERT_THAT(add_op_itr,
              Ne(func_op.getBody().op_end<mlir::stablehlo::AddOp>()));

  EXPECT_TRUE(IsOpFullyQuantized(*add_op_itr));
}

TEST_F(IsOpFullyQuantizedTest, FalseIfOpNotQuantized) {
  constexpr absl::string_view kNotQuantizedAdd = R"mlir(
    func.func @not_quantized_add(%arg0: tensor<2xf32>) -> tensor<2xf32> {
      %0 = stablehlo.add %arg0, %arg0 : tensor<2xf32>
      return %0 : tensor<2xf32>
    }
  )mlir";

  OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kNotQuantizedAdd);
  ASSERT_TRUE(module_op);

  auto func_op = module_op->lookupSymbol<func::FuncOp>("not_quantized_add");
  ASSERT_THAT(func_op, NotNull());

  auto add_op_itr = func_op.getBody().op_begin<mlir::stablehlo::AddOp>();
  ASSERT_THAT(add_op_itr,
              Ne(func_op.getBody().op_end<mlir::stablehlo::AddOp>()));

  EXPECT_FALSE(IsOpFullyQuantized(*add_op_itr));
}

TEST_F(IsOpFullyQuantizedTest, FalseIfOpPartiallyQuantized) {
  constexpr absl::string_view kQuantizeOp = R"mlir(
    func.func @quantize(%arg0: tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
      %0 = stablehlo.uniform_quantize %arg0 : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
      return %0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
    }
  )mlir";

  OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kQuantizeOp);
  ASSERT_TRUE(module_op);

  auto func_op = module_op->lookupSymbol<func::FuncOp>("quantize");
  ASSERT_THAT(func_op, NotNull());

  auto uniform_quantize_op_itr =
      func_op.getBody().op_begin<mlir::stablehlo::UniformQuantizeOp>();
  ASSERT_THAT(
      uniform_quantize_op_itr,
      Ne(func_op.getBody().op_end<mlir::stablehlo::UniformQuantizeOp>()));

  EXPECT_FALSE(IsOpFullyQuantized(*uniform_quantize_op_itr));
}

using IsOpNotQuantizedTest = QuantizationTestBase;

TEST_F(IsOpNotQuantizedTest, TrueIfOpNotQuantized) {
  constexpr absl::string_view kNotQuantizedAdd = R"mlir(
    func.func @not_quantized_add(%arg0: tensor<2xf32>) -> tensor<2xf32> {
      %0 = stablehlo.add %arg0, %arg0 : tensor<2xf32>
      return %0 : tensor<2xf32>
    }
  )mlir";

  OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kNotQuantizedAdd);
  ASSERT_TRUE(module_op);

  auto func_op = module_op->lookupSymbol<func::FuncOp>("not_quantized_add");
  ASSERT_THAT(func_op, NotNull());

  auto add_op_itr = func_op.getBody().op_begin<mlir::stablehlo::AddOp>();
  ASSERT_THAT(add_op_itr,
              Ne(func_op.getBody().op_end<mlir::stablehlo::AddOp>()));

  EXPECT_TRUE(IsOpNotQuantized(*add_op_itr));
}

TEST_F(IsOpNotQuantizedTest, FalseIfOpQuantized) {
  constexpr absl::string_view kQuantizedAdd = R"mlir(
    func.func @quantized_add(%arg0: tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
      %0 = stablehlo.add %arg0, %arg0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
      return %0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
    }
  )mlir";

  OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kQuantizedAdd);
  ASSERT_TRUE(module_op);

  auto func_op = module_op->lookupSymbol<func::FuncOp>("quantized_add");
  ASSERT_THAT(func_op, NotNull());

  auto add_op_itr = func_op.getBody().op_begin<mlir::stablehlo::AddOp>();
  ASSERT_THAT(add_op_itr,
              Ne(func_op.getBody().op_end<mlir::stablehlo::AddOp>()));

  EXPECT_FALSE(IsOpNotQuantized(*add_op_itr));
}

TEST_F(IsOpNotQuantizedTest, FalseIfOpPartiallyQuantized) {
  constexpr absl::string_view kQuantizeOp = R"mlir(
    func.func @quantize(%arg0: tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
      %0 = stablehlo.uniform_quantize %arg0 : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
      return %0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
    }
  )mlir";

  OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kQuantizeOp);
  ASSERT_TRUE(module_op);

  auto func_op = module_op->lookupSymbol<func::FuncOp>("quantize");
  ASSERT_THAT(func_op, NotNull());

  auto uniform_quantize_op_itr =
      func_op.getBody().op_begin<mlir::stablehlo::UniformQuantizeOp>();
  ASSERT_THAT(
      uniform_quantize_op_itr,
      Ne(func_op.getBody().op_end<mlir::stablehlo::UniformQuantizeOp>()));

  EXPECT_FALSE(IsOpNotQuantized(*uniform_quantize_op_itr));
}

using UniformQuantizedTypeTest = QuantizationTestBase;

TEST_F(UniformQuantizedTypeTest, GetElementTypeSucceeds) {
  constexpr absl::string_view kQuantizeOp = R"mlir(
    func.func @quantize(%arg0: tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
      %0 = stablehlo.uniform_quantize %arg0 : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
      return %0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
    }
  )mlir";

  OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kQuantizeOp);
  ASSERT_TRUE(module_op);

  auto func_op = module_op->lookupSymbol<func::FuncOp>("quantize");
  ASSERT_THAT(func_op, NotNull());

  auto uniform_quantize_op =
      *func_op.getOps<::mlir::stablehlo::UniformQuantizeOp>().begin();
  Value result = uniform_quantize_op.getResult();
  EXPECT_THAT(GetElementType(result), NotNull());
}

}
}
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
94996556-fc86-4b9e-8614-170ea8e383c5
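The three predicates tested above share one idea: classify an op by whether its operand and result element types are quantized. Below is a minimal standalone sketch (not the TensorFlow/MLIR implementation; a plain enum stands in for MLIR element types) of the semantics the tests exercise: "fully quantized" requires every operand and result to be quantized, "not quantized" requires none to be, and a partially quantized op such as uniform_quantize satisfies neither.

#include <algorithm>
#include <cassert>
#include <vector>

enum class ElemType { kFloat, kQuantized };

struct Op {
  std::vector<ElemType> operands;
  std::vector<ElemType> results;
};

static bool AllQuantized(const std::vector<ElemType>& ts) {
  return std::all_of(ts.begin(), ts.end(),
                     [](ElemType t) { return t == ElemType::kQuantized; });
}

static bool NoneQuantized(const std::vector<ElemType>& ts) {
  return std::none_of(ts.begin(), ts.end(),
                      [](ElemType t) { return t == ElemType::kQuantized; });
}

bool IsOpFullyQuantized(const Op& op) {
  return AllQuantized(op.operands) && AllQuantized(op.results);
}

bool IsOpNotQuantized(const Op& op) {
  return NoneQuantized(op.operands) && NoneQuantized(op.results);
}

int main() {
  Op quantized_add{{ElemType::kQuantized, ElemType::kQuantized},
                   {ElemType::kQuantized}};
  Op float_add{{ElemType::kFloat, ElemType::kFloat}, {ElemType::kFloat}};
  // uniform_quantize mixes a float operand with a quantized result.
  Op uniform_quantize{{ElemType::kFloat}, {ElemType::kQuantized}};

  assert(IsOpFullyQuantized(quantized_add) && !IsOpNotQuantized(quantized_add));
  assert(!IsOpFullyQuantized(float_add) && IsOpNotQuantized(float_add));
  // Partially quantized ops fail both predicates, matching the two
  // FalseIfOpPartiallyQuantized tests above.
  assert(!IsOpFullyQuantized(uniform_quantize) &&
         !IsOpNotQuantized(uniform_quantize));
}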
cpp
tensorflow/tensorflow
tensor_id
tensorflow/core/graph/tensor_id.cc
tensorflow/core/graph/tensor_id_test.cc
#include "tensorflow/core/graph/tensor_id.h" #include <string> #include "tensorflow/core/lib/core/stringpiece.h" #include "tensorflow/core/lib/strings/str_util.h" namespace tensorflow { TensorId::TensorId(const SafeTensorId& id) : TensorId(id.first, id.second) {} SafeTensorId::SafeTensorId(const TensorId& id) : SafeTensorId(string(id.first), id.second) {} TensorId ParseTensorName(const string& name) { return ParseTensorName(StringPiece(name.data(), name.size())); } TensorId ParseTensorName(StringPiece name) { const char* base = name.data(); const char* p = base + name.size() - 1; unsigned int index = 0; unsigned int mul = 1; while (p > base && (*p >= '0' && *p <= '9')) { index += ((*p - '0') * mul); mul *= 10; p--; } TensorId id; if (p > base && *p == ':' && mul > 1) { id.first = StringPiece(base, p - base); id.second = index; } else if (absl::StartsWith(name, "^")) { id.first = StringPiece(base + 1); id.second = Graph::kControlSlot; } else { id.first = name; id.second = 0; } return id; } bool IsTensorIdControl(const TensorId& tensor_id) { return tensor_id.index() == Graph::kControlSlot; } }
#include "tensorflow/core/graph/tensor_id.h" #include <vector> #include "tensorflow/core/lib/random/simple_philox.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { string ParseHelper(const string& n) { return ParseTensorName(n).ToString(); } TEST(TensorIdTest, ParseTensorName) { EXPECT_EQ(ParseHelper("W1"), "W1:0"); EXPECT_EQ(ParseHelper("W1:0"), "W1:0"); EXPECT_EQ(ParseHelper("weights:0"), "weights:0"); EXPECT_EQ(ParseHelper("W1:1"), "W1:1"); EXPECT_EQ(ParseHelper("W1:17"), "W1:17"); EXPECT_EQ(ParseHelper("xyz1_17"), "xyz1_17:0"); EXPECT_EQ(ParseHelper("^foo"), "^foo"); } uint32 Skewed(random::SimplePhilox* rnd, int max_log) { const uint32 space = 1 << (rnd->Rand32() % (max_log + 1)); return rnd->Rand32() % space; } void BM_ParseTensorName(::testing::benchmark::State& state) { const int arg = state.range(0); random::PhiloxRandom philox(301, 17); random::SimplePhilox rnd(&philox); std::vector<string> names; for (int i = 0; i < 100; i++) { string name; switch (arg) { case 0: { size_t len = Skewed(&rnd, 4); while (name.size() < len) { name += rnd.OneIn(4) ? '0' : 'a'; } if (rnd.OneIn(3)) { strings::StrAppend(&name, ":", rnd.Uniform(12)); } break; } case 1: name = "W1"; break; case 2: name = "t0003"; break; case 3: name = "weights"; break; case 4: name = "weights:17"; break; case 5: name = "^weights"; break; default: LOG(FATAL) << "Unexpected arg"; break; } names.push_back(name); } TensorId id; int index = 0; int sum = 0; for (auto s : state) { id = ParseTensorName(names[index++ % names.size()]); sum += id.second; } VLOG(2) << sum; } BENCHMARK(BM_ParseTensorName)->Arg(0)->Arg(1)->Arg(2)->Arg(3)->Arg(4)->Arg(5); TEST(TensorIdTest, IsTensorIdControl) { string input = "^foo"; TensorId tensor_id = ParseTensorName(input); EXPECT_TRUE(IsTensorIdControl(tensor_id)); input = "foo"; tensor_id = ParseTensorName(input); EXPECT_FALSE(IsTensorIdControl(tensor_id)); input = "foo:2"; tensor_id = ParseTensorName(input); EXPECT_FALSE(IsTensorIdControl(tensor_id)); } TEST(TensorIdTest, PortZero) { for (string input : {"foo", "foo:0"}) { TensorId tensor_id = ParseTensorName(input); EXPECT_EQ("foo", tensor_id.node()); EXPECT_EQ(0, tensor_id.index()); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/tensor_id.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/tensor_id_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9f641e69-b766-49cc-9efc-fb43469c2b28
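ParseTensorName avoids any forward scan for ':' by walking backwards over trailing digits; the "mul > 1" check distinguishes a genuine ":<digits>" suffix from names that merely end in a colon or contain digits elsewhere. Below is a standalone sketch of that strategy on std::string_view rather than StringPiece; kControlSlot is a hypothetical stand-in for Graph::kControlSlot.

#include <cassert>
#include <string_view>
#include <utility>

constexpr int kControlSlot = -1;  // assumption: mirrors Graph::kControlSlot

std::pair<std::string_view, int> ParseName(std::string_view name) {
  const char* base = name.data();
  const char* p = base + name.size() - 1;
  int index = 0, mul = 1;
  // Accumulate trailing digits from the right, least significant first.
  while (p > base && *p >= '0' && *p <= '9') {
    index += (*p - '0') * mul;
    mul *= 10;
    --p;
  }
  if (p > base && *p == ':' && mul > 1) {
    return {name.substr(0, p - base), index};  // "W1:17" -> ("W1", 17)
  }
  if (!name.empty() && name.front() == '^') {
    return {name.substr(1), kControlSlot};     // "^foo" -> control edge
  }
  return {name, 0};                            // "W1" -> ("W1", 0)
}

int main() {
  assert(ParseName("W1:17") == std::make_pair(std::string_view("W1"), 17));
  assert(ParseName("^foo") ==
         std::make_pair(std::string_view("foo"), kControlSlot));
  // Digits without a colon belong to the node name, port defaults to 0.
  assert(ParseName("xyz1_17") ==
         std::make_pair(std::string_view("xyz1_17"), 0));
}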
cpp
abseil/abseil-cpp
globals
absl/log/internal/globals.cc
absl/log/globals_test.cc
#include "absl/log/internal/globals.h" #include <atomic> #include <cstdio> #if defined(__EMSCRIPTEN__) #include <emscripten/console.h> #endif #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/log_severity.h" #include "absl/strings/string_view.h" #include "absl/strings/strip.h" #include "absl/time/time.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace log_internal { namespace { ABSL_CONST_INIT std::atomic<bool> logging_initialized(false); ABSL_CONST_INIT std::atomic<absl::TimeZone*> timezone_ptr{nullptr}; ABSL_CONST_INIT std::atomic<bool> symbolize_stack_trace(true); ABSL_CONST_INIT std::atomic<int> max_frames_in_stack_trace(64); ABSL_CONST_INIT std::atomic<bool> exit_on_dfatal(true); ABSL_CONST_INIT std::atomic<bool> suppress_sigabort_trace(false); } bool IsInitialized() { return logging_initialized.load(std::memory_order_acquire); } void SetInitialized() { logging_initialized.store(true, std::memory_order_release); } void WriteToStderr(absl::string_view message, absl::LogSeverity severity) { if (message.empty()) return; #if defined(__EMSCRIPTEN__) const auto message_minus_newline = absl::StripSuffix(message, "\n"); #if ABSL_INTERNAL_EMSCRIPTEN_VERSION >= 3001043 emscripten_errn(message_minus_newline.data(), message_minus_newline.size()); #else std::string null_terminated_message(message_minus_newline); _emscripten_err(null_terminated_message.c_str()); #endif #else std::fwrite(message.data(), message.size(), 1, stderr); #endif #if defined(_WIN64) || defined(_WIN32) || defined(_WIN16) if (severity >= absl::LogSeverity::kWarning) { std::fflush(stderr); } #else (void)severity; #endif } void SetTimeZone(absl::TimeZone tz) { absl::TimeZone* expected = nullptr; absl::TimeZone* new_tz = new absl::TimeZone(tz); if (!timezone_ptr.compare_exchange_strong(expected, new_tz, std::memory_order_release, std::memory_order_relaxed)) { ABSL_RAW_LOG(FATAL, "absl::log_internal::SetTimeZone() has already been called"); } } const absl::TimeZone* TimeZone() { return timezone_ptr.load(std::memory_order_acquire); } bool ShouldSymbolizeLogStackTrace() { return symbolize_stack_trace.load(std::memory_order_acquire); } void EnableSymbolizeLogStackTrace(bool on_off) { symbolize_stack_trace.store(on_off, std::memory_order_release); } int MaxFramesInLogStackTrace() { return max_frames_in_stack_trace.load(std::memory_order_acquire); } void SetMaxFramesInLogStackTrace(int max_num_frames) { max_frames_in_stack_trace.store(max_num_frames, std::memory_order_release); } bool ExitOnDFatal() { return exit_on_dfatal.load(std::memory_order_acquire); } void SetExitOnDFatal(bool on_off) { exit_on_dfatal.store(on_off, std::memory_order_release); } bool SuppressSigabortTrace() { return suppress_sigabort_trace.load(std::memory_order_acquire); } bool SetSuppressSigabortTrace(bool on_off) { return suppress_sigabort_trace.exchange(on_off); } } ABSL_NAMESPACE_END }
#include "absl/log/globals.h" #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/attributes.h" #include "absl/base/log_severity.h" #include "absl/log/internal/globals.h" #include "absl/log/internal/test_helpers.h" #include "absl/log/log.h" #include "absl/log/scoped_mock_log.h" namespace { using ::testing::_; using ::testing::StrEq; auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment( new absl::log_internal::LogTestEnvironment); constexpr static absl::LogSeverityAtLeast DefaultMinLogLevel() { return absl::LogSeverityAtLeast::kInfo; } constexpr static absl::LogSeverityAtLeast DefaultStderrThreshold() { return absl::LogSeverityAtLeast::kError; } TEST(TestGlobals, MinLogLevel) { EXPECT_EQ(absl::MinLogLevel(), DefaultMinLogLevel()); absl::SetMinLogLevel(absl::LogSeverityAtLeast::kError); EXPECT_EQ(absl::MinLogLevel(), absl::LogSeverityAtLeast::kError); absl::SetMinLogLevel(DefaultMinLogLevel()); } TEST(TestGlobals, ScopedMinLogLevel) { EXPECT_EQ(absl::MinLogLevel(), DefaultMinLogLevel()); { absl::log_internal::ScopedMinLogLevel scoped_stderr_threshold( absl::LogSeverityAtLeast::kError); EXPECT_EQ(absl::MinLogLevel(), absl::LogSeverityAtLeast::kError); } EXPECT_EQ(absl::MinLogLevel(), DefaultMinLogLevel()); } TEST(TestGlobals, StderrThreshold) { EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold()); absl::SetStderrThreshold(absl::LogSeverityAtLeast::kError); EXPECT_EQ(absl::StderrThreshold(), absl::LogSeverityAtLeast::kError); absl::SetStderrThreshold(DefaultStderrThreshold()); } TEST(TestGlobals, ScopedStderrThreshold) { EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold()); { absl::ScopedStderrThreshold scoped_stderr_threshold( absl::LogSeverityAtLeast::kError); EXPECT_EQ(absl::StderrThreshold(), absl::LogSeverityAtLeast::kError); } EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold()); } TEST(TestGlobals, LogBacktraceAt) { EXPECT_FALSE(absl::log_internal::ShouldLogBacktraceAt("some_file.cc", 111)); absl::SetLogBacktraceLocation("some_file.cc", 111); EXPECT_TRUE(absl::log_internal::ShouldLogBacktraceAt("some_file.cc", 111)); EXPECT_FALSE( absl::log_internal::ShouldLogBacktraceAt("another_file.cc", 222)); } TEST(TestGlobals, LogPrefix) { EXPECT_TRUE(absl::ShouldPrependLogPrefix()); absl::EnableLogPrefix(false); EXPECT_FALSE(absl::ShouldPrependLogPrefix()); absl::EnableLogPrefix(true); EXPECT_TRUE(absl::ShouldPrependLogPrefix()); } TEST(TestGlobals, SetGlobalVLogLevel) { EXPECT_EQ(absl::SetGlobalVLogLevel(42), 0); EXPECT_EQ(absl::SetGlobalVLogLevel(1337), 42); EXPECT_EQ(absl::SetGlobalVLogLevel(0), 1337); } TEST(TestGlobals, SetVLogLevel) { EXPECT_EQ(absl::SetVLogLevel("setvloglevel", 42), 0); EXPECT_EQ(absl::SetVLogLevel("setvloglevel", 1337), 42); EXPECT_EQ(absl::SetVLogLevel("othersetvloglevel", 50), 0); EXPECT_EQ(absl::SetVLogLevel("*pattern*", 1), 0); EXPECT_EQ(absl::SetVLogLevel("*less_generic_pattern*", 2), 1); EXPECT_EQ(absl::SetVLogLevel("pattern_match", 3), 1); EXPECT_EQ(absl::SetVLogLevel("less_generic_pattern_match", 4), 2); } TEST(TestGlobals, AndroidLogTag) { EXPECT_DEATH_IF_SUPPORTED(absl::SetAndroidNativeTag(nullptr), ".*"); EXPECT_THAT(absl::log_internal::GetAndroidNativeTag(), StrEq("native")); absl::SetAndroidNativeTag("test_tag"); EXPECT_THAT(absl::log_internal::GetAndroidNativeTag(), StrEq("test_tag")); EXPECT_DEATH_IF_SUPPORTED(absl::SetAndroidNativeTag("test_tag_fail"), ".*"); } TEST(TestExitOnDFatal, OffTest) { absl::log_internal::SetExitOnDFatal(false); EXPECT_FALSE(absl::log_internal::ExitOnDFatal()); { 
absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected); EXPECT_CALL(log, Log(absl::kLogDebugFatal, _, "This should not be fatal")); log.StartCapturingLogs(); LOG(DFATAL) << "This should not be fatal"; } } #if GTEST_HAS_DEATH_TEST TEST(TestDeathWhileExitOnDFatal, OnTest) { absl::log_internal::SetExitOnDFatal(true); EXPECT_TRUE(absl::log_internal::ExitOnDFatal()); EXPECT_DEBUG_DEATH({ LOG(DFATAL) << "This should be fatal in debug mode"; }, "This should be fatal in debug mode"); } #endif }
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/internal/globals.cc
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/globals_test.cc
03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4
19afc211-8c06-4f02-9ab1-ee2b82b6cba1
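The globals above back a small set of public knobs; the ones the test file exercises (SetMinLogLevel, SetStderrThreshold, EnableLogPrefix) are the usual entry points. A short usage sketch, assuming the standard Abseil log targets are linked and logging is initialized via absl/log/initialize.h:

#include "absl/log/globals.h"
#include "absl/log/initialize.h"
#include "absl/log/log.h"

int main() {
  absl::InitializeLog();
  // Drop anything below WARNING entirely...
  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kWarning);
  // ...and mirror everything that survives to stderr.
  absl::SetStderrThreshold(absl::LogSeverityAtLeast::kWarning);
  // Suppress the "W0102 12:34:56 ..." line prefix.
  absl::EnableLogPrefix(false);

  LOG(INFO) << "filtered out by the min log level";
  LOG(WARNING) << "printed, without a prefix";
}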
cpp
tensorflow/tensorflow
strcat
third_party/xla/third_party/tsl/tsl/platform/strcat.cc
third_party/xla/third_party/tsl/tsl/platform/strcat_test.cc
#include "tsl/platform/strcat.h" #include <stdarg.h> #include <stdint.h> #include <stdio.h> #include <string.h> #include <algorithm> #include "absl/meta/type_traits.h" #include "tsl/platform/logging.h" namespace tsl { namespace strings { AlphaNum::AlphaNum(Hex hex) { char *const end = &digits_[kFastToBufferSize]; char *writer = end; uint64 value = hex.value; uint64 width = hex.spec; uint64 mask = (static_cast<uint64>(1) << (width - 1) * 4) | value; static const char hexdigits[] = "0123456789abcdef"; do { *--writer = hexdigits[value & 0xF]; value >>= 4; mask >>= 4; } while (mask != 0); piece_ = absl::string_view(writer, end - writer); } static char *Append1(char *out, const AlphaNum &x) { if (x.data() == nullptr) return out; memcpy(out, x.data(), x.size()); return out + x.size(); } static char *Append2(char *out, const AlphaNum &x1, const AlphaNum &x2) { if (x1.data() != nullptr) { memcpy(out, x1.data(), x1.size()); out += x1.size(); } if (x2.data() == nullptr) return out; memcpy(out, x2.data(), x2.size()); return out + x2.size(); } static char *Append4(char *out, const AlphaNum &x1, const AlphaNum &x2, const AlphaNum &x3, const AlphaNum &x4) { if (x1.data() != nullptr) { memcpy(out, x1.data(), x1.size()); out += x1.size(); } if (x2.data() != nullptr) { memcpy(out, x2.data(), x2.size()); out += x2.size(); } if (x3.data() != nullptr) { memcpy(out, x3.data(), x3.size()); out += x3.size(); } if (x4.data() == nullptr) return out; memcpy(out, x4.data(), x4.size()); return out + x4.size(); } string StrCat(const AlphaNum &a) { return string(a.data(), a.size()); } string StrCat(const AlphaNum &a, const AlphaNum &b) { string result(a.size() + b.size(), '\0'); char *const begin = &*result.begin(); char *out = Append2(begin, a, b); DCHECK_EQ(out, begin + result.size()); return result; } string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c) { string result(a.size() + b.size() + c.size(), '\0'); char *const begin = &*result.begin(); char *out = Append2(begin, a, b); out = Append1(out, c); DCHECK_EQ(out, begin + result.size()); return result; } string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c, const AlphaNum &d) { string result(a.size() + b.size() + c.size() + d.size(), '\0'); char *const begin = &*result.begin(); char *out = Append4(begin, a, b, c, d); DCHECK_EQ(out, begin + result.size()); return result; } namespace { template <typename string_type, typename = void> struct ResizeUninitializedTraits { using HasMember = std::false_type; static void Resize(string_type *s, size_t new_size) { s->resize(new_size); } }; template <typename string_type> struct ResizeUninitializedTraits< string_type, absl::void_t<decltype(std::declval<string_type &>() .__resize_default_init(237))> > { using HasMember = std::true_type; static void Resize(string_type *s, size_t new_size) { s->__resize_default_init(new_size); } }; static inline void STLStringResizeUninitialized(string *s, size_t new_size) { ResizeUninitializedTraits<string>::Resize(s, new_size); } template <typename string_type> void STLStringReserveAmortized(string_type *s, size_t new_size) { const size_t cap = s->capacity(); if (new_size > cap) { s->reserve((std::max)(new_size, 2 * cap)); } } template <typename string_type> void STLStringResizeUninitializedAmortized(string_type *s, size_t new_size) { STLStringReserveAmortized(s, new_size); STLStringResizeUninitialized(s, new_size); } } namespace internal { string CatPieces(std::initializer_list<absl::string_view> pieces) { size_t total_size = 0; for (const absl::string_view 
piece : pieces) total_size += piece.size(); string result(total_size, '\0'); char *const begin = &*result.begin(); char *out = begin; for (const absl::string_view piece : pieces) { const size_t this_size = piece.size(); memcpy(out, piece.data(), this_size); out += this_size; } DCHECK_EQ(out, begin + result.size()); return result; } #define DCHECK_NO_OVERLAP(dest, src) \ DCHECK_GE(uintptr_t((src).data() - (dest).data()), uintptr_t((dest).size())) void AppendPieces(string *result, std::initializer_list<absl::string_view> pieces) { size_t old_size = result->size(); size_t total_size = old_size; for (const absl::string_view piece : pieces) { DCHECK_NO_OVERLAP(*result, piece); total_size += piece.size(); } STLStringResizeUninitializedAmortized(result, total_size); char *const begin = &*result->begin(); char *out = begin + old_size; for (const absl::string_view piece : pieces) { const size_t this_size = piece.size(); memcpy(out, piece.data(), this_size); out += this_size; } DCHECK_EQ(out, begin + result->size()); } } void StrAppend(string *result, const AlphaNum &a) { DCHECK_NO_OVERLAP(*result, a); result->append(a.data(), a.size()); } void StrAppend(string *result, const AlphaNum &a, const AlphaNum &b) { DCHECK_NO_OVERLAP(*result, a); DCHECK_NO_OVERLAP(*result, b); string::size_type old_size = result->size(); STLStringResizeUninitializedAmortized(result, old_size + a.size() + b.size()); char *const begin = &*result->begin(); char *out = Append2(begin + old_size, a, b); DCHECK_EQ(out, begin + result->size()); } void StrAppend(string *result, const AlphaNum &a, const AlphaNum &b, const AlphaNum &c) { DCHECK_NO_OVERLAP(*result, a); DCHECK_NO_OVERLAP(*result, b); DCHECK_NO_OVERLAP(*result, c); string::size_type old_size = result->size(); STLStringResizeUninitializedAmortized( result, old_size + a.size() + b.size() + c.size()); char *const begin = &*result->begin(); char *out = Append2(begin + old_size, a, b); out = Append1(out, c); DCHECK_EQ(out, begin + result->size()); } void StrAppend(string *result, const AlphaNum &a, const AlphaNum &b, const AlphaNum &c, const AlphaNum &d) { DCHECK_NO_OVERLAP(*result, a); DCHECK_NO_OVERLAP(*result, b); DCHECK_NO_OVERLAP(*result, c); DCHECK_NO_OVERLAP(*result, d); string::size_type old_size = result->size(); STLStringResizeUninitializedAmortized( result, old_size + a.size() + b.size() + c.size() + d.size()); char *const begin = &*result->begin(); char *out = Append4(begin + old_size, a, b, c, d); DCHECK_EQ(out, begin + result->size()); } } }
#include "tsl/platform/strcat.h" #include <string> #include "absl/strings/string_view.h" #include "tsl/platform/stringprintf.h" #include "tsl/platform/test.h" #include "tsl/platform/types.h" #ifdef _MSC_VER typedef ptrdiff_t ssize_t; #endif namespace tsl { namespace strings { TEST(StrCat, Ints) { const int16_t s = -1; const uint16 us = 2; const int i = -3; const unsigned int ui = 4; const int32_t l = -5; const uint32 ul = 6; const int64_t ll = -7; const uint64 ull = 8; const ptrdiff_t ptrdiff = -9; const size_t size = 10; const ssize_t ssize = -11; const intptr_t intptr = -12; const uintptr_t uintptr = 13; string answer; answer = StrCat(s, us); EXPECT_EQ(answer, "-12"); answer = StrCat(i, ui); EXPECT_EQ(answer, "-34"); answer = StrCat(l, ul); EXPECT_EQ(answer, "-56"); answer = StrCat(ll, ull); EXPECT_EQ(answer, "-78"); answer = StrCat(ptrdiff, size); EXPECT_EQ(answer, "-910"); answer = StrCat(ssize, intptr); EXPECT_EQ(answer, "-11-12"); answer = StrCat(uintptr, 0); EXPECT_EQ(answer, "130"); } TEST(StrCat, Floats) { const int s = 0; const float f = 1.5f; const double d = 1.5; const bfloat16 bf(1.5f); string answer; answer = StrCat(s, f); EXPECT_EQ(answer, "01.5"); answer = StrCat(s, d); EXPECT_EQ(answer, "01.5"); answer = StrCat(s, bf); EXPECT_EQ(answer, "01.5"); } TEST(StrCat, Nulls) { string result; absl::string_view v; string strs[] = {"Hello", "Cruel", "World"}; result = StrCat(v); EXPECT_EQ(result, ""); result = StrCat(strs[0], v); EXPECT_EQ(result, "Hello"); result = StrCat(v, strs[0]); EXPECT_EQ(result, "Hello"); result = StrCat(v, strs[0], strs[1]); EXPECT_EQ(result, "HelloCruel"); result = StrCat(strs[0], v, strs[1]); EXPECT_EQ(result, "HelloCruel"); result = StrCat(strs[0], strs[1], v); EXPECT_EQ(result, "HelloCruel"); result = StrCat(v, strs[0], strs[1], strs[2]); EXPECT_EQ(result, "HelloCruelWorld"); result = StrCat(strs[0], v, strs[1], strs[2]); EXPECT_EQ(result, "HelloCruelWorld"); result = StrCat(strs[0], strs[1], v, strs[2]); EXPECT_EQ(result, "HelloCruelWorld"); result = StrCat(strs[0], strs[1], strs[2], v); EXPECT_EQ(result, "HelloCruelWorld"); } TEST(StrCat, Basics) { string result; string strs[] = {"Hello", "Cruel", "World"}; absl::string_view pieces[] = {"Hello", "Cruel", "World"}; const char *c_strs[] = {"Hello", "Cruel", "World"}; int32 i32s[] = {'H', 'C', 'W'}; uint64 ui64s[] = {12345678910LL, 10987654321LL}; result = StrCat(false, true, 2, 3); EXPECT_EQ(result, "0123"); result = StrCat(-1); EXPECT_EQ(result, "-1"); result = StrCat(0.5); EXPECT_EQ(result, "0.5"); result = StrCat(strs[1], pieces[2]); EXPECT_EQ(result, "CruelWorld"); result = StrCat(strs[0], ", ", pieces[2]); EXPECT_EQ(result, "Hello, World"); result = StrCat(strs[0], ", ", strs[1], " ", strs[2], "!"); EXPECT_EQ(result, "Hello, Cruel World!"); result = StrCat(pieces[0], ", ", pieces[1], " ", pieces[2]); EXPECT_EQ(result, "Hello, Cruel World"); result = StrCat(c_strs[0], ", ", c_strs[1], " ", c_strs[2]); EXPECT_EQ(result, "Hello, Cruel World"); result = StrCat("ASCII ", i32s[0], ", ", i32s[1], " ", i32s[2], "!"); EXPECT_EQ(result, "ASCII 72, 67 87!"); result = StrCat(ui64s[0], ", ", ui64s[1], "!"); EXPECT_EQ(result, "12345678910, 10987654321!"); string one = "1"; result = StrCat("And a ", one.size(), " and a ", &result[2] - &result[0], " and a ", one, " 2 3 4", "!"); EXPECT_EQ(result, "And a 1 and a 2 and a 1 2 3 4!"); result = StrCat("To output a char by ASCII/numeric value, use +: ", '!' 
+ 0); EXPECT_EQ(result, "To output a char by ASCII/numeric value, use +: 33"); float f = 100000.5; result = StrCat("A hundred K and a half is ", f); EXPECT_EQ(result, "A hundred K and a half is 100000.5"); double d = f; d *= d; result = StrCat("A hundred K and a half squared is ", d); EXPECT_EQ(result, "A hundred K and a half squared is 10000100000.25"); result = StrCat(1, 2, 333, 4444, 55555, 666666, 7777777, 88888888, 999999999); EXPECT_EQ(result, "12333444455555666666777777788888888999999999"); } TEST(StrCat, MaxArgs) { string result; result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a"); EXPECT_EQ(result, "123456789a"); result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b"); EXPECT_EQ(result, "123456789ab"); result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c"); EXPECT_EQ(result, "123456789abc"); result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d"); EXPECT_EQ(result, "123456789abcd"); result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e"); EXPECT_EQ(result, "123456789abcde"); result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f"); EXPECT_EQ(result, "123456789abcdef"); result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g"); EXPECT_EQ(result, "123456789abcdefg"); result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h"); EXPECT_EQ(result, "123456789abcdefgh"); result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h", "i"); EXPECT_EQ(result, "123456789abcdefghi"); result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h", "i", "j"); EXPECT_EQ(result, "123456789abcdefghij"); result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"); EXPECT_EQ(result, "123456789abcdefghijk"); result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"); EXPECT_EQ(result, "123456789abcdefghijkl"); result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m"); EXPECT_EQ(result, "123456789abcdefghijklm"); result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n"); EXPECT_EQ(result, "123456789abcdefghijklmn"); result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o"); EXPECT_EQ(result, "123456789abcdefghijklmno"); result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p"); EXPECT_EQ(result, "123456789abcdefghijklmnop"); result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q"); EXPECT_EQ(result, "123456789abcdefghijklmnopq"); result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"); EXPECT_EQ(result, "12345678910abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"); } TEST(StrAppend, Basics) { string result = "existing text"; string strs[] = {"Hello", "Cruel", "World"}; absl::string_view pieces[] = {"Hello", "Cruel", "World"}; const char *c_strs[] = {"Hello", "Cruel", "World"}; int32 i32s[] = {'H', 'C', 'W'}; uint64 ui64s[] = {12345678910LL, 10987654321LL}; string::size_type old_size = result.size(); StrAppend(&result, strs[0]); EXPECT_EQ(result.substr(old_size), 
"Hello"); old_size = result.size(); StrAppend(&result, strs[1], pieces[2]); EXPECT_EQ(result.substr(old_size), "CruelWorld"); old_size = result.size(); StrAppend(&result, strs[0], ", ", pieces[2]); EXPECT_EQ(result.substr(old_size), "Hello, World"); old_size = result.size(); StrAppend(&result, strs[0], ", ", strs[1], " ", strs[2], "!"); EXPECT_EQ(result.substr(old_size), "Hello, Cruel World!"); old_size = result.size(); StrAppend(&result, pieces[0], ", ", pieces[1], " ", pieces[2]); EXPECT_EQ(result.substr(old_size), "Hello, Cruel World"); old_size = result.size(); StrAppend(&result, c_strs[0], ", ", c_strs[1], " ", c_strs[2]); EXPECT_EQ(result.substr(old_size), "Hello, Cruel World"); old_size = result.size(); StrAppend(&result, "ASCII ", i32s[0], ", ", i32s[1], " ", i32s[2], "!"); EXPECT_EQ(result.substr(old_size), "ASCII 72, 67 87!"); old_size = result.size(); StrAppend(&result, ui64s[0], ", ", ui64s[1], "!"); EXPECT_EQ(result.substr(old_size), "12345678910, 10987654321!"); string one = "1"; old_size = result.size(); StrAppend(&result, "And a ", one.size(), " and a ", &result[2] - &result[0], " and a ", one, " 2 3 4", "!"); EXPECT_EQ(result.substr(old_size), "And a 1 and a 2 and a 1 2 3 4!"); old_size = result.size(); StrAppend(&result, "To output a char by ASCII/numeric value, use +: ", '!' + 0); EXPECT_EQ(result.substr(old_size), "To output a char by ASCII/numeric value, use +: 33"); float f = 100000.5; old_size = result.size(); StrAppend(&result, "A hundred K and a half is ", f); EXPECT_EQ(result.substr(old_size), "A hundred K and a half is 100000.5"); double d = f; d *= d; old_size = result.size(); StrAppend(&result, "A hundred K and a half squared is ", d); EXPECT_EQ(result.substr(old_size), "A hundred K and a half squared is 10000100000.25"); old_size = result.size(); StrAppend(&result, 1, 22, 333, 4444, 55555, 666666, 7777777, 88888888, 9); EXPECT_EQ(result.substr(old_size), "1223334444555556666667777777888888889"); old_size = result.size(); StrAppend(&result, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "No limit thanks to C++11's variadic templates"); EXPECT_EQ(result.substr(old_size), "12345678910abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" "No limit thanks to C++11's variadic templates"); } TEST(StrAppend, Death) { string s = "self"; EXPECT_DEBUG_DEATH(StrAppend(&s, s.c_str() + 1), "Check failed:"); EXPECT_DEBUG_DEATH(StrAppend(&s, s), "Check failed:"); } static void CheckHex64(uint64 v) { string actual = StrCat(Hex(v, kZeroPad16)); string expected = Printf("%016llx", static_cast<unsigned long long>(v)); EXPECT_EQ(expected, actual) << " decimal value " << v; actual = StrCat(Hex(v, kZeroPad8)); expected = Printf("%08llx", static_cast<unsigned long long>(v)); EXPECT_EQ(expected, actual) << " decimal value " << v; actual = StrCat(Hex(v)); expected = Printf("%llx", static_cast<unsigned long long>(v)); EXPECT_EQ(expected, actual) << " decimal value " << v; } static void CheckHex32(uint32 v) { string actual = StrCat(Hex(v, kZeroPad8)); string expected = Printf("%08x", v); EXPECT_EQ(expected, actual) << " decimal value " << v; actual = StrCat(Hex(v)); expected = Printf("%x", v); EXPECT_EQ(expected, actual) << " decimal value " << v; } static void CheckHexSigned32(int32_t v) { string actual = StrCat(Hex(v, kZeroPad8)); string expected 
= Printf("%08x", v); EXPECT_EQ(expected, actual) << " decimal value " << v; actual = StrCat(Hex(v)); expected = Printf("%x", v); EXPECT_EQ(expected, actual) << " decimal value " << v; } static void TestFastPrints() { for (int i = 0; i < 10000; i++) { CheckHex64(i); CheckHex32(i); CheckHexSigned32(i); CheckHexSigned32(-i); } CheckHex64(0x123456789abcdef0ull); CheckHex32(0x12345678); int8_t minus_one_8bit = -1; EXPECT_EQ("ff", StrCat(Hex(minus_one_8bit))); int16_t minus_one_16bit = -1; EXPECT_EQ("ffff", StrCat(Hex(minus_one_16bit))); } TEST(Numbers, TestFunctionsMovedOverFromNumbersMain) { TestFastPrints(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/strcat.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/strcat_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ef1402fc-707f-440d-bf71-7892e6bea482
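The hex constructor above zero-pads without tracking a digit count: it ORs a sentinel bit at nibble position (width - 1) into a mask, then shifts value and mask down together, emitting digits until the mask is exhausted, which guarantees at least `width` digits. A standalone sketch of the same trick (HexPadded is a hypothetical name; the library's entry point is StrCat(Hex(v, kZeroPadN))):

#include <cassert>
#include <cstdint>
#include <string>

std::string HexPadded(uint64_t value, uint64_t width) {
  char buf[32];
  char* const end = buf + sizeof(buf);
  char* writer = end;
  // Sentinel bit at the (width-1)-th nibble forces at least `width` digits.
  uint64_t mask = (uint64_t{1} << (width - 1) * 4) | value;
  static const char hexdigits[] = "0123456789abcdef";
  do {
    *--writer = hexdigits[value & 0xF];  // emit least significant nibble
    value >>= 4;
    mask >>= 4;  // the loop ends only once the sentinel has shifted out
  } while (mask != 0);
  return std::string(writer, end - writer);
}

int main() {
  assert(HexPadded(0x74, 8) == "00000074");  // like StrCat(Hex(v, kZeroPad8))
  assert(HexPadded(0x123456789abcdef0ull, 16) == "123456789abcdef0");
  assert(HexPadded(0x74, 1) == "74");        // no padding requested
}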
cpp
google/libaddressinput
string_util
cpp/src/util/string_util.cc
cpp/test/util/string_util_test.cc
#include "string_util.h" #include <cassert> #include <cstddef> #include <stdint.h> #include <string> #include <vector> namespace i18n { namespace addressinput { std::string DoReplaceStringPlaceholders(const std::string& format_string, const std::vector<std::string>& subst) { size_t substitutions = subst.size(); size_t sub_length = 0; for (std::vector<std::string>::const_iterator iter = subst.begin(); iter != subst.end(); ++iter) { sub_length += iter->length(); } std::string formatted; formatted.reserve(format_string.length() + sub_length); for (std::string::const_iterator i = format_string.begin(); i != format_string.end(); ++i) { if ('$' == *i) { if (i + 1 != format_string.end()) { ++i; assert('$' == *i || '1' <= *i); if ('$' == *i) { while (i != format_string.end() && '$' == *i) { formatted.push_back('$'); ++i; } --i; } else { uintptr_t index = 0; while (i != format_string.end() && '0' <= *i && *i <= '9') { index *= 10; index += *i - '0'; ++i; } --i; index -= 1; if (index < substitutions) formatted.append(subst.at(index)); } } } else { formatted.push_back(*i); } } return formatted; } } }
#include "util/string_util.h" #include <string> #include <vector> #include <gtest/gtest.h> namespace { using i18n::addressinput::DoReplaceStringPlaceholders; TEST(StringUtilTest, Ok) { const std::vector<std::string> subst{ "A", "B", "C", }; EXPECT_EQ("aA,bB,cC", DoReplaceStringPlaceholders("a$1,b$2,c$3", subst)); } TEST(StringUtilTest, FewParameters) { const std::vector<std::string> subst{ "A", "B", "C", }; EXPECT_EQ("aA,bB,cC,d,aA", DoReplaceStringPlaceholders("a$1,b$2,c$3,d$4,a$1", subst)); } TEST(StringUtilTest, MoreThan9Parameters) { const std::vector<std::string> subst{ "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", }; EXPECT_EQ("aA,bB,cC,dD,eE,fF,gG,hH,iI,jJ,kK,aA", DoReplaceStringPlaceholders("a$1,b$2,c$3,d$4,e$5,f$6,g$7,h$8,i$9," "j$10,k$11,a$1", subst)); } TEST(StringUtilTest, ConsecutiveDollarSigns) { const std::vector<std::string> subst{ "A", "B", "C", }; EXPECT_EQ("$1 $$2 $$$3", DoReplaceStringPlaceholders("$$1 $$$2 $$$$3", subst)); } }
https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/util/string_util.cc
https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/util/string_util_test.cc
2610f7b1043d6784ada41392fc9392d1ea09ea07
62bf0644-5132-4e87-bff3-4b0982acdd07
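A usage sketch for DoReplaceStringPlaceholders (assuming the libaddressinput sources are on the include path): placeholders are 1-based, may repeat, may exceed $9, out-of-range placeholders expand to nothing, and a run of n consecutive '$' followed by a digit emits n-1 literal '$' with the digit kept verbatim, as the ConsecutiveDollarSigns test above shows.

#include <cassert>
#include <string>
#include <vector>

#include "util/string_util.h"

int main() {
  using i18n::addressinput::DoReplaceStringPlaceholders;
  const std::vector<std::string> subst{"Main St", "42"};

  // Placeholders may appear in any order.
  assert(DoReplaceStringPlaceholders("$2 $1", subst) == "42 Main St");
  // "$$1" collapses to a literal "$1" (no substitution happens).
  assert(DoReplaceStringPlaceholders("$$1 costs $1", subst) ==
         "$1 costs Main St");
  // Out-of-range placeholders expand to the empty string.
  assert(DoReplaceStringPlaceholders("$3", subst).empty());
}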
cpp
tensorflow/tensorflow
infeed_token_propagation
third_party/xla/xla/service/infeed_token_propagation.cc
third_party/xla/xla/service/infeed_token_propagation_test.cc
#include "xla/service/infeed_token_propagation.h" #include <cstdint> #include <string_view> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/call_graph.h" #include "xla/service/hlo_dce.h" #include "xla/service/tuple_simplifier.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace { bool IsDanglingInfeed(HloInstruction* infeed) { CHECK(infeed->opcode() == HloOpcode::kInfeed); if (infeed->has_sharding()) { return false; } if (const HloInstruction* after_all = infeed->operand(0); after_all->opcode() != HloOpcode::kAfterAll || after_all->operand_count() != 0) { return false; } for (const HloInstruction* user : infeed->users()) { if (user->opcode() == HloOpcode::kGetTupleElement && user->tuple_index() == 1) { return false; } } return true; } bool IsDanglingOutfeed(HloInstruction* outfeed) { CHECK(outfeed->opcode() == HloOpcode::kOutfeed); if (outfeed->has_sharding()) { return false; } if (const HloInstruction* after_all = outfeed->operand(1); after_all->opcode() != HloOpcode::kAfterAll || after_all->operand_count() != 0) { return false; } if (outfeed->user_count() != 0) { return false; } return true; } HloInstruction* ReconstructTuple(HloInstruction* tuple) { CHECK(tuple->shape().IsTuple()); HloComputation* computation = tuple->parent(); std::vector<HloInstruction*> gtes; gtes.resize(tuple->shape().tuple_shapes_size()); for (int64_t idx = 0; idx < gtes.size(); ++idx) { gtes[idx] = computation->AddInstruction( HloInstruction::CreateGetTupleElement(tuple, idx)); } return computation->AddInstruction(HloInstruction::CreateTuple(gtes)); } absl::StatusOr<HloInstruction*> InsertTokenIntoTuple(HloInstruction* tuple, bool add_token_operand) { CHECK(tuple->shape().IsTuple()); HloComputation* computation = tuple->parent(); std::vector<HloInstruction*> original_users = tuple->users(); HloInstruction* original_tuple = ReconstructTuple(tuple); for (HloInstruction* original_user : original_users) { for (int64_t idx : original_user->operand_indices(tuple)) { TF_RETURN_IF_ERROR( original_user->ReplaceOperandWith(idx, original_tuple)); } } *tuple->mutable_shape()->add_tuple_shapes() = ShapeUtil::MakeTokenShape(); if (add_token_operand) { tuple->AppendOperand( computation->AddInstruction(HloInstruction::CreateToken())); } HloInstruction* input_token_gte = computation->AddInstruction(HloInstruction::CreateGetTupleElement( tuple, tuple->shape().tuple_shapes_size() - 1)); return input_token_gte; } } absl::Status CanonicalizeConditionalInstruction(HloInstruction* conditional) { CHECK_EQ(conditional->opcode(), HloOpcode::kConditional); for (HloComputation* branch : conditional->branch_computations()) { HloInstruction* parameter = branch->parameter_instruction(0); if (!parameter->shape().IsTuple()) { *parameter->mutable_shape() = ShapeUtil::MakeTupleShape({parameter->shape()}); HloInstruction* original = branch->AddInstruction( HloInstruction::CreateGetTupleElement(parameter, 0)); TF_RETURN_IF_ERROR(parameter->ReplaceAllUsesWithDifferentShape(original)); } int64_t branch_operand_idx = conditional->branch_index(branch) + 1; HloInstruction* branch_tuple = 
conditional->mutable_operand(branch_operand_idx); if (!branch_tuple->shape().IsTuple()) { branch_tuple = conditional->parent()->AddInstruction( HloInstruction::CreateTuple({branch_tuple})); TF_RETURN_IF_ERROR(conditional->ReplaceOperandWithDifferentShape( branch_operand_idx, branch_tuple)); } if (branch_tuple->opcode() == HloOpcode::kParameter) { branch_tuple = ReconstructTuple(branch_tuple); TF_RETURN_IF_ERROR( conditional->ReplaceOperandWith(branch_operand_idx, branch_tuple)); } HloInstruction* root = branch->root_instruction(); if (root->opcode() != HloOpcode::kTuple) { root = ReconstructTuple(root); branch->set_root_instruction(root); } } CHECK(conditional->shape().IsTuple()); if (conditional->IsRoot()) { HloInstruction* new_root = ReconstructTuple(conditional); conditional->parent()->set_root_instruction(new_root); } return absl::OkStatus(); } absl::Status CanonicalizeWhileInstruction(HloInstruction* loop) { CHECK_EQ(loop->opcode(), HloOpcode::kWhile); HloComputation* body = loop->while_body(); HloComputation* cond = loop->while_condition(); HloInstruction* body_parameter = body->parameter_instruction(0); if (!body_parameter->shape().IsTuple()) { *body_parameter->mutable_shape() = ShapeUtil::MakeTupleShape({body_parameter->shape()}); HloInstruction* original = body->AddInstruction( HloInstruction::CreateGetTupleElement(body_parameter, 0)); TF_RETURN_IF_ERROR( body_parameter->ReplaceAllUsesWithDifferentShape(original)); } HloInstruction* root = body->root_instruction(); if (!root->shape().IsTuple()) { root = body->AddInstruction(HloInstruction::CreateTuple({root})); body->set_root_instruction(root, true); } HloInstruction* cond_parameter = cond->parameter_instruction(0); if (!cond_parameter->shape().IsTuple()) { *cond_parameter->mutable_shape() = ShapeUtil::MakeTupleShape({cond_parameter->shape()}); HloInstruction* original = cond->AddInstruction( HloInstruction::CreateGetTupleElement(cond_parameter, 0)); TF_RETURN_IF_ERROR( cond_parameter->ReplaceAllUsesWithDifferentShape(original)); } if (!loop->shape().IsTuple()) { *loop->mutable_shape() = ShapeUtil::MakeTupleShape({loop->shape()}); HloInstruction* original = loop->parent()->AddInstruction( HloInstruction::CreateGetTupleElement(loop, 0)); TF_RETURN_IF_ERROR(loop->ReplaceAllUsesWithDifferentShape(original)); } HloInstruction* loop_tuple = loop->mutable_operand(0); if (!loop_tuple->shape().IsTuple()) { loop_tuple = loop->parent()->AddInstruction( HloInstruction::CreateTuple({loop_tuple})); TF_RETURN_IF_ERROR(loop->ReplaceOperandWithDifferentShape(0, loop_tuple)); } if (loop_tuple->opcode() == HloOpcode::kParameter) { loop_tuple = ReconstructTuple(loop_tuple); TF_RETURN_IF_ERROR(loop->ReplaceOperandWith(0, loop_tuple)); } if (root->opcode() != HloOpcode::kTuple) { root = ReconstructTuple(root); body->set_root_instruction(root); } if (loop->IsRoot()) { HloInstruction* new_root = ReconstructTuple(loop); loop->parent()->set_root_instruction(new_root); } return absl::OkStatus(); } absl::Status InfeedTokenPropagation::PropagateTokenThroughConditionalBranch() { HloComputation* comp = dangling_instruction_->parent(); dangling_instruction_ = call_graph_->GetComputationCallers(comp)[0]; CHECK_EQ(dangling_instruction_->opcode(), HloOpcode::kConditional); for (HloComputation* branch : dangling_instruction_->branch_computations()) { HloInstruction* root = branch->root_instruction(); if (branch == comp) { TF_RETURN_IF_ERROR( InsertTokenIntoTuple(root, false).status()); root->AppendOperand(output_token_); } else { TF_RETURN_IF_ERROR( 
InsertTokenIntoTuple(root, true).status()); } } HloInstruction* parameter = comp->parameter_instruction(0); TF_ASSIGN_OR_RETURN( HloInstruction * input_token_gte, InsertTokenIntoTuple(parameter, false)); TF_RETURN_IF_ERROR(input_token_->ReplaceAllUsesWith(input_token_gte)); int64_t branch_operand_idx = dangling_instruction_->branch_index(comp) + 1; HloInstruction* branch_tuple = dangling_instruction_->mutable_operand(branch_operand_idx); TF_ASSIGN_OR_RETURN( HloInstruction * next_input_token_gte, InsertTokenIntoTuple(branch_tuple, true)); TF_RETURN_IF_ERROR(dangling_instruction_->ReplaceOperandWithDifferentShape( branch_operand_idx, branch_tuple)); input_token_ = branch_tuple->mutable_operand(next_input_token_gte->tuple_index()); TF_ASSIGN_OR_RETURN( output_token_, InsertTokenIntoTuple(dangling_instruction_, false)); return absl::OkStatus(); } absl::Status InfeedTokenPropagation::PropagateTokenThroughWhileBody() { HloComputation* comp = dangling_instruction_->parent(); dangling_instruction_ = call_graph_->GetComputationCallers(comp)[0]; CHECK_EQ(dangling_instruction_->opcode(), HloOpcode::kWhile); HloInstruction* root = comp->root_instruction(); TF_RETURN_IF_ERROR( InsertTokenIntoTuple(root, false).status()); root->AppendOperand(output_token_); HloInstruction* body_parameter = comp->parameter_instruction(0); TF_ASSIGN_OR_RETURN( HloInstruction * input_token_gte, InsertTokenIntoTuple(body_parameter, false)); TF_RETURN_IF_ERROR(input_token_->ReplaceAllUsesWith(input_token_gte)); HloComputation* cond = dangling_instruction_->while_condition(); HloInstruction* cond_parameter = cond->parameter_instruction(0); TF_RETURN_IF_ERROR( InsertTokenIntoTuple(cond_parameter, false) .status()); HloInstruction* while_tuple = dangling_instruction_->mutable_operand(0); TF_ASSIGN_OR_RETURN( input_token_, InsertTokenIntoTuple(while_tuple, true)); TF_RETURN_IF_ERROR( dangling_instruction_->ReplaceOperandWithDifferentShape(0, while_tuple)); TF_ASSIGN_OR_RETURN( output_token_, InsertTokenIntoTuple(dangling_instruction_, false)); return absl::OkStatus(); } absl::Status InfeedTokenPropagation::PropagateToken() { HloComputation* comp = dangling_instruction_->parent(); if (comp->IsEntryComputation()) { return absl::OkStatus(); } VLOG(2) << "Propagating tokens for: " << dangling_instruction_->name(); HloInstruction* caller = call_graph_->GetComputationCallers(comp)[0]; if (caller->has_sharding()) { return absl::OkStatus(); } if (caller->opcode() == HloOpcode::kConditional) { TF_RETURN_IF_ERROR(CanonicalizeConditionalInstruction(caller)); TF_RETURN_IF_ERROR(PropagateTokenThroughConditionalBranch()); } else if (caller->opcode() == HloOpcode::kWhile && comp == caller->while_body()) { TF_RETURN_IF_ERROR(CanonicalizeWhileInstruction(caller)); TF_RETURN_IF_ERROR(PropagateTokenThroughWhileBody()); } else { VLOG(2) << "Unhandled computation: " << comp->name(); return absl::OkStatus(); } return PropagateToken(); } absl::StatusOr<bool> InfeedTokenPropagation::Run( HloModule* module, const absl::flat_hash_set<std::string_view>& execution_threads) { VLOG(5) << "Before InfeedTokenPropagation:"; XLA_VLOG_LINES(5, module->ToString()); std::vector<HloInstruction*> dangling_infeeds; std::vector<HloInstruction*> dangling_outfeeds; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { if (!computation->IsEntryComputation()) { for (HloInstruction* instruction : computation->instructions()) { if (instruction->opcode() == HloOpcode::kInfeed && IsDanglingInfeed(instruction)) { VLOG(1) << "Found dangling 
infeed: " << instruction->ToString(); dangling_infeeds.push_back(instruction); } else if (instruction->opcode() == HloOpcode::kOutfeed && IsDanglingOutfeed(instruction)) { VLOG(1) << "Found dangling outfeed: " << instruction->ToString(); dangling_outfeeds.push_back(instruction); } } } } bool changed = !dangling_infeeds.empty() || !dangling_outfeeds.empty(); if (changed) { call_graph_ = CallGraph::Build(module); if (!call_graph_->IsFlattened()) { return FailedPrecondition( "Call graph must be flattened before infeed token propagation."); } } for (HloInstruction* dangling_infeed : dangling_infeeds) { dangling_instruction_ = dangling_infeed; input_token_ = dangling_infeed->mutable_operand(0); output_token_ = dangling_infeed->AddInstruction( HloInstruction::CreateGetTupleElement(dangling_infeed, 1)); TF_RETURN_IF_ERROR(PropagateToken()); } for (HloInstruction* dangling_outfeed : dangling_outfeeds) { dangling_instruction_ = dangling_outfeed; input_token_ = dangling_outfeed->mutable_operand(1); output_token_ = dangling_outfeed; TF_RETURN_IF_ERROR(PropagateToken()); } if (changed) { TF_RETURN_IF_ERROR( TupleSimplifier().Run(module, execution_threads).status()); TF_RETURN_IF_ERROR(HloDCE().Run(module, execution_threads).status()); } VLOG(5) << "After InfeedTokenPropagation:"; XLA_VLOG_LINES(5, module->ToString()); return changed; } }
#include "xla/service/infeed_token_propagation.h" #include <string_view> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace op = xla::testing::opcode_matchers; namespace xla { namespace { class InfeedTokenPropagationTest : public HloTestBase { protected: InfeedTokenPropagationTest() = default; }; TEST_F(InfeedTokenPropagationTest, EntryComputationInfeed) { constexpr std::string_view hlo = R"( HloModule main ENTRY main { token.0 = after-all() infeed.0 = (s32[], token[]) infeed(token.0) ROOT gte.0 = get-tuple-element(infeed.0), index=0 } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_FALSE(changed); } TEST_F(InfeedTokenPropagationTest, EntryComputationOutfeed) { constexpr std::string_view hlo = R"( HloModule main ENTRY main { arg.0 = s32[] parameter(0) tuple.0 = tuple(arg.0) token.0 = after-all() outfeed.0 = token[] outfeed(tuple.0, token.0), outfeed_shape=(s32[]) ROOT tuple.1 = tuple() } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_FALSE(changed); } TEST_F(InfeedTokenPropagationTest, ConditionalInfeed) { constexpr std::string_view hlo = R"( HloModule main true_comp { arg.0 = () parameter(0) token.0 = after-all() infeed.0 = (s32[], token[]) infeed(token.0) ROOT tuple.0 = tuple() } false_comp { arg.0 = () parameter(0) ROOT tuple.0 = tuple() } ENTRY main { pred.0 = pred[] constant(true) true_tuple.0 = tuple() false_tuple.0 = tuple() ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* cond = FindInstruction(module.get(), "cond.0"); EXPECT_EQ(cond->shape().tuple_shapes_size(), 1); EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken()); HloInstruction* true_tuple = FindInstruction(module.get(), "true_tuple.0"); EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 1); EXPECT_TRUE(true_tuple->shape().tuple_shapes()[0].IsToken()); HloInstruction* false_tuple = FindInstruction(module.get(), "false_tuple.0"); EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0); HloComputation* true_comp = FindComputation(module.get(), "true_comp"); EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::GetTupleElement(op::Infeed(), 1))); HloComputation* false_comp = FindComputation(module.get(), "false_comp"); EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll())); } TEST_F(InfeedTokenPropagationTest, ConditionalOutfeed) { constexpr std::string_view hlo = R"( HloModule main true_comp { arg.0 = (s32[]) parameter(0) token.0 = after-all() outfeed.0 = token[] outfeed(arg.0, token.0), outfeed_shape=(s32[]) ROOT tuple.0 = tuple() } false_comp { arg.0 = () parameter(0) ROOT tuple.0 = tuple() } ENTRY main { arg.0 = s32[] parameter(0) pred.0 = pred[] constant(true) true_tuple.0 = tuple(arg.0) false_tuple.0 = tuple() ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, 
ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* cond = FindInstruction(module.get(), "cond.0"); EXPECT_EQ(cond->shape().tuple_shapes_size(), 1); EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken()); HloInstruction* true_tuple = FindInstruction(module.get(), "true_tuple.0"); EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 2); EXPECT_TRUE(true_tuple->shape().tuple_shapes()[1].IsToken()); HloInstruction* false_tuple = FindInstruction(module.get(), "false_tuple.0"); EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0); HloComputation* true_comp = FindComputation(module.get(), "true_comp"); EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed())); HloComputation* false_comp = FindComputation(module.get(), "false_comp"); EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll())); } TEST_F(InfeedTokenPropagationTest, ConditionalDuplicateOperand) { constexpr std::string_view hlo = R"( HloModule main true_comp { arg.0 = () parameter(0) token.0 = after-all() infeed.0 = (s32[], token[]) infeed(token.0) ROOT tuple.0 = tuple() } false_comp { arg.0 = () parameter(0) ROOT tuple.0 = tuple() } ENTRY main { pred.0 = pred[] constant(true) tuple.0 = tuple() ROOT cond.0 = () conditional(pred.0, tuple.0, tuple.0), true_computation=true_comp, false_computation=false_comp } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* cond = FindInstruction(module.get(), "cond.0"); EXPECT_EQ(cond->shape().tuple_shapes_size(), 1); EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken()); const HloInstruction* true_tuple = cond->operand(1); EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 1); EXPECT_TRUE(true_tuple->shape().tuple_shapes()[0].IsToken()); const HloInstruction* false_tuple = cond->operand(2); EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0); HloComputation* true_comp = FindComputation(module.get(), "true_comp"); EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::GetTupleElement(op::Infeed(), 1))); HloComputation* false_comp = FindComputation(module.get(), "false_comp"); EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll())); } TEST_F(InfeedTokenPropagationTest, NonTupleConditional) { constexpr std::string_view hlo = R"( HloModule main true_comp { arg.0 = s32[] parameter(0) outfeed_tuple.0 = tuple(arg.0) token.0 = after-all() outfeed.0 = token[] outfeed(outfeed_tuple.0, token.0), outfeed_shape=(s32[]) ROOT tuple.0 = tuple() } false_comp { arg.0 = () parameter(0) ROOT tuple.0 = tuple() } ENTRY main { arg.0 = s32[] parameter(0) pred.0 = pred[] constant(true) false_tuple.0 = tuple() ROOT cond.0 = () conditional(pred.0, arg.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* cond = FindInstruction(module.get(), "cond.0"); EXPECT_EQ(cond->shape().tuple_shapes_size(), 1); EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken()); HloInstruction* true_tuple = cond->mutable_operand(1); EXPECT_TRUE(true_tuple->shape().IsTuple()); EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 2); EXPECT_TRUE(true_tuple->shape().tuple_shapes()[1].IsToken()); HloInstruction* false_tuple = 
FindInstruction(module.get(), "false_tuple.0"); EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0); HloComputation* true_comp = FindComputation(module.get(), "true_comp"); EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed())); HloComputation* false_comp = FindComputation(module.get(), "false_comp"); EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll())); } TEST_F(InfeedTokenPropagationTest, DisjointConditionalOutfeed) { constexpr std::string_view hlo = R"( HloModule main true_comp { ROOT arg.0 = () parameter(0) one.0 = s32[] constant(1) outfeed_tuple.0 = tuple(one.0) token.0 = after-all() outfeed.0 = token[] outfeed(outfeed_tuple.0, token.0), outfeed_shape=(s32[]) } false_comp { arg.0 = () parameter(0) ROOT tuple.0 = tuple() } ENTRY main { pred.0 = pred[] constant(true) true_tuple.0 = tuple() false_tuple.0 = tuple() ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* cond = FindInstruction(module.get(), "cond.0"); EXPECT_EQ(cond->shape().tuple_shapes_size(), 1); EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken()); HloInstruction* true_tuple = FindInstruction(module.get(), "true_tuple.0"); EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 1); EXPECT_TRUE(true_tuple->shape().tuple_shapes()[0].IsToken()); HloInstruction* false_tuple = FindInstruction(module.get(), "false_tuple.0"); EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0); HloComputation* true_comp = FindComputation(module.get(), "true_comp"); EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed())); HloComputation* false_comp = FindComputation(module.get(), "false_comp"); EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll())); } TEST_F(InfeedTokenPropagationTest, WhileInfeed) { constexpr std::string_view hlo = R"( HloModule main comp { arg.0 = () parameter(0) token.0 = after-all() infeed.0 = (s32[], token[]) infeed(token.0) ROOT tuple.0 = tuple() } cond { arg.0 = () parameter(0) ROOT true.0 = pred[] constant(true) } ENTRY main { while_tuple.0 = tuple() ROOT while.0 = () while(while_tuple.0), condition=cond, body=comp } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* loop = FindInstruction(module.get(), "while.0"); EXPECT_EQ(loop->shape().tuple_shapes_size(), 1); EXPECT_TRUE(loop->shape().tuple_shapes()[0].IsToken()); HloInstruction* loop_tuple = FindInstruction(module.get(), "while_tuple.0"); EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 1); EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[0].IsToken()); HloComputation* body_comp = FindComputation(module.get(), "comp"); EXPECT_THAT(body_comp->root_instruction(), op::Tuple(op::GetTupleElement(op::Infeed(), 1))); HloInstruction* body_param = body_comp->parameter_instruction(0); EXPECT_EQ(body_param->shape().tuple_shapes_size(), 1); EXPECT_TRUE(body_param->shape().tuple_shapes()[0].IsToken()); HloComputation* cond_comp = FindComputation(module.get(), "cond"); HloInstruction* cond_param = cond_comp->parameter_instruction(0); EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 1); EXPECT_TRUE(cond_param->shape().tuple_shapes()[0].IsToken()); } TEST_F(InfeedTokenPropagationTest, 
WhileOutfeed) { constexpr std::string_view hlo = R"( HloModule main comp { arg.0 = (s32[]) parameter(0) token.0 = after-all() outfeed.0 = token[] outfeed(arg.0, token.0), outfeed_shape=(s32[]) gte.0 = get-tuple-element(arg.0), index=0 ROOT tuple.0 = tuple(gte.0) } cond { arg.0 = (s32[]) parameter(0) ROOT true.0 = pred[] constant(true) } ENTRY main { arg.0 = s32[] parameter(0) while_tuple.0 = tuple(arg.0) ROOT while.0 = (s32[]) while(while_tuple.0), condition=cond, body=comp } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* loop = FindInstruction(module.get(), "while.0"); EXPECT_EQ(loop->shape().tuple_shapes_size(), 2); EXPECT_TRUE(loop->shape().tuple_shapes()[1].IsToken()); HloInstruction* loop_tuple = FindInstruction(module.get(), "while_tuple.0"); EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 2); EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[1].IsToken()); HloComputation* body_comp = FindComputation(module.get(), "comp"); EXPECT_THAT(body_comp->root_instruction(), op::Tuple(op::GetTupleElement(), op::Outfeed())); HloInstruction* body_param = body_comp->parameter_instruction(0); EXPECT_EQ(body_param->shape().tuple_shapes_size(), 2); EXPECT_TRUE(body_param->shape().tuple_shapes()[1].IsToken()); HloComputation* cond_comp = FindComputation(module.get(), "cond"); HloInstruction* cond_param = cond_comp->parameter_instruction(0); EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 2); EXPECT_TRUE(cond_param->shape().tuple_shapes()[1].IsToken()); } TEST_F(InfeedTokenPropagationTest, DisjointWhileOutfeed) { constexpr std::string_view hlo = R"( HloModule main comp { ROOT arg.0 = () parameter(0) one.0 = s32[] constant(1) outfeed_tuple.0 = tuple(one.0) token.0 = after-all() outfeed.0 = token[] outfeed(outfeed_tuple.0, token.0), outfeed_shape=(s32[]) } cond { arg.0 = () parameter(0) ROOT true.0 = pred[] constant(true) } ENTRY main { while_tuple.0 = tuple() ROOT while.0 = () while(while_tuple.0), condition=cond, body=comp } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* loop = FindInstruction(module.get(), "while.0"); EXPECT_EQ(loop->shape().tuple_shapes_size(), 1); EXPECT_TRUE(loop->shape().tuple_shapes()[0].IsToken()); HloInstruction* loop_tuple = FindInstruction(module.get(), "while_tuple.0"); EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 1); EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[0].IsToken()); HloComputation* body_comp = FindComputation(module.get(), "comp"); EXPECT_THAT(body_comp->root_instruction(), op::Tuple(op::Outfeed())); HloInstruction* body_param = body_comp->parameter_instruction(0); EXPECT_EQ(body_param->shape().tuple_shapes_size(), 1); EXPECT_TRUE(body_param->shape().tuple_shapes()[0].IsToken()); HloComputation* cond_comp = FindComputation(module.get(), "cond"); HloInstruction* cond_param = cond_comp->parameter_instruction(0); EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 1); EXPECT_TRUE(cond_param->shape().tuple_shapes()[0].IsToken()); } TEST_F(InfeedTokenPropagationTest, NonTupleWhile) { constexpr std::string_view hlo = R"( HloModule main comp { ROOT arg.0 = s32[] parameter(0) tuple.0 = tuple(arg.0) token.0 = after-all() outfeed.0 = token[] outfeed(tuple.0, token.0), outfeed_shape=(s32[]) } cond { arg.0 = s32[] parameter(0) ROOT true.0 = pred[] 
constant(true) } ENTRY main { arg.0 = s32[] parameter(0) ROOT while.0 = s32[] while(arg.0), condition=cond, body=comp } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* loop = FindInstruction(module.get(), "while.0"); EXPECT_TRUE(loop->shape().IsTuple()); EXPECT_EQ(loop->shape().tuple_shapes_size(), 2); EXPECT_TRUE(loop->shape().tuple_shapes()[1].IsToken()); EXPECT_THAT(loop->operand(0), op::Tuple(op::Parameter(), op::AfterAll())); HloComputation* body_comp = FindComputation(module.get(), "comp"); EXPECT_THAT(body_comp->root_instruction(), op::Tuple(op::GetTupleElement(), op::Outfeed())); HloInstruction* body_param = body_comp->parameter_instruction(0); EXPECT_EQ(body_param->shape().tuple_shapes_size(), 2); EXPECT_TRUE(body_param->shape().tuple_shapes()[1].IsToken()); HloComputation* cond_comp = FindComputation(module.get(), "cond"); HloInstruction* cond_param = cond_comp->parameter_instruction(0); EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 2); EXPECT_TRUE(cond_param->shape().tuple_shapes()[1].IsToken()); } TEST_F(InfeedTokenPropagationTest, NestedInfeedOutfeed) { constexpr std::string_view hlo = R"( HloModule main true_comp { arg.0 = (s32[]) parameter(0) token.0 = after-all() outfeed.0 = token[] outfeed(arg.0, token.0), outfeed_shape=(s32[]) ROOT tuple.0 = tuple() } false_comp { arg.0 = () parameter(0) ROOT tuple.0 = tuple() } comp { arg.0 = () parameter(0) token.0 = after-all() infeed.0 = (s32[], token[]) infeed(token.0) gte.0 = get-tuple-element(infeed.0), index=0 pred.0 = pred[] constant(true) true_tuple.0 = tuple(gte.0) false_tuple.0 = tuple() ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp } cond { arg.0 = () parameter(0) ROOT true.0 = pred[] constant(true) } ENTRY main { while_tuple.0 = tuple() ROOT while.0 = () while(while_tuple.0), condition=cond, body=comp } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); InfeedTokenPropagation itp; TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* loop = FindInstruction(module.get(), "while.0"); EXPECT_EQ(loop->shape().tuple_shapes_size(), 2); EXPECT_TRUE(loop->shape().tuple_shapes()[0].IsToken()); EXPECT_TRUE(loop->shape().tuple_shapes()[1].IsToken()); HloInstruction* loop_tuple = FindInstruction(module.get(), "while_tuple.0"); EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 2); EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[0].IsToken()); EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[1].IsToken()); HloComputation* body_comp = FindComputation(module.get(), "comp"); EXPECT_THAT(body_comp->root_instruction(), op::Tuple(op::GetTupleElement(op::Infeed(), 1), op::GetTupleElement(op::Conditional(), 0))); HloInstruction* cond = FindInstruction(module.get(), "cond.0"); EXPECT_EQ(cond->shape().tuple_shapes_size(), 1); EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken()); HloInstruction* true_tuple = FindInstruction(module.get(), "true_tuple.0"); EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 2); EXPECT_TRUE(true_tuple->shape().tuple_shapes()[1].IsToken()); HloInstruction* false_tuple = FindInstruction(module.get(), "false_tuple.0"); EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0); HloComputation* true_comp = FindComputation(module.get(), "true_comp"); EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed())); 
HloComputation* false_comp = FindComputation(module.get(), "false_comp"); EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll())); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/infeed_token_propagation.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/infeed_token_propagation_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
58e77cc6-ed69-45d3-b6b2-27bf9c79df8e
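A note on what the record above is testing: InfeedTokenPropagation rewrites while/conditional computations so infeed and outfeed tokens travel through the loop-carried tuple instead of being created locally with after-all(). The following standalone toy (an illustration only, not XLA code; Token, Infeed, and Outfeed here are hypothetical stand-ins) shows why threading the token as explicit state pins the ordering of the side effects:

#include <cstdio>

struct Token {};  // stand-in for XLA's token[] type

// Each side-effecting op consumes a token and yields a new one, so ops
// chained through tokens form a data-dependency order that must be kept.
Token Infeed(Token t, int* out) {
  static int next_value = 0;
  *out = next_value++;
  return Token{};
}
Token Outfeed(Token t, int value) {
  std::printf("outfeed: %d\n", value);
  return Token{};
}

int main() {
  Token token{};  // analogous to the after-all() that seeds the chain
  // The pass's effect, in miniature: the token is loop-carried state
  // (like the token element the tests expect in while_tuple.0), so
  // iteration i's infeed/outfeed cannot be reordered past iteration i+1's.
  for (int i = 0; i < 3; ++i) {
    int value = 0;
    token = Infeed(token, &value);
    token = Outfeed(token, value);
  }
  return 0;
}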
cpp
google/tensorstore
future_sender
tensorstore/util/execution/future_sender.h
tensorstore/util/execution/future_sender_test.cc
#ifndef TENSORSTORE_UTIL_EXECUTION_FUTURE_SENDER_H_ #define TENSORSTORE_UTIL_EXECUTION_FUTURE_SENDER_H_ #include <functional> #include <type_traits> #include <utility> #include "absl/status/status.h" #include "tensorstore/util/execution/execution.h" #include "tensorstore/util/future.h" namespace tensorstore { namespace internal_future { template <typename Receiver, typename = void, typename = void, typename = void, typename = void> struct IsFutureReceiver : public std::false_type {}; template <typename Receiver, typename T> struct IsFutureReceiver< Receiver, T, decltype(execution::set_value(std::declval<Receiver&>(), std::declval<T>())), decltype(execution::set_error(std::declval<Receiver&>(), std::declval<absl::Status>())), decltype(execution::set_cancel(std::declval<Receiver&>()))> : public std::true_type {}; } template <typename T, typename... V> std::enable_if_t<(!std::is_const_v<T> && std::is_constructible_v<typename Promise<T>::result_type, std::in_place_t, V...>)> set_value(const Promise<T>& promise, V&&... v) { promise.SetResult(std::in_place, std::forward<V>(v)...); } template <typename T, typename... V> std::enable_if_t<(!std::is_const_v<T> && std::is_constructible_v<typename Promise<T>::result_type, std::in_place_t, V...>)> set_value(std::reference_wrapper<const Promise<T>> promise, V&&... v) { set_value(promise.get(), std::forward<V>(v)...); } template <typename T> void set_error(const Promise<T>& promise, absl::Status error) { promise.SetResult(std::move(error)); } template <typename T> void set_error(std::reference_wrapper<const Promise<T>> promise, absl::Status error) { set_error(promise.get(), std::move(error)); } template <typename T> void set_cancel(const Promise<T>& promise) { promise.SetResult(absl::CancelledError("")); } template <typename T> void set_cancel(std::reference_wrapper<const Promise<T>> promise) { set_cancel(promise.get()); } template <typename T, typename Receiver> std::enable_if_t<internal_future::IsFutureReceiver<Receiver, T>::value> submit(Future<T>& f, Receiver receiver) { f.Force(); f.ExecuteWhenReady([r = std::move(receiver)](ReadyFuture<T> ready) mutable { auto& result = ready.result(); if (result.has_value()) { execution::set_value(r, result.value()); } else { auto status = ready.status(); if (status.code() == absl::StatusCode::kCancelled) { execution::set_cancel(r); } else { execution::set_error(r, std::move(status)); } } }); } template <typename T, typename Receiver> std::enable_if_t<internal_future::IsFutureReceiver<Receiver, T>::value> submit(std::reference_wrapper<Future<T>> f, Receiver&& receiver) { submit(f.get(), std::forward<Receiver>(receiver)); } template <typename T, typename Sender> Future<T> MakeSenderFuture(Sender sender) { auto pair = PromiseFuturePair<T>::Make(); struct Callback { Sender sender; void operator()(Promise<T> promise) { execution::submit(sender, promise); } }; pair.promise.ExecuteWhenForced(Callback{std::move(sender)}); return pair.future; } } #endif
#include "tensorstore/util/execution/future_sender.h" #include <string> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "tensorstore/util/execution/any_receiver.h" #include "tensorstore/util/execution/any_sender.h" #include "tensorstore/util/execution/execution.h" #include "tensorstore/util/execution/sender.h" #include "tensorstore/util/execution/sender_testutil.h" #include "tensorstore/util/future.h" #include "tensorstore/util/result.h" namespace { using ::tensorstore::Promise; using ::tensorstore::PromiseFuturePair; using ::tensorstore::Result; TEST(PromiseReceiverTest, SetCancel) { auto pair = PromiseFuturePair<int>::Make(); tensorstore::execution::set_cancel(pair.promise); EXPECT_EQ(pair.future.result(), Result<int>(absl::CancelledError(""))); } TEST(PromiseReceiverTest, AnyReceiverSetCancel) { auto pair = PromiseFuturePair<int>::Make(); tensorstore::execution::set_cancel( tensorstore::AnyReceiver<absl::Status, int>(std::cref(pair.promise))); EXPECT_EQ(pair.future.result(), Result<int>(absl::CancelledError(""))); } TEST(PromiseReceiverTest, SetValue) { auto pair = PromiseFuturePair<int>::Make(); tensorstore::execution::set_value(pair.promise, 3); EXPECT_EQ(pair.future.result(), Result<int>(3)); } TEST(PromiseReceiverTest, SetValueThenSetCancel) { auto pair = PromiseFuturePair<int>::Make(); tensorstore::execution::set_value(pair.promise, 3); tensorstore::execution::set_cancel(pair.promise); EXPECT_EQ(pair.future.result(), Result<int>(3)); } TEST(PromiseReceiverTest, AnyReceiverSetValue) { auto pair = PromiseFuturePair<int>::Make(); tensorstore::execution::set_value( tensorstore::AnyReceiver<absl::Status, int>(std::cref(pair.promise)), 3); EXPECT_EQ(pair.future.result(), Result<int>(3)); } TEST(PromiseReceiverTest, SetError) { auto pair = PromiseFuturePair<int>::Make(); tensorstore::execution::set_error( tensorstore::AnyReceiver<absl::Status, int>(pair.promise), absl::UnknownError("message")); EXPECT_EQ(pair.future.result(), Result<int>(absl::UnknownError("message"))); } TEST(PromiseReceiverTest, AnyReceiverSetError) { auto pair = PromiseFuturePair<int>::Make(); tensorstore::execution::set_error(std::cref(pair.promise), absl::UnknownError("message")); EXPECT_EQ(pair.future.result(), Result<int>(absl::UnknownError("message"))); } TEST(FutureSenderTest, SetValue) { auto pair = PromiseFuturePair<int>::Make(); bool forced = false; pair.promise.ExecuteWhenForced([&](Promise<int>) { forced = true; }); std::vector<std::string> log1, log2; tensorstore::execution::submit(pair.future, tensorstore::LoggingReceiver{&log1}); tensorstore::execution::submit(pair.future, tensorstore::LoggingReceiver{&log2}); EXPECT_THAT(log1, ::testing::ElementsAre()); EXPECT_THAT(log2, ::testing::ElementsAre()); EXPECT_TRUE(forced); pair.promise.SetResult(3); EXPECT_THAT(log1, ::testing::ElementsAre("set_value: 3")); EXPECT_THAT(log2, ::testing::ElementsAre("set_value: 3")); } TEST(FutureSenderTest, AnySenderSetValue) { auto pair = PromiseFuturePair<int>::Make(); bool forced = false; pair.promise.ExecuteWhenForced([&](Promise<int>) { forced = true; }); std::vector<std::string> log; tensorstore::execution::submit( tensorstore::AnySender<absl::Status, int>(pair.future), tensorstore::LoggingReceiver{&log}); EXPECT_THAT(log, ::testing::ElementsAre()); EXPECT_TRUE(forced); pair.promise.SetResult(3); EXPECT_THAT(log, ::testing::ElementsAre("set_value: 3")); } TEST(FutureSenderTest, SetError) { auto pair = PromiseFuturePair<int>::Make(); bool forced = false; 
pair.promise.ExecuteWhenForced([&](Promise<int>) { forced = true; }); std::vector<std::string> log; tensorstore::execution::submit(std::ref(pair.future), tensorstore::LoggingReceiver{&log}); EXPECT_THAT(log, ::testing::ElementsAre()); EXPECT_TRUE(forced); pair.promise.SetResult(absl::UnknownError("")); EXPECT_THAT(log, ::testing::ElementsAre("set_error: UNKNOWN: ")); } TEST(FutureSenderTest, AnySenderSetError) { auto pair = PromiseFuturePair<int>::Make(); bool forced = false; pair.promise.ExecuteWhenForced([&](Promise<int>) { forced = true; }); std::vector<std::string> log; tensorstore::execution::submit( tensorstore::AnySender<absl::Status, int>(pair.future), tensorstore::LoggingReceiver{&log}); EXPECT_THAT(log, ::testing::ElementsAre()); EXPECT_TRUE(forced); pair.promise.SetResult(absl::UnknownError("")); EXPECT_THAT(log, ::testing::ElementsAre("set_error: UNKNOWN: ")); } TEST(FutureSenderTest, SetCancel) { auto pair = PromiseFuturePair<int>::Make(); bool forced = false; pair.promise.ExecuteWhenForced([&](Promise<int>) { forced = true; }); std::vector<std::string> log; tensorstore::execution::submit(pair.future, tensorstore::LoggingReceiver{&log}); EXPECT_THAT(log, ::testing::ElementsAre()); EXPECT_TRUE(forced); pair.promise.SetResult(absl::CancelledError("")); EXPECT_THAT(log, ::testing::ElementsAre("set_cancel")); } TEST(FutureSenderTest, AnySenderSetCancel) { auto pair = PromiseFuturePair<int>::Make(); bool forced = false; pair.promise.ExecuteWhenForced([&](Promise<int>) { forced = true; }); std::vector<std::string> log; tensorstore::execution::submit( tensorstore::AnySender<absl::Status, int>(std::ref(pair.future)), tensorstore::LoggingReceiver{&log}); EXPECT_THAT(log, ::testing::ElementsAre()); EXPECT_TRUE(forced); pair.promise.SetResult(absl::CancelledError("")); EXPECT_THAT(log, ::testing::ElementsAre("set_cancel")); } TEST(MakeSenderFutureTest, SetValue) { auto future = tensorstore::MakeSenderFuture<int>(tensorstore::ValueSender<int>{3}); EXPECT_FALSE(future.ready()); EXPECT_EQ(future.result(), Result<int>(3)); } TEST(MakeSenderFutureTest, SetError) { auto future = tensorstore::MakeSenderFuture<int>( tensorstore::ErrorSender<absl::Status>{absl::UnknownError("")}); EXPECT_FALSE(future.ready()); EXPECT_EQ(future.result(), Result<int>(absl::UnknownError(""))); } TEST(MakeSenderFutureTest, SetCancel) { auto future = tensorstore::MakeSenderFuture<int>(tensorstore::CancelSender{}); EXPECT_FALSE(future.ready()); EXPECT_EQ(future.result(), Result<int>(absl::CancelledError(""))); } }
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/future_sender.h
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/future_sender_test.cc
4f887a6430414cd6088e1743555015b10f116d50
d3fa6f16-f4b2-4a17-85df-c9db628b7db2
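A minimal usage sketch of the adapter above, assuming a tensorstore build environment; every call mirrors one exercised in future_sender_test.cc. MakeSenderFuture wraps a sender as a Future whose promise is fulfilled only once the future is forced:

#include <cassert>

#include "tensorstore/util/execution/future_sender.h"
#include "tensorstore/util/execution/sender.h"

int main() {
  // ValueSender delivers its value to whatever receiver submit() hands it;
  // here that receiver is the Promise<int> created by MakeSenderFuture.
  auto future =
      tensorstore::MakeSenderFuture<int>(tensorstore::ValueSender<int>{42});
  assert(!future.ready());  // senders are lazy: nothing runs until forced
  assert(future.result().value() == 42);  // result() forces the promise
  return 0;
}

The symmetric direction also holds: a Promise satisfies the receiver concept (the set_value/set_error/set_cancel overloads above), so any sender can drive a Future.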
cpp
tensorflow/tensorflow
tensor_slice_dataset_op
tensorflow/core/kernels/data/tensor_slice_dataset_op.cc
tensorflow/core/kernels/data/tensor_slice_dataset_op_test.cc
#include "tensorflow/core/kernels/data/tensor_slice_dataset_op.h" #include <string> #include <utility> #include "absl/status/status.h" #include "tensorflow/core/data/dataset_utils.h" #include "tensorflow/core/data/global_shuffle_utils.h" #include "tensorflow/core/data/name_utils.h" #include "tensorflow/core/data/split_utils.h" #include "tensorflow/core/framework/dataset.h" #include "tensorflow/core/framework/partial_tensor_shape.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/util/batch_util.h" #include "tsl/platform/mutex.h" #include "tsl/platform/thread_annotations.h" namespace tensorflow { namespace data { constexpr const char* const TensorSliceDatasetOp::kDatasetType; constexpr const char* const TensorSliceDatasetOp::kComponents; constexpr const char* const TensorSliceDatasetOp::kToutputTypes; constexpr const char* const TensorSliceDatasetOp::kOutputShapes; constexpr const char* const TensorSliceDatasetOp::kIsFiles; constexpr const char* const TensorSliceDatasetOp::kReplicateOnSplit; class TensorSliceDatasetOp::Dataset : public DatasetBase { public: explicit Dataset(OpKernelContext* ctx, std::vector<Tensor> tensors, bool is_files, bool replicate_on_split) : DatasetBase(DatasetContext(ctx)), tensors_(std::move(tensors)), is_files_(is_files), replicate_on_split_(replicate_on_split) { for (const Tensor& t : tensors_) { dtypes_.push_back(t.dtype()); absl::InlinedVector<int64_t, 4UL> element_dim_sizes; for (int i = 1; i < t.dims(); ++i) { element_dim_sizes.push_back(t.dim_size(i)); } partial_shapes_.emplace_back(element_dim_sizes); shapes_.emplace_back(std::move(element_dim_sizes)); } } std::unique_ptr<IteratorBase> MakeIteratorInternal( const string& prefix) const override { return std::make_unique<Iterator>(Iterator::Params{ this, name_utils::IteratorPrefix(kDatasetType, prefix)}); } Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>* split_providers) const override { split_providers->push_back( std::make_unique<IndexSplitProvider>(tensors_[0].dim_size(0))); return absl::OkStatus(); } const DataTypeVector& output_dtypes() const override { return dtypes_; } const std::vector<PartialTensorShape>& output_shapes() const override { return partial_shapes_; } string DebugString() const override { return name_utils::DatasetDebugString(kDatasetType); } int64_t CardinalityInternal(CardinalityOptions options) const override { return tensors_[0].dim_size(0); } Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override { return absl::OkStatus(); } Status CheckExternalState() const override { return absl::OkStatus(); } Status Get(OpKernelContext* ctx, int64 index, std::vector<Tensor>* out_tensors) const override { return Get(AnyContext(ctx), index, out_tensors); } Status Get(AnyContext ctx, int64 index, std::vector<Tensor>* out_tensors) const override { TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index)); out_tensors->clear(); out_tensors->reserve(tensors_.size()); for (int i = 0; i < tensors_.size(); ++i) { out_tensors->push_back(MaybeCopySubSlice(tensors_[i], index)); } return absl::OkStatus(); } absl::Status RandomIndexingCompatible() const override { return absl::OkStatus(); } protected: Status AsGraphDefInternal(SerializationContext* ctx, DatasetGraphDefBuilder* b, Node** output) const override { std::vector<Node*> components; components.reserve(tensors_.size()); for (const Tensor& t : tensors_) { Node* node; if (!ctx->is_graph_rewrite()) { 
TF_RETURN_IF_ERROR(b->AddDatasetOrTensor(ctx, t, &node)); if (is_files_) { Node* file_node; TF_RETURN_IF_ERROR( b->AddIdentity(ctx, "FileIdentity", &node, &file_node)); } } else { TF_RETURN_IF_ERROR(b->AddPlaceholder(t, &node)); DCHECK_NE(ctx->input_list(), nullptr); ctx->input_list()->emplace_back(node->name(), t); } components.emplace_back(node); } AttrValue dtypes; b->BuildAttrValue(dtypes_, &dtypes); AttrValue is_files; b->BuildAttrValue(is_files_, &is_files); AttrValue replicate_on_split; b->BuildAttrValue(replicate_on_split_, &replicate_on_split); TF_RETURN_IF_ERROR(b->AddDataset(this, {}, {{0, components}}, {{kToutputTypes, dtypes}, {kIsFiles, is_files}, {kReplicateOnSplit, replicate_on_split}}, output)); return absl::OkStatus(); } private: class Iterator : public DatasetIterator<Dataset> { public: explicit Iterator(const Params& params) : DatasetIterator<Dataset>(params), global_shuffle_iterator_(dataset()) {} bool SymbolicCheckpointCompatible() const override { return true; } Status Initialize(IteratorContext* ctx) override { if (ctx->split_providers().empty() || dataset()->replicate_on_split_) { split_provider_ = std::make_shared<IndexSplitProvider>( dataset()->tensors_[0].dim_size(0)); } else { TF_ASSIGN_OR_RETURN(split_provider_, GetSingleSplitProvider(ctx, dataset())); } return absl::OkStatus(); } Status GetNextInternal(IteratorContext* ctx, std::vector<Tensor>* out_tensors, bool* end_of_sequence) override { if (ctx->index_mapper() != nullptr) { return global_shuffle_iterator_.GetNext(ctx, out_tensors, end_of_sequence); } Tensor split; TF_RETURN_IF_ERROR(split_provider_->GetNext(&split, end_of_sequence)); if (*end_of_sequence) { return absl::OkStatus(); } int64_t index = split.scalar<int64_t>()(); out_tensors->reserve(dataset()->tensors_.size()); for (size_t i = 0; i < dataset()->tensors_.size(); ++i) { out_tensors->push_back( MaybeCopySubSlice(dataset()->tensors_[i], index)); } *end_of_sequence = false; return absl::OkStatus(); } protected: std::shared_ptr<model::Node> CreateNode( IteratorContext* ctx, model::Node::Args args) const override { return model::MakeSourceNode(std::move(args)); } Status SaveInternal(SerializationContext* ctx, IteratorStateWriter* writer) override { TF_RETURN_IF_ERROR(split_provider_->Save( [this](const std::string& key) { return full_name(key); }, writer)); TF_RETURN_IF_ERROR(global_shuffle_iterator_.Save(prefix(), ctx, writer)); return absl::OkStatus(); } Status RestoreInternal(IteratorContext* ctx, IteratorStateReader* reader) override { if (ctx->restored_element_count().has_value()) { return global_shuffle_iterator_.Restore(prefix(), ctx, reader); } return split_provider_->Restore( [this](const std::string& key) { return full_name(key); }, reader); } private: std::shared_ptr<SplitProvider> split_provider_; GlobalShuffleIterator global_shuffle_iterator_; }; const std::vector<Tensor> tensors_; DataTypeVector dtypes_; std::vector<TensorShape> shapes_; std::vector<PartialTensorShape> partial_shapes_; const bool is_files_; const bool replicate_on_split_; }; TensorSliceDatasetOp::TensorSliceDatasetOp(OpKernelConstruction* ctx) : DatasetOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr(kToutputTypes, &output_types_)); OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_)); if (ctx->HasAttr(kIsFiles)) { OP_REQUIRES_OK(ctx, ctx->GetAttr(kIsFiles, &is_files_)); } if (ctx->HasAttr(kReplicateOnSplit)) { OP_REQUIRES_OK(ctx, ctx->GetAttr(kReplicateOnSplit, &replicate_on_split_)); } } void TensorSliceDatasetOp::MakeDataset(OpKernelContext* ctx, 
DatasetBase** output) { OpInputList inputs; OP_REQUIRES_OK(ctx, ctx->input_list(kComponents, &inputs)); std::vector<Tensor> components; components.reserve(inputs.size()); OP_REQUIRES( ctx, inputs[0].dims() > 0, errors::InvalidArgument("All components must be at least 1-dimensional")); const int64_t num_slices = inputs[0].dim_size(0); for (const Tensor& t : inputs) { components.push_back(t); OP_REQUIRES(ctx, t.dims() > 0, errors::InvalidArgument( "All components must be at least 1-dimensional")); OP_REQUIRES( ctx, t.dim_size(0) == num_slices, errors::InvalidArgument( "All components must have the same size in the 0th dimension")); } *output = new Dataset(ctx, std::move(components), is_files_, replicate_on_split_); OP_REQUIRES_OK(ctx, VerifyTypesMatch((*output)->output_dtypes(), output_types_)); OP_REQUIRES_OK( ctx, VerifyShapesCompatible((*output)->output_shapes(), output_shapes_)); } namespace { REGISTER_KERNEL_BUILDER(Name("TensorSliceDataset").Device(DEVICE_CPU), TensorSliceDatasetOp); } } }
#include "tensorflow/core/kernels/data/tensor_slice_dataset_op.h" #include <string> #include <utility> #include "tensorflow/core/data/dataset_test_base.h" #include "tensorflow/core/data/dataset_utils.h" #include "tensorflow/core/data/serialization_utils.h" namespace tensorflow { namespace data { namespace { constexpr char kNodeName[] = "tensor_slice_dataset"; class TensorSliceDatasetOpTest : public DatasetOpsTestBase {}; TensorSliceDatasetParams PlainTensorSliceDatasetParams() { std::vector<Tensor> components = { CreateTensor<int64_t>(TensorShape({2}), {1, 2}), CreateTensor<int64_t>(TensorShape({2, 2}), {1, 2, 3, 4}), CreateTensor<uint32>(TensorShape({2}), {2, 3}), CreateTensor<uint32>(TensorShape({2, 2}), {2, 3, 4, 5}), CreateTensor<uint64>(TensorShape({2}), {3, 4}), CreateTensor<uint64>(TensorShape({2, 2}), {3, 4, 5, 6}), CreateTensor<double>(TensorShape({2, 1}), {37.0, 38.0}), CreateTensor<tstring>(TensorShape({2, 1}), {"a", "b"})}; return {std::move(components), kNodeName}; } TensorSliceDatasetParams NestedTensorSliceDatasetParams() { std::vector<Tensor> components = { CreateTensor<Variant>( TensorShape({2, 1}), {CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0}), CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}), CreateTensor<Variant>( TensorShape({2, 1}), {CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"}), CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}), CreateTensor<int64_t>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6})}; return {std::move(components), kNodeName}; } std::vector<GetNextTestCase<TensorSliceDatasetParams>> GetNextTestCases() { return { {PlainTensorSliceDatasetParams(), {CreateTensor<int64_t>(TensorShape({}), {1}), CreateTensor<int64_t>(TensorShape({2}), {1, 2}), CreateTensor<uint32>(TensorShape({}), {2}), CreateTensor<uint32>(TensorShape({2}), {2, 3}), CreateTensor<uint64>(TensorShape({}), {3}), CreateTensor<uint64>(TensorShape({2}), {3, 4}), CreateTensor<double>(TensorShape({1}), {37.0}), CreateTensor<tstring>(TensorShape({1}), {"a"}), CreateTensor<int64_t>(TensorShape({}), {2}), CreateTensor<int64_t>(TensorShape({2}), {3, 4}), CreateTensor<uint32>(TensorShape({}), {3}), CreateTensor<uint32>(TensorShape({2}), {4, 5}), CreateTensor<uint64>(TensorShape({}), {4}), CreateTensor<uint64>(TensorShape({2}), {5, 6}), CreateTensor<double>(TensorShape({1}), {38.0}), CreateTensor<tstring>(TensorShape({1}), {"b"})}}, {NestedTensorSliceDatasetParams(), {CreateTensor<Variant>( TensorShape({1}), {CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0})}), CreateTensor<Variant>( TensorShape({1}), {CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}), CreateTensor<int64_t>(TensorShape({3}), {1, 2, 3}), CreateTensor<Variant>( TensorShape({1}), {CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}), CreateTensor<Variant>( TensorShape({1}), {CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}), CreateTensor<int64_t>(TensorShape({3}), {4, 5, 6})}}}; } class ParameterizedGetNextTest : public TensorSliceDatasetOpTest, public ::testing::WithParamInterface< GetNextTestCase<TensorSliceDatasetParams>> {}; TEST_P(ParameterizedGetNextTest, GetNext) { auto test_case = GetParam(); TF_ASSERT_OK(Initialize(test_case.dataset_params)); std::vector<string> input_names; TF_ASSERT_OK(test_case.dataset_params.GetInputNames(&input_names)); size_t num_tensors_per_slice = input_names.size(); bool end_of_sequence = false; std::vector<Tensor> out_tensors; int cur_slice = 0; while (true) { TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), 
&out_tensors, &end_of_sequence)); if (end_of_sequence) { EXPECT_TRUE(out_tensors.empty()); break; } for (int i = 0; i < out_tensors.size(); ++i) { EXPECT_LT(i + num_tensors_per_slice * cur_slice, test_case.expected_outputs.size()); if (out_tensors[i].dtype() == DT_VARIANT) { const Tensor* output = out_tensors[i].scalar<Variant>()().get<Tensor>(); const Tensor* expected_output = test_case.expected_outputs[i + num_tensors_per_slice * cur_slice] .scalar<Variant>()() .get<Tensor>(); TF_EXPECT_OK(ExpectEqual(*output, *expected_output)); } else { TF_EXPECT_OK(ExpectEqual( out_tensors[i], test_case.expected_outputs[i + num_tensors_per_slice * cur_slice])); } } cur_slice++; } } INSTANTIATE_TEST_SUITE_P(TensorSliceDatasetOpTest, ParameterizedGetNextTest, ::testing::ValuesIn(GetNextTestCases())); TEST_F(TensorSliceDatasetOpTest, DatasetNodeName) { auto dataset_params = PlainTensorSliceDatasetParams(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name())); } TEST_F(TensorSliceDatasetOpTest, DatasetTypeString) { auto dataset_params = PlainTensorSliceDatasetParams(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetTypeString( name_utils::OpName(TensorSliceDatasetOp::kDatasetType))); } std::vector<DatasetOutputDtypesTestCase<TensorSliceDatasetParams>> DatasetOutputTypesTestCases() { return {{PlainTensorSliceDatasetParams(), PlainTensorSliceDatasetParams().output_dtypes()}, {NestedTensorSliceDatasetParams(), NestedTensorSliceDatasetParams().output_dtypes()}}; } DATASET_OUTPUT_DTYPES_TEST_P(TensorSliceDatasetOpTest, TensorSliceDatasetParams, DatasetOutputTypesTestCases()) std::vector<DatasetOutputShapesTestCase<TensorSliceDatasetParams>> DatasetOutputShapesTestCases() { return {{PlainTensorSliceDatasetParams(), PlainTensorSliceDatasetParams().output_shapes()}, {NestedTensorSliceDatasetParams(), NestedTensorSliceDatasetParams().output_shapes()}}; } DATASET_OUTPUT_SHAPES_TEST_P(TensorSliceDatasetOpTest, TensorSliceDatasetParams, DatasetOutputShapesTestCases()) std::vector<CardinalityTestCase<TensorSliceDatasetParams>> DatasetCardinalityTestCases() { return {{PlainTensorSliceDatasetParams(), 2}, {NestedTensorSliceDatasetParams(), 2}}; } DATASET_CARDINALITY_TEST_P(TensorSliceDatasetOpTest, TensorSliceDatasetParams, DatasetCardinalityTestCases()) std::vector<IteratorOutputDtypesTestCase<TensorSliceDatasetParams>> IteratorOutputTypesTestCases() { return {{PlainTensorSliceDatasetParams(), PlainTensorSliceDatasetParams().output_dtypes()}, {NestedTensorSliceDatasetParams(), NestedTensorSliceDatasetParams().output_dtypes()}}; } ITERATOR_OUTPUT_DTYPES_TEST_P(TensorSliceDatasetOpTest, TensorSliceDatasetParams, IteratorOutputTypesTestCases()) std::vector<IteratorOutputShapesTestCase<TensorSliceDatasetParams>> IteratorOutputShapesTestCases() { return {{PlainTensorSliceDatasetParams(), PlainTensorSliceDatasetParams().output_shapes()}, {NestedTensorSliceDatasetParams(), NestedTensorSliceDatasetParams().output_shapes()}}; } ITERATOR_OUTPUT_SHAPES_TEST_P(TensorSliceDatasetOpTest, TensorSliceDatasetParams, IteratorOutputShapesTestCases()) TEST_F(TensorSliceDatasetOpTest, IteratorOutputPrefix) { auto dataset_params = PlainTensorSliceDatasetParams(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix( TensorSliceDatasetOp::kDatasetType, dataset_params.iterator_prefix()))); } std::vector<IteratorSaveAndRestoreTestCase<TensorSliceDatasetParams>> IteratorSaveAndRestoreTestCases() { return { 
{PlainTensorSliceDatasetParams(), {0, 1, 2}, {CreateTensor<int64_t>(TensorShape({}), {1}), CreateTensor<int64_t>(TensorShape({2}), {1, 2}), CreateTensor<uint32>(TensorShape({}), {2}), CreateTensor<uint32>(TensorShape({2}), {2, 3}), CreateTensor<uint64>(TensorShape({}), {3}), CreateTensor<uint64>(TensorShape({2}), {3, 4}), CreateTensor<double>(TensorShape({1}), {37.0}), CreateTensor<tstring>(TensorShape({1}), {"a"}), CreateTensor<int64_t>(TensorShape({}), {2}), CreateTensor<int64_t>(TensorShape({2}), {3, 4}), CreateTensor<uint32>(TensorShape({}), {3}), CreateTensor<uint32>(TensorShape({2}), {4, 5}), CreateTensor<uint64>(TensorShape({}), {4}), CreateTensor<uint64>(TensorShape({2}), {5, 6}), CreateTensor<double>(TensorShape({1}), {38.0}), CreateTensor<tstring>(TensorShape({1}), {"b"})}}, {NestedTensorSliceDatasetParams(), {0, 1, 2}, {CreateTensor<Variant>( TensorShape({1}), {CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0})}), CreateTensor<Variant>( TensorShape({1}), {CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}), CreateTensor<int64_t>(TensorShape({3}), {1, 2, 3}), CreateTensor<Variant>( TensorShape({1}), {CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}), CreateTensor<Variant>( TensorShape({1}), {CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}), CreateTensor<int64_t>(TensorShape({3}), {4, 5, 6})}}}; } class ParameterizedIteratorSaveAndRestoreTest : public TensorSliceDatasetOpTest, public ::testing::WithParamInterface< IteratorSaveAndRestoreTestCase<TensorSliceDatasetParams>> {}; TEST_P(ParameterizedIteratorSaveAndRestoreTest, SaveAndRestore) { auto test_case = GetParam(); TF_ASSERT_OK(Initialize(test_case.dataset_params)); std::unique_ptr<SerializationContext> serialization_context; TF_ASSERT_OK(CreateSerializationContext(&serialization_context)); int cur_iteration = 0; bool end_of_sequence = false; auto params = static_cast<TensorSliceDatasetParams&>(test_case.dataset_params); int64_t num_slices = params.num_slices(); size_t num_tensors_per_slice = params.num_tensors_per_slice(); std::vector<Tensor> out_tensors; const std::vector<int>& breakpoints = test_case.breakpoints; for (int breakpoint : breakpoints) { while (cur_iteration < breakpoint) { TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)); cur_iteration++; } if (breakpoint == 0) { EXPECT_FALSE(end_of_sequence); } else if (breakpoint <= num_slices) { for (int i = 0; i < out_tensors.size(); ++i) { if (out_tensors[i].dtype() == DT_VARIANT) { const Tensor* output = out_tensors[i].scalar<Variant>()().get<Tensor>(); const Tensor* expected_output = test_case .expected_outputs[i + num_tensors_per_slice * (cur_iteration - 1)] .scalar<Variant>()() .get<Tensor>(); TF_EXPECT_OK(ExpectEqual(*output, *expected_output)); } else { TF_EXPECT_OK(ExpectEqual( out_tensors[i], test_case.expected_outputs[i + num_tensors_per_slice * (cur_iteration - 1)])); } } } else { EXPECT_TRUE(end_of_sequence); } VariantTensorDataWriter writer; TF_ASSERT_OK(iterator_->Save(serialization_context.get(), &writer)); std::vector<const VariantTensorData*> data; writer.GetData(&data); VariantTensorDataReader reader(data); TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader, "Iterator", *dataset_, &iterator_)); } } INSTANTIATE_TEST_SUITE_P( TensorSliceDatasetOpTest, ParameterizedIteratorSaveAndRestoreTest, ::testing::ValuesIn(IteratorSaveAndRestoreTestCases())); TEST_F(TensorSliceDatasetOpTest, SplitProvider) { auto params = TensorSliceDatasetParams( 
CreateTensors<int64_t>(TensorShape({7}), {{6, 2, 3, 8, 7, 0, 10}}), kNodeName); TF_ASSERT_OK(InitializeRuntime(params)); TF_EXPECT_OK(CheckSplitProviderFullIteration( params, CreateTensors<int64_t>(TensorShape({}), {{6}, {2}, {3}, {8}, {7}, {0}, {10}}))); TF_EXPECT_OK(CheckSplitProviderShardedIteration( params, 3, 1, CreateTensors<int64_t>(TensorShape({}), {{2}, {7}}))); } TEST_F(TensorSliceDatasetOpTest, SplitProviderEmpty) { auto params = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape({0}), {{}}), kNodeName); TF_ASSERT_OK(InitializeRuntime(params)); TF_EXPECT_OK(CheckSplitProviderFullIteration( params, CreateTensors<int64_t>(TensorShape({}), {}))); TF_EXPECT_OK(CheckSplitProviderShardedIteration( params, 3, 1, CreateTensors<int64_t>(TensorShape({}), {}))); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/tensor_slice_dataset_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/tensor_slice_dataset_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
f845cd84-f662-46bb-a6e6-300d4c18375b
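A plain-C++ toy (no TensorFlow dependency; all names here are illustrative only) of the contract MakeDataset enforces above: every component must be at least 1-D with the same size in dimension 0, and slice i is element i of each component:

#include <cstdio>
#include <vector>

int main() {
  // Two "components" sharing a size-2 leading dimension, as the
  // InvalidArgument checks in MakeDataset require.
  std::vector<int> component0 = {1, 2};                         // shape {2}
  std::vector<std::vector<int>> component1 = {{1, 2}, {3, 4}};  // shape {2, 2}
  const size_t num_slices = component0.size();                  // dim_size(0)
  // Stand-in for IndexSplitProvider handing out indices 0..num_slices-1.
  for (size_t i = 0; i < num_slices; ++i) {
    std::printf("slice %zu: component0=%d component1={%d, %d}\n", i,
                component0[i], component1[i][0], component1[i][1]);
  }
  return 0;
}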
cpp
google/quiche
quic_lru_cache
quiche/quic/core/quic_lru_cache.h
quiche/quic/core/quic_lru_cache_test.cc
#ifndef QUICHE_QUIC_CORE_QUIC_LRU_CACHE_H_ #define QUICHE_QUIC_CORE_QUIC_LRU_CACHE_H_ #include <memory> #include "quiche/quic/platform/api/quic_export.h" #include "quiche/quic/platform/api/quic_flag_utils.h" #include "quiche/quic/platform/api/quic_flags.h" #include "quiche/quic/platform/api/quic_logging.h" #include "quiche/common/quiche_linked_hash_map.h" namespace quic { template <class K, class V, class Hash = std::hash<K>, class Eq = std::equal_to<K>> class QUICHE_EXPORT QuicLRUCache { private: using HashMapType = typename quiche::QuicheLinkedHashMap<K, std::unique_ptr<V>, Hash, Eq>; public: using iterator = typename HashMapType::iterator; using const_iterator = typename HashMapType::const_iterator; using reverse_iterator = typename HashMapType::reverse_iterator; using const_reverse_iterator = typename HashMapType::const_reverse_iterator; explicit QuicLRUCache(size_t capacity) : capacity_(capacity) {} QuicLRUCache(const QuicLRUCache&) = delete; QuicLRUCache& operator=(const QuicLRUCache&) = delete; iterator begin() { return cache_.begin(); } const_iterator begin() const { return cache_.begin(); } iterator end() { return cache_.end(); } const_iterator end() const { return cache_.end(); } reverse_iterator rbegin() { return cache_.rbegin(); } const_reverse_iterator rbegin() const { return cache_.rbegin(); } reverse_iterator rend() { return cache_.rend(); } const_reverse_iterator rend() const { return cache_.rend(); } void Insert(const K& key, std::unique_ptr<V> value) { auto it = cache_.find(key); if (it != cache_.end()) { cache_.erase(it); } cache_.emplace(key, std::move(value)); if (cache_.size() > capacity_) { cache_.pop_front(); } QUICHE_DCHECK_LE(cache_.size(), capacity_); } iterator Lookup(const K& key) { auto iter = cache_.find(key); if (iter == cache_.end()) { return iter; } std::unique_ptr<V> value = std::move(iter->second); cache_.erase(iter); auto result = cache_.emplace(key, std::move(value)); QUICHE_DCHECK(result.second); return result.first; } iterator Erase(iterator iter) { return cache_.erase(iter); } void Clear() { cache_.clear(); } size_t MaxSize() const { return capacity_; } size_t Size() const { return cache_.size(); } private: quiche::QuicheLinkedHashMap<K, std::unique_ptr<V>, Hash, Eq> cache_; const size_t capacity_; }; } #endif
#include "quiche/quic/core/quic_lru_cache.h" #include <memory> #include <utility> #include "quiche/quic/platform/api/quic_test.h" namespace quic { namespace test { namespace { struct CachedItem { explicit CachedItem(uint32_t new_value) : value(new_value) {} uint32_t value; }; TEST(QuicLRUCacheTest, InsertAndLookup) { QuicLRUCache<int, CachedItem> cache(5); EXPECT_EQ(cache.end(), cache.Lookup(1)); EXPECT_EQ(0u, cache.Size()); EXPECT_EQ(5u, cache.MaxSize()); std::unique_ptr<CachedItem> item1(new CachedItem(11)); cache.Insert(1, std::move(item1)); EXPECT_EQ(1u, cache.Size()); EXPECT_EQ(11u, cache.Lookup(1)->second->value); std::unique_ptr<CachedItem> item2(new CachedItem(12)); cache.Insert(1, std::move(item2)); EXPECT_EQ(1u, cache.Size()); EXPECT_EQ(12u, cache.Lookup(1)->second->value); std::unique_ptr<CachedItem> item3(new CachedItem(13)); cache.Insert(3, std::move(item3)); EXPECT_EQ(2u, cache.Size()); auto iter = cache.Lookup(3); ASSERT_NE(cache.end(), iter); EXPECT_EQ(13u, iter->second->value); cache.Erase(iter); ASSERT_EQ(cache.end(), cache.Lookup(3)); EXPECT_EQ(1u, cache.Size()); cache.Clear(); EXPECT_EQ(0u, cache.Size()); } TEST(QuicLRUCacheTest, Eviction) { QuicLRUCache<int, CachedItem> cache(3); for (size_t i = 1; i <= 4; ++i) { std::unique_ptr<CachedItem> item(new CachedItem(10 + i)); cache.Insert(i, std::move(item)); } EXPECT_EQ(3u, cache.Size()); EXPECT_EQ(3u, cache.MaxSize()); EXPECT_EQ(cache.end(), cache.Lookup(1)); EXPECT_EQ(14u, cache.Lookup(4)->second->value); EXPECT_EQ(12u, cache.Lookup(2)->second->value); std::unique_ptr<CachedItem> item5(new CachedItem(15)); cache.Insert(5, std::move(item5)); EXPECT_EQ(cache.end(), cache.Lookup(3)); EXPECT_EQ(15u, cache.Lookup(5)->second->value); cache.Clear(); EXPECT_EQ(0u, cache.Size()); } } } }
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_lru_cache.h
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_lru_cache_test.cc
6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
1e57cfe2-0c5a-4707-a50b-7bf3a0666fa5
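QuicLRUCache above gets its recency ordering for free from QuicheLinkedHashMap: entries live in insertion order, Lookup re-inserts at the back, and overflow pops the front. A self-contained sketch of the same policy using only the standard library (TinyLru is a hypothetical name, not part of quiche):

#include <cstdio>
#include <list>
#include <unordered_map>
#include <utility>

class TinyLru {
 public:
  explicit TinyLru(size_t capacity) : capacity_(capacity) {}

  void Insert(int key, int value) {
    auto it = index_.find(key);
    if (it != index_.end()) order_.erase(it->second);  // drop stale entry
    order_.push_back({key, value});
    index_[key] = std::prev(order_.end());
    if (order_.size() > capacity_) {  // evict LRU, like pop_front() above
      index_.erase(order_.front().first);
      order_.pop_front();
    }
  }

  int* Lookup(int key) {
    auto it = index_.find(key);
    if (it == index_.end()) return nullptr;
    // Move the node to the most-recently-used position; list iterators
    // stay valid across splice, so the index needs no update.
    order_.splice(order_.end(), order_, it->second);
    return &it->second->second;
  }

 private:
  std::list<std::pair<int, int>> order_;
  std::unordered_map<int, std::list<std::pair<int, int>>::iterator> index_;
  const size_t capacity_;
};

int main() {
  TinyLru cache(2);
  cache.Insert(1, 11);
  cache.Insert(2, 12);
  cache.Lookup(1);      // key 1 is now most recently used
  cache.Insert(3, 13);  // evicts key 2, not key 1
  std::printf("1:%s 2:%s 3:%s\n", cache.Lookup(1) ? "hit" : "miss",
              cache.Lookup(2) ? "hit" : "miss",
              cache.Lookup(3) ? "hit" : "miss");
  return 0;
}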
cpp
abseil/abseil-cpp
uniform_helper
absl/random/internal/uniform_helper.h
absl/random/internal/uniform_helper_test.cc
#ifndef ABSL_RANDOM_INTERNAL_UNIFORM_HELPER_H_ #define ABSL_RANDOM_INTERNAL_UNIFORM_HELPER_H_ #include <cmath> #include <limits> #include <type_traits> #include "absl/base/config.h" #include "absl/meta/type_traits.h" #include "absl/random/internal/traits.h" namespace absl { ABSL_NAMESPACE_BEGIN template <typename IntType> class uniform_int_distribution; template <typename RealType> class uniform_real_distribution; namespace random_internal { template <typename T> struct TagTypeCompare {}; template <typename T> constexpr bool operator==(TagTypeCompare<T>, TagTypeCompare<T>) { return true; } template <typename T> constexpr bool operator!=(TagTypeCompare<T>, TagTypeCompare<T>) { return false; } } struct IntervalClosedClosedTag : public random_internal::TagTypeCompare<IntervalClosedClosedTag> {}; struct IntervalClosedOpenTag : public random_internal::TagTypeCompare<IntervalClosedOpenTag> {}; struct IntervalOpenClosedTag : public random_internal::TagTypeCompare<IntervalOpenClosedTag> {}; struct IntervalOpenOpenTag : public random_internal::TagTypeCompare<IntervalOpenOpenTag> {}; namespace random_internal { template <typename A, typename B> using uniform_inferred_return_t = absl::enable_if_t<absl::disjunction<is_widening_convertible<A, B>, is_widening_convertible<B, A>>::value, typename std::conditional< is_widening_convertible<A, B>::value, B, A>::type>; template <typename IntType, typename Tag> typename absl::enable_if_t< absl::conjunction< IsIntegral<IntType>, absl::disjunction<std::is_same<Tag, IntervalOpenClosedTag>, std::is_same<Tag, IntervalOpenOpenTag>>>::value, IntType> uniform_lower_bound(Tag, IntType a, IntType) { return a < (std::numeric_limits<IntType>::max)() ? (a + 1) : a; } template <typename FloatType, typename Tag> typename absl::enable_if_t< absl::conjunction< std::is_floating_point<FloatType>, absl::disjunction<std::is_same<Tag, IntervalOpenClosedTag>, std::is_same<Tag, IntervalOpenOpenTag>>>::value, FloatType> uniform_lower_bound(Tag, FloatType a, FloatType b) { return std::nextafter(a, b); } template <typename NumType, typename Tag> typename absl::enable_if_t< absl::disjunction<std::is_same<Tag, IntervalClosedClosedTag>, std::is_same<Tag, IntervalClosedOpenTag>>::value, NumType> uniform_lower_bound(Tag, NumType a, NumType) { return a; } template <typename IntType, typename Tag> typename absl::enable_if_t< absl::conjunction< IsIntegral<IntType>, absl::disjunction<std::is_same<Tag, IntervalClosedOpenTag>, std::is_same<Tag, IntervalOpenOpenTag>>>::value, IntType> uniform_upper_bound(Tag, IntType, IntType b) { return b > (std::numeric_limits<IntType>::min)() ? 
(b - 1) : b; } template <typename FloatType, typename Tag> typename absl::enable_if_t< absl::conjunction< std::is_floating_point<FloatType>, absl::disjunction<std::is_same<Tag, IntervalClosedOpenTag>, std::is_same<Tag, IntervalOpenOpenTag>>>::value, FloatType> uniform_upper_bound(Tag, FloatType, FloatType b) { return b; } template <typename IntType, typename Tag> typename absl::enable_if_t< absl::conjunction< IsIntegral<IntType>, absl::disjunction<std::is_same<Tag, IntervalClosedClosedTag>, std::is_same<Tag, IntervalOpenClosedTag>>>::value, IntType> uniform_upper_bound(Tag, IntType, IntType b) { return b; } template <typename FloatType, typename Tag> typename absl::enable_if_t< absl::conjunction< std::is_floating_point<FloatType>, absl::disjunction<std::is_same<Tag, IntervalClosedClosedTag>, std::is_same<Tag, IntervalOpenClosedTag>>>::value, FloatType> uniform_upper_bound(Tag, FloatType, FloatType b) { return std::nextafter(b, (std::numeric_limits<FloatType>::max)()); } template <typename FloatType> absl::enable_if_t<std::is_floating_point<FloatType>::value, bool> is_uniform_range_valid(FloatType a, FloatType b) { return a <= b && std::isfinite(b - a); } template <typename IntType> absl::enable_if_t<IsIntegral<IntType>::value, bool> is_uniform_range_valid(IntType a, IntType b) { return a <= b; } template <typename NumType> using UniformDistribution = typename std::conditional<IsIntegral<NumType>::value, absl::uniform_int_distribution<NumType>, absl::uniform_real_distribution<NumType>>::type; template <typename NumType> struct UniformDistributionWrapper : public UniformDistribution<NumType> { template <typename TagType> explicit UniformDistributionWrapper(TagType, NumType lo, NumType hi) : UniformDistribution<NumType>( uniform_lower_bound<NumType>(TagType{}, lo, hi), uniform_upper_bound<NumType>(TagType{}, lo, hi)) {} explicit UniformDistributionWrapper(NumType lo, NumType hi) : UniformDistribution<NumType>( uniform_lower_bound<NumType>(IntervalClosedOpenTag(), lo, hi), uniform_upper_bound<NumType>(IntervalClosedOpenTag(), lo, hi)) {} explicit UniformDistributionWrapper() : UniformDistribution<NumType>(std::numeric_limits<NumType>::lowest(), (std::numeric_limits<NumType>::max)()) {} }; } ABSL_NAMESPACE_END } #endif
#include "absl/random/internal/uniform_helper.h" #include <cmath> #include <cstdint> #include <random> #include "gtest/gtest.h" namespace { using absl::IntervalClosedClosedTag; using absl::IntervalClosedOpenTag; using absl::IntervalOpenClosedTag; using absl::IntervalOpenOpenTag; using absl::random_internal::uniform_inferred_return_t; using absl::random_internal::uniform_lower_bound; using absl::random_internal::uniform_upper_bound; class UniformHelperTest : public testing::Test {}; TEST_F(UniformHelperTest, UniformBoundFunctionsGeneral) { constexpr IntervalClosedClosedTag IntervalClosedClosed; constexpr IntervalClosedOpenTag IntervalClosedOpen; constexpr IntervalOpenClosedTag IntervalOpenClosed; constexpr IntervalOpenOpenTag IntervalOpenOpen; EXPECT_EQ(uniform_lower_bound(IntervalOpenClosed, 0, 100), 1); EXPECT_EQ(uniform_lower_bound(IntervalOpenOpen, 0, 100), 1); EXPECT_GT(uniform_lower_bound<float>(IntervalOpenClosed, 0, 1.0), 0); EXPECT_GT(uniform_lower_bound<float>(IntervalOpenOpen, 0, 1.0), 0); EXPECT_GT(uniform_lower_bound<double>(IntervalOpenClosed, 0, 1.0), 0); EXPECT_GT(uniform_lower_bound<double>(IntervalOpenOpen, 0, 1.0), 0); EXPECT_EQ(uniform_lower_bound(IntervalClosedClosed, 0, 100), 0); EXPECT_EQ(uniform_lower_bound(IntervalClosedOpen, 0, 100), 0); EXPECT_EQ(uniform_lower_bound<float>(IntervalClosedClosed, 0, 1.0), 0); EXPECT_EQ(uniform_lower_bound<float>(IntervalClosedOpen, 0, 1.0), 0); EXPECT_EQ(uniform_lower_bound<double>(IntervalClosedClosed, 0, 1.0), 0); EXPECT_EQ(uniform_lower_bound<double>(IntervalClosedOpen, 0, 1.0), 0); EXPECT_EQ(uniform_upper_bound(IntervalOpenOpen, 0, 100), 99); EXPECT_EQ(uniform_upper_bound(IntervalClosedOpen, 0, 100), 99); EXPECT_EQ(uniform_upper_bound<float>(IntervalOpenOpen, 0, 1.0), 1.0); EXPECT_EQ(uniform_upper_bound<float>(IntervalClosedOpen, 0, 1.0), 1.0); EXPECT_EQ(uniform_upper_bound<double>(IntervalOpenOpen, 0, 1.0), 1.0); EXPECT_EQ(uniform_upper_bound<double>(IntervalClosedOpen, 0, 1.0), 1.0); EXPECT_EQ(uniform_upper_bound(IntervalOpenClosed, 0, 100), 100); EXPECT_EQ(uniform_upper_bound(IntervalClosedClosed, 0, 100), 100); EXPECT_GT(uniform_upper_bound<float>(IntervalOpenClosed, 0, 1.0), 1.0); EXPECT_GT(uniform_upper_bound<float>(IntervalClosedClosed, 0, 1.0), 1.0); EXPECT_GT(uniform_upper_bound<double>(IntervalOpenClosed, 0, 1.0), 1.0); EXPECT_GT(uniform_upper_bound<double>(IntervalClosedClosed, 0, 1.0), 1.0); EXPECT_EQ(uniform_lower_bound(IntervalOpenClosed, -100, -1), -99); EXPECT_EQ(uniform_lower_bound(IntervalOpenOpen, -100, -1), -99); EXPECT_GT(uniform_lower_bound<float>(IntervalOpenClosed, -2.0, -1.0), -2.0); EXPECT_GT(uniform_lower_bound<float>(IntervalOpenOpen, -2.0, -1.0), -2.0); EXPECT_GT(uniform_lower_bound<double>(IntervalOpenClosed, -2.0, -1.0), -2.0); EXPECT_GT(uniform_lower_bound<double>(IntervalOpenOpen, -2.0, -1.0), -2.0); EXPECT_EQ(uniform_lower_bound(IntervalClosedClosed, -100, -1), -100); EXPECT_EQ(uniform_lower_bound(IntervalClosedOpen, -100, -1), -100); EXPECT_EQ(uniform_lower_bound<float>(IntervalClosedClosed, -2.0, -1.0), -2.0); EXPECT_EQ(uniform_lower_bound<float>(IntervalClosedOpen, -2.0, -1.0), -2.0); EXPECT_EQ(uniform_lower_bound<double>(IntervalClosedClosed, -2.0, -1.0), -2.0); EXPECT_EQ(uniform_lower_bound<double>(IntervalClosedOpen, -2.0, -1.0), -2.0); EXPECT_EQ(uniform_upper_bound(IntervalOpenOpen, -100, -1), -2); EXPECT_EQ(uniform_upper_bound(IntervalClosedOpen, -100, -1), -2); EXPECT_EQ(uniform_upper_bound<float>(IntervalOpenOpen, -2.0, -1.0), -1.0); 
EXPECT_EQ(uniform_upper_bound<float>(IntervalClosedOpen, -2.0, -1.0), -1.0); EXPECT_EQ(uniform_upper_bound<double>(IntervalOpenOpen, -2.0, -1.0), -1.0); EXPECT_EQ(uniform_upper_bound<double>(IntervalClosedOpen, -2.0, -1.0), -1.0); EXPECT_EQ(uniform_upper_bound(IntervalOpenClosed, -100, -1), -1); EXPECT_EQ(uniform_upper_bound(IntervalClosedClosed, -100, -1), -1); EXPECT_GT(uniform_upper_bound<float>(IntervalOpenClosed, -2.0, -1.0), -1.0); EXPECT_GT(uniform_upper_bound<float>(IntervalClosedClosed, -2.0, -1.0), -1.0); EXPECT_GT(uniform_upper_bound<double>(IntervalOpenClosed, -2.0, -1.0), -1.0); EXPECT_GT(uniform_upper_bound<double>(IntervalClosedClosed, -2.0, -1.0), -1.0); EXPECT_GT(uniform_lower_bound(IntervalOpenClosed, 1.0, 2.0), 1.0); EXPECT_LT(uniform_lower_bound(IntervalOpenClosed, 1.0, +0.0), 1.0); EXPECT_LT(uniform_lower_bound(IntervalOpenClosed, 1.0, -0.0), 1.0); EXPECT_LT(uniform_lower_bound(IntervalOpenClosed, 1.0, -1.0), 1.0); } TEST_F(UniformHelperTest, UniformBoundFunctionsIntBounds) { constexpr IntervalOpenOpenTag IntervalOpenOpen; constexpr auto m = (std::numeric_limits<uint64_t>::max)(); EXPECT_EQ(1, uniform_lower_bound(IntervalOpenOpen, 0u, 0u)); EXPECT_EQ(m, uniform_lower_bound(IntervalOpenOpen, m, m)); EXPECT_EQ(m, uniform_lower_bound(IntervalOpenOpen, m - 1, m - 1)); EXPECT_EQ(0, uniform_upper_bound(IntervalOpenOpen, 0u, 0u)); EXPECT_EQ(m - 1, uniform_upper_bound(IntervalOpenOpen, m, m)); constexpr auto l = (std::numeric_limits<int64_t>::min)(); constexpr auto r = (std::numeric_limits<int64_t>::max)(); EXPECT_EQ(1, uniform_lower_bound(IntervalOpenOpen, 0, 0)); EXPECT_EQ(l + 1, uniform_lower_bound(IntervalOpenOpen, l, l)); EXPECT_EQ(r, uniform_lower_bound(IntervalOpenOpen, r - 1, r - 1)); EXPECT_EQ(r, uniform_lower_bound(IntervalOpenOpen, r, r)); EXPECT_EQ(-1, uniform_upper_bound(IntervalOpenOpen, 0, 0)); EXPECT_EQ(l, uniform_upper_bound(IntervalOpenOpen, l, l)); EXPECT_EQ(r - 1, uniform_upper_bound(IntervalOpenOpen, r, r)); } TEST_F(UniformHelperTest, UniformBoundFunctionsRealBounds) { constexpr IntervalOpenClosedTag IntervalOpenClosed; EXPECT_EQ(1.0, uniform_lower_bound(IntervalOpenClosed, 1.0, 1.0)); EXPECT_EQ(1.0f, uniform_lower_bound(IntervalOpenClosed, 1.0f, 1.0f)); constexpr auto r = (std::numeric_limits<double>::max)(); const auto re = std::nexttoward(r, 0.0); constexpr auto l = -r; const auto le = std::nexttoward(l, 0.0); EXPECT_EQ(l, uniform_lower_bound(IntervalOpenClosed, l, l)); EXPECT_EQ(r, uniform_lower_bound(IntervalOpenClosed, r, r)); EXPECT_EQ(le, uniform_lower_bound(IntervalOpenClosed, l, r)); EXPECT_EQ(le, uniform_lower_bound(IntervalOpenClosed, l, 0.0)); EXPECT_EQ(le, uniform_lower_bound(IntervalOpenClosed, l, le)); EXPECT_EQ(r, uniform_lower_bound(IntervalOpenClosed, re, r)); EXPECT_EQ(le, uniform_upper_bound(IntervalOpenClosed, l, l)); EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, r, r)); EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, l, r)); EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, l, re)); EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, 0.0, r)); EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, re, r)); EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, le, re)); const double e = std::nextafter(1.0, 2.0); const double f = std::nextafter(1.0, 0.0); EXPECT_EQ(e, uniform_lower_bound(IntervalOpenClosed, 1.0, e)); EXPECT_EQ(std::nextafter(e, 2.0), uniform_upper_bound(IntervalOpenClosed, 1.0, e)); EXPECT_EQ(1.0, uniform_lower_bound(IntervalOpenClosed, f, 1.0)); EXPECT_EQ(e, uniform_upper_bound(IntervalOpenClosed, f, 
1.0)); const double g = std::numeric_limits<double>::denorm_min(); const double h = std::nextafter(g, 1.0); EXPECT_EQ(g, uniform_lower_bound(IntervalOpenClosed, 0.0, g)); EXPECT_EQ(h, uniform_upper_bound(IntervalOpenClosed, 0.0, g)); EXPECT_EQ(h, uniform_lower_bound(IntervalOpenClosed, g, 1.0)); EXPECT_EQ(e, uniform_upper_bound(IntervalOpenClosed, g, 1.0)); EXPECT_EQ(f, uniform_lower_bound(IntervalOpenClosed, 1.0, -1.0)); } struct Invalid {}; template <typename A, typename B> auto InferredUniformReturnT(int) -> uniform_inferred_return_t<A, B>; template <typename, typename> Invalid InferredUniformReturnT(...); template <typename A, typename B, typename Expect> void CheckArgsInferType() { static_assert( absl::conjunction< std::is_same<Expect, decltype(InferredUniformReturnT<A, B>(0))>, std::is_same<Expect, decltype(InferredUniformReturnT<B, A>(0))>>::value, ""); } TEST_F(UniformHelperTest, UniformTypeInference) { CheckArgsInferType<uint16_t, uint16_t, uint16_t>(); CheckArgsInferType<uint32_t, uint32_t, uint32_t>(); CheckArgsInferType<uint64_t, uint64_t, uint64_t>(); CheckArgsInferType<int16_t, int16_t, int16_t>(); CheckArgsInferType<int32_t, int32_t, int32_t>(); CheckArgsInferType<int64_t, int64_t, int64_t>(); CheckArgsInferType<float, float, float>(); CheckArgsInferType<double, double, double>(); CheckArgsInferType<uint16_t, uint32_t, uint32_t>(); CheckArgsInferType<uint16_t, uint64_t, uint64_t>(); CheckArgsInferType<uint16_t, int32_t, int32_t>(); CheckArgsInferType<uint16_t, int64_t, int64_t>(); CheckArgsInferType<uint16_t, float, float>(); CheckArgsInferType<uint16_t, double, double>(); CheckArgsInferType<int16_t, int32_t, int32_t>(); CheckArgsInferType<int16_t, int64_t, int64_t>(); CheckArgsInferType<int16_t, float, float>(); CheckArgsInferType<int16_t, double, double>(); CheckArgsInferType<uint16_t, int16_t, Invalid>(); CheckArgsInferType<int16_t, uint32_t, Invalid>(); CheckArgsInferType<int16_t, uint64_t, Invalid>(); CheckArgsInferType<uint32_t, uint64_t, uint64_t>(); CheckArgsInferType<uint32_t, int64_t, int64_t>(); CheckArgsInferType<uint32_t, double, double>(); CheckArgsInferType<int32_t, int64_t, int64_t>(); CheckArgsInferType<int32_t, double, double>(); CheckArgsInferType<uint32_t, int32_t, Invalid>(); CheckArgsInferType<int32_t, uint64_t, Invalid>(); CheckArgsInferType<int32_t, float, Invalid>(); CheckArgsInferType<uint32_t, float, Invalid>(); CheckArgsInferType<uint64_t, int64_t, Invalid>(); CheckArgsInferType<int64_t, float, Invalid>(); CheckArgsInferType<int64_t, double, Invalid>(); CheckArgsInferType<float, double, double>(); } }
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/uniform_helper.h
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/uniform_helper_test.cc
03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4
d9e7938a-62ab-44dd-9d1c-26c951320ef7
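The helpers above reduce all four interval tags to one underlying generator by nudging open endpoints one representable value inward: a + 1 for integers (guarding overflow at the type's maximum) and std::nextafter for floating point, where a closed upper end is instead bumped one value up because uniform_real_distribution samples the half-open [lo, hi). A standalone sketch of that rule:

#include <cmath>
#include <cstdio>
#include <limits>

int main() {
  // Integer case: the open interval (0, 100] becomes the closed [1, 100].
  // The max() guard mirrors uniform_lower_bound's overflow check.
  int a = 0;
  int open_lower = a < std::numeric_limits<int>::max() ? a + 1 : a;
  std::printf("open integer lower bound: %d\n", open_lower);

  // Floating-point case: "one value inward" is the adjacent representable
  // double, obtained with std::nextafter.
  double open_lower_fp = std::nextafter(0.0, 1.0);  // smallest double > 0.0
  double closed_upper_fp = std::nextafter(1.0, 2.0);  // includes 1.0 in [lo, hi)
  std::printf("fp bounds: %.17g and %.17g\n", open_lower_fp, closed_upper_fp);
  return 0;
}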
cpp
abseil/abseil-cpp
crc32c
absl/crc/crc32c.cc
absl/crc/crc32c_test.cc
#include "absl/crc/crc32c.h" #include <cstdint> #include "absl/crc/internal/crc.h" #include "absl/crc/internal/crc32c.h" #include "absl/crc/internal/crc_memcpy.h" #include "absl/strings/string_view.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace { const crc_internal::CRC* CrcEngine() { static const crc_internal::CRC* engine = crc_internal::CRC::Crc32c(); return engine; } constexpr uint32_t kCRC32Xor = 0xffffffffU; } namespace crc_internal { crc32c_t UnextendCrc32cByZeroes(crc32c_t initial_crc, size_t length) { uint32_t crc = static_cast<uint32_t>(initial_crc) ^ kCRC32Xor; CrcEngine()->UnextendByZeroes(&crc, length); return static_cast<crc32c_t>(crc ^ kCRC32Xor); } crc32c_t ExtendCrc32cInternal(crc32c_t initial_crc, absl::string_view buf_to_add) { uint32_t crc = static_cast<uint32_t>(initial_crc) ^ kCRC32Xor; CrcEngine()->Extend(&crc, buf_to_add.data(), buf_to_add.size()); return static_cast<crc32c_t>(crc ^ kCRC32Xor); } } crc32c_t ComputeCrc32c(absl::string_view buf) { return ExtendCrc32c(crc32c_t{0}, buf); } crc32c_t ExtendCrc32cByZeroes(crc32c_t initial_crc, size_t length) { uint32_t crc = static_cast<uint32_t>(initial_crc) ^ kCRC32Xor; CrcEngine()->ExtendByZeroes(&crc, length); return static_cast<crc32c_t>(crc ^ kCRC32Xor); } crc32c_t ConcatCrc32c(crc32c_t lhs_crc, crc32c_t rhs_crc, size_t rhs_len) { uint32_t result = static_cast<uint32_t>(lhs_crc); CrcEngine()->ExtendByZeroes(&result, rhs_len); return crc32c_t{result ^ static_cast<uint32_t>(rhs_crc)}; } crc32c_t RemoveCrc32cPrefix(crc32c_t crc_a, crc32c_t crc_ab, size_t length_b) { return ConcatCrc32c(crc_a, crc_ab, length_b); } crc32c_t MemcpyCrc32c(void* dest, const void* src, size_t count, crc32c_t initial_crc) { return static_cast<crc32c_t>( crc_internal::Crc32CAndCopy(dest, src, count, initial_crc, false)); } crc32c_t RemoveCrc32cSuffix(crc32c_t full_string_crc, crc32c_t suffix_crc, size_t suffix_len) { uint32_t result = static_cast<uint32_t>(full_string_crc) ^ static_cast<uint32_t>(suffix_crc); CrcEngine()->UnextendByZeroes(&result, suffix_len); return crc32c_t{result}; } ABSL_NAMESPACE_END }
#include "absl/crc/crc32c.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <cstring> #include <sstream> #include <string> #include "gtest/gtest.h" #include "absl/crc/internal/crc32c.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" namespace { TEST(CRC32C, RFC3720) { char data[32]; memset(data, 0, sizeof(data)); EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(data, sizeof(data))), absl::crc32c_t{0x8a9136aa}); memset(data, 0xff, sizeof(data)); EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(data, sizeof(data))), absl::crc32c_t{0x62a8ab43}); for (int i = 0; i < 32; ++i) data[i] = static_cast<char>(i); EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(data, sizeof(data))), absl::crc32c_t{0x46dd794e}); for (int i = 0; i < 32; ++i) data[i] = static_cast<char>(31 - i); EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(data, sizeof(data))), absl::crc32c_t{0x113fdb5c}); constexpr uint8_t cmd[48] = { 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; EXPECT_EQ(absl::ComputeCrc32c(absl::string_view( reinterpret_cast<const char*>(cmd), sizeof(cmd))), absl::crc32c_t{0xd9963a56}); } std::string TestString(size_t len) { std::string result; result.reserve(len); for (size_t i = 0; i < len; ++i) { result.push_back(static_cast<char>(i % 256)); } return result; } TEST(CRC32C, Compute) { EXPECT_EQ(absl::ComputeCrc32c(""), absl::crc32c_t{0}); EXPECT_EQ(absl::ComputeCrc32c("hello world"), absl::crc32c_t{0xc99465aa}); } TEST(CRC32C, Extend) { uint32_t base = 0xC99465AA; std::string extension = "Extension String"; EXPECT_EQ( absl::ExtendCrc32c(absl::crc32c_t{base}, extension), absl::crc32c_t{0xD2F65090}); } TEST(CRC32C, ExtendByZeroes) { std::string base = "hello world"; absl::crc32c_t base_crc = absl::crc32c_t{0xc99465aa}; constexpr size_t kExtendByValues[] = {100, 10000, 100000}; for (const size_t extend_by : kExtendByValues) { SCOPED_TRACE(extend_by); absl::crc32c_t crc2 = absl::ExtendCrc32cByZeroes(base_crc, extend_by); EXPECT_EQ(crc2, absl::ComputeCrc32c(base + std::string(extend_by, '\0'))); } } TEST(CRC32C, UnextendByZeroes) { constexpr size_t kExtendByValues[] = {2, 200, 20000, 200000, 20000000}; constexpr size_t kUnextendByValues[] = {0, 100, 10000, 100000, 10000000}; for (auto seed_crc : {absl::crc32c_t{0}, absl::crc32c_t{0xc99465aa}}) { SCOPED_TRACE(seed_crc); for (const size_t size_1 : kExtendByValues) { for (const size_t size_2 : kUnextendByValues) { size_t extend_size = std::max(size_1, size_2); size_t unextend_size = std::min(size_1, size_2); SCOPED_TRACE(extend_size); SCOPED_TRACE(unextend_size); absl::crc32c_t crc1 = seed_crc; crc1 = absl::ExtendCrc32cByZeroes(crc1, extend_size); crc1 = absl::crc_internal::UnextendCrc32cByZeroes(crc1, unextend_size); absl::crc32c_t crc2 = seed_crc; crc2 = absl::ExtendCrc32cByZeroes(crc2, extend_size - unextend_size); EXPECT_EQ(crc1, crc2); } } } constexpr size_t kSizes[] = {0, 1, 100, 10000}; for (const size_t size : kSizes) { SCOPED_TRACE(size); std::string string_before = TestString(size); std::string string_after = string_before + std::string(size, '\0'); absl::crc32c_t crc_before = absl::ComputeCrc32c(string_before); absl::crc32c_t crc_after = absl::ComputeCrc32c(string_after); EXPECT_EQ(crc_before, 
absl::crc_internal::UnextendCrc32cByZeroes(crc_after, size)); } } TEST(CRC32C, Concat) { std::string hello = "Hello, "; std::string world = "world!"; std::string hello_world = absl::StrCat(hello, world); absl::crc32c_t crc_a = absl::ComputeCrc32c(hello); absl::crc32c_t crc_b = absl::ComputeCrc32c(world); absl::crc32c_t crc_ab = absl::ComputeCrc32c(hello_world); EXPECT_EQ(absl::ConcatCrc32c(crc_a, crc_b, world.size()), crc_ab); } TEST(CRC32C, Memcpy) { constexpr size_t kBytesSize[] = {0, 1, 20, 500, 100000}; for (size_t bytes : kBytesSize) { SCOPED_TRACE(bytes); std::string sample_string = TestString(bytes); std::string target_buffer = std::string(bytes, '\0'); absl::crc32c_t memcpy_crc = absl::MemcpyCrc32c(&(target_buffer[0]), sample_string.data(), bytes); absl::crc32c_t compute_crc = absl::ComputeCrc32c(sample_string); EXPECT_EQ(memcpy_crc, compute_crc); EXPECT_EQ(sample_string, target_buffer); } } TEST(CRC32C, RemovePrefix) { std::string hello = "Hello, "; std::string world = "world!"; std::string hello_world = absl::StrCat(hello, world); absl::crc32c_t crc_a = absl::ComputeCrc32c(hello); absl::crc32c_t crc_b = absl::ComputeCrc32c(world); absl::crc32c_t crc_ab = absl::ComputeCrc32c(hello_world); EXPECT_EQ(absl::RemoveCrc32cPrefix(crc_a, crc_ab, world.size()), crc_b); } TEST(CRC32C, RemoveSuffix) { std::string hello = "Hello, "; std::string world = "world!"; std::string hello_world = absl::StrCat(hello, world); absl::crc32c_t crc_a = absl::ComputeCrc32c(hello); absl::crc32c_t crc_b = absl::ComputeCrc32c(world); absl::crc32c_t crc_ab = absl::ComputeCrc32c(hello_world); EXPECT_EQ(absl::RemoveCrc32cSuffix(crc_ab, crc_b, world.size()), crc_a); } TEST(CRC32C, InsertionOperator) { { std::ostringstream buf; buf << absl::crc32c_t{0xc99465aa}; EXPECT_EQ(buf.str(), "c99465aa"); } { std::ostringstream buf; buf << absl::crc32c_t{0}; EXPECT_EQ(buf.str(), "00000000"); } { std::ostringstream buf; buf << absl::crc32c_t{17}; EXPECT_EQ(buf.str(), "00000011"); } } TEST(CRC32C, AbslStringify) { EXPECT_EQ(absl::StrFormat("%v", absl::crc32c_t{0xc99465aa}), "c99465aa"); EXPECT_EQ(absl::StrFormat("%v", absl::crc32c_t{0}), "00000000"); EXPECT_EQ(absl::StrFormat("%v", absl::crc32c_t{17}), "00000011"); EXPECT_EQ(absl::StrCat(absl::crc32c_t{0xc99465aa}), "c99465aa"); EXPECT_EQ(absl::StrCat(absl::crc32c_t{0}), "00000000"); EXPECT_EQ(absl::StrCat(absl::crc32c_t{17}), "00000011"); } }
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/crc/crc32c.cc
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/crc/crc32c_test.cc
03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4
ffc07e61-07c7-4b48-aa1e-c9a582db6910
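The record above pairs Abseil's crc32c.cc with its unit tests at the pinned commit. For orientation, here is a minimal usage sketch of the public API those files define and exercise (ComputeCrc32c, ConcatCrc32c, and the stream insertion operator); it assumes only that Abseil's CRC target is linked, and the string contents are illustrative:

```cpp
#include <iostream>

#include "absl/crc/crc32c.h"
#include "absl/strings/string_view.h"

int main() {
  // Checksum two fragments independently.
  absl::crc32c_t crc_a = absl::ComputeCrc32c("Hello, ");
  absl::crc32c_t crc_b = absl::ComputeCrc32c("world!");

  // Combine them without re-reading the bytes: ConcatCrc32c zero-extends
  // crc_a by the length of the second fragment and XORs in crc_b.
  absl::crc32c_t crc_ab = absl::ConcatCrc32c(crc_a, crc_b, /*rhs_len=*/6);

  // Matches checksumming the concatenated buffer directly, as the
  // CRC32C.Concat test above verifies. crc32c_t prints as lowercase hex.
  std::cout << crc_ab << " == " << absl::ComputeCrc32c("Hello, world!")
            << "\n";
  return 0;
}
```

The combination step works because a CRC is linear over GF(2): zero-extending crc_a by the length of the second fragment shifts its contribution past those bytes, after which a plain XOR merges the two partial checksums (the kCRC32Xor pre/post-conditioning visible in the wrappers above cancels out in this arrangement).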
cpp
google/cel-cpp
standard_library
checker/standard_library.cc
checker/standard_library_test.cc
#include "checker/standard_library.h" #include <string> #include <utility> #include "absl/base/no_destructor.h" #include "absl/status/status.h" #include "base/builtins.h" #include "checker/internal/builtins_arena.h" #include "checker/type_checker_builder.h" #include "common/constant.h" #include "common/decl.h" #include "common/type.h" #include "internal/status_macros.h" namespace cel { namespace { using ::cel::checker_internal::BuiltinsArena; TypeParamType TypeParamA() { return TypeParamType("A"); } TypeParamType TypeParamB() { return TypeParamType("B"); } Type ListOfA() { static absl::NoDestructor<Type> kInstance( ListType(BuiltinsArena(), TypeParamA())); return *kInstance; } Type MapOfAB() { static absl::NoDestructor<Type> kInstance( MapType(BuiltinsArena(), TypeParamA(), TypeParamB())); return *kInstance; } Type TypeOfA() { static absl::NoDestructor<Type> kInstance( TypeType(BuiltinsArena(), TypeParamA())); return *kInstance; } Type TypeNullType() { static absl::NoDestructor<Type> kInstance( TypeType(BuiltinsArena(), NullType())); return *kInstance; } Type TypeBoolType() { static absl::NoDestructor<Type> kInstance( TypeType(BuiltinsArena(), BoolType())); return *kInstance; } Type TypeIntType() { static absl::NoDestructor<Type> kInstance( TypeType(BuiltinsArena(), IntType())); return *kInstance; } Type TypeUintType() { static absl::NoDestructor<Type> kInstance( TypeType(BuiltinsArena(), UintType())); return *kInstance; } Type TypeDoubleType() { static absl::NoDestructor<Type> kInstance( TypeType(BuiltinsArena(), DoubleType())); return *kInstance; } Type TypeStringType() { static absl::NoDestructor<Type> kInstance( TypeType(BuiltinsArena(), StringType())); return *kInstance; } Type TypeBytesType() { static absl::NoDestructor<Type> kInstance( TypeType(BuiltinsArena(), BytesType())); return *kInstance; } Type TypeDurationType() { static absl::NoDestructor<Type> kInstance( TypeType(BuiltinsArena(), DurationType())); return *kInstance; } Type TypeTimestampType() { static absl::NoDestructor<Type> kInstance( TypeType(BuiltinsArena(), TimestampType())); return *kInstance; } Type TypeListType() { static absl::NoDestructor<Type> kInstance( TypeType(BuiltinsArena(), ListOfA())); return *kInstance; } Type TypeMapType() { static absl::NoDestructor<Type> kInstance( TypeType(BuiltinsArena(), MapOfAB())); return *kInstance; } class StandardOverloads { public: static constexpr char kAddInt[] = "add_int64"; static constexpr char kAddUint[] = "add_uint64"; static constexpr char kAddDouble[] = "add_double"; static constexpr char kAddDurationDuration[] = "add_duration_duration"; static constexpr char kAddDurationTimestamp[] = "add_duration_timestamp"; static constexpr char kAddTimestampDuration[] = "add_timestamp_duration"; static constexpr char kAddString[] = "add_string"; static constexpr char kAddBytes[] = "add_bytes"; static constexpr char kAddList[] = "add_list"; static constexpr char kSubtractInt[] = "subtract_int64"; static constexpr char kSubtractUint[] = "subtract_uint64"; static constexpr char kSubtractDouble[] = "subtract_double"; static constexpr char kSubtractDurationDuration[] = "subtract_duration_duration"; static constexpr char kSubtractTimestampDuration[] = "subtract_timestamp_duration"; static constexpr char kSubtractTimestampTimestamp[] = "subtract_timestamp_timestamp"; static constexpr char kMultiplyInt[] = "multiply_int64"; static constexpr char kMultiplyUint[] = "multiply_uint64"; static constexpr char kMultiplyDouble[] = "multiply_double"; static constexpr char kDivideInt[] = 
"divide_int64"; static constexpr char kDivideUint[] = "divide_uint64"; static constexpr char kDivideDouble[] = "divide_double"; static constexpr char kModuloInt[] = "modulo_int64"; static constexpr char kModuloUint[] = "modulo_uint64"; static constexpr char kNegateInt[] = "negate_int64"; static constexpr char kNegateDouble[] = "negate_double"; static constexpr char kNot[] = "logical_not"; static constexpr char kAnd[] = "logical_and"; static constexpr char kOr[] = "logical_or"; static constexpr char kConditional[] = "conditional"; static constexpr char kNotStrictlyFalse[] = "not_strictly_false"; static constexpr char kNotStrictlyFalseDeprecated[] = "__not_strictly_false__"; static constexpr char kEquals[] = "equals"; static constexpr char kNotEquals[] = "not_equals"; static constexpr char kLessBool[] = "less_bool"; static constexpr char kLessString[] = "less_string"; static constexpr char kLessBytes[] = "less_bytes"; static constexpr char kLessDuration[] = "less_duration"; static constexpr char kLessTimestamp[] = "less_timestamp"; static constexpr char kLessInt[] = "less_int64"; static constexpr char kLessIntUint[] = "less_int64_uint64"; static constexpr char kLessIntDouble[] = "less_int64_double"; static constexpr char kLessDouble[] = "less_double"; static constexpr char kLessDoubleInt[] = "less_double_int64"; static constexpr char kLessDoubleUint[] = "less_double_uint64"; static constexpr char kLessUint[] = "less_uint64"; static constexpr char kLessUintInt[] = "less_uint64_int64"; static constexpr char kLessUintDouble[] = "less_uint64_double"; static constexpr char kGreaterBool[] = "greater_bool"; static constexpr char kGreaterString[] = "greater_string"; static constexpr char kGreaterBytes[] = "greater_bytes"; static constexpr char kGreaterDuration[] = "greater_duration"; static constexpr char kGreaterTimestamp[] = "greater_timestamp"; static constexpr char kGreaterInt[] = "greater_int64"; static constexpr char kGreaterIntUint[] = "greater_int64_uint64"; static constexpr char kGreaterIntDouble[] = "greater_int64_double"; static constexpr char kGreaterDouble[] = "greater_double"; static constexpr char kGreaterDoubleInt[] = "greater_double_int64"; static constexpr char kGreaterDoubleUint[] = "greater_double_uint64"; static constexpr char kGreaterUint[] = "greater_uint64"; static constexpr char kGreaterUintInt[] = "greater_uint64_int64"; static constexpr char kGreaterUintDouble[] = "greater_uint64_double"; static constexpr char kGreaterEqualsBool[] = "greater_equals_bool"; static constexpr char kGreaterEqualsString[] = "greater_equals_string"; static constexpr char kGreaterEqualsBytes[] = "greater_equals_bytes"; static constexpr char kGreaterEqualsDuration[] = "greater_equals_duration"; static constexpr char kGreaterEqualsTimestamp[] = "greater_equals_timestamp"; static constexpr char kGreaterEqualsInt[] = "greater_equals_int64"; static constexpr char kGreaterEqualsIntUint[] = "greater_equals_int64_uint64"; static constexpr char kGreaterEqualsIntDouble[] = "greater_equals_int64_double"; static constexpr char kGreaterEqualsDouble[] = "greater_equals_double"; static constexpr char kGreaterEqualsDoubleInt[] = "greater_equals_double_int64"; static constexpr char kGreaterEqualsDoubleUint[] = "greater_equals_double_uint64"; static constexpr char kGreaterEqualsUint[] = "greater_equals_uint64"; static constexpr char kGreaterEqualsUintInt[] = "greater_equals_uint64_int64"; static constexpr char kGreaterEqualsUintDouble[] = "greater_equals_uint_double"; static constexpr char kLessEqualsBool[] = 
"less_equals_bool"; static constexpr char kLessEqualsString[] = "less_equals_string"; static constexpr char kLessEqualsBytes[] = "less_equals_bytes"; static constexpr char kLessEqualsDuration[] = "less_equals_duration"; static constexpr char kLessEqualsTimestamp[] = "less_equals_timestamp"; static constexpr char kLessEqualsInt[] = "less_equals_int64"; static constexpr char kLessEqualsIntUint[] = "less_equals_int64_uint64"; static constexpr char kLessEqualsIntDouble[] = "less_equals_int64_double"; static constexpr char kLessEqualsDouble[] = "less_equals_double"; static constexpr char kLessEqualsDoubleInt[] = "less_equals_double_int64"; static constexpr char kLessEqualsDoubleUint[] = "less_equals_double_uint64"; static constexpr char kLessEqualsUint[] = "less_equals_uint64"; static constexpr char kLessEqualsUintInt[] = "less_equals_uint64_int64"; static constexpr char kLessEqualsUintDouble[] = "less_equals_uint64_double"; static constexpr char kIndexList[] = "index_list"; static constexpr char kIndexMap[] = "index_map"; static constexpr char kInList[] = "in_list"; static constexpr char kInMap[] = "in_map"; static constexpr char kSizeBytes[] = "size_bytes"; static constexpr char kSizeList[] = "size_list"; static constexpr char kSizeMap[] = "size_map"; static constexpr char kSizeString[] = "size_string"; static constexpr char kSizeBytesMember[] = "bytes_size"; static constexpr char kSizeListMember[] = "list_size"; static constexpr char kSizeMapMember[] = "map_size"; static constexpr char kSizeStringMember[] = "string_size"; static constexpr char kContainsString[] = "contains_string"; static constexpr char kEndsWithString[] = "ends_with_string"; static constexpr char kStartsWithString[] = "starts_with_string"; static constexpr char kMatches[] = "matches"; static constexpr char kMatchesMember[] = "matches_string"; static constexpr char kTimestampToYear[] = "timestamp_to_year"; static constexpr char kTimestampToYearWithTz[] = "timestamp_to_year_with_tz"; static constexpr char kTimestampToMonth[] = "timestamp_to_month"; static constexpr char kTimestampToMonthWithTz[] = "timestamp_to_month_with_tz"; static constexpr char kTimestampToDayOfYear[] = "timestamp_to_day_of_year"; static constexpr char kTimestampToDayOfYearWithTz[] = "timestamp_to_day_of_year_with_tz"; static constexpr char kTimestampToDayOfMonth[] = "timestamp_to_day_of_month"; static constexpr char kTimestampToDayOfMonthWithTz[] = "timestamp_to_day_of_month_with_tz"; static constexpr char kTimestampToDayOfWeek[] = "timestamp_to_day_of_week"; static constexpr char kTimestampToDayOfWeekWithTz[] = "timestamp_to_day_of_week_with_tz"; static constexpr char kTimestampToDate[] = "timestamp_to_day_of_month_1_based"; static constexpr char kTimestampToDateWithTz[] = "timestamp_to_day_of_month_1_based_with_tz"; static constexpr char kTimestampToHours[] = "timestamp_to_hours"; static constexpr char kTimestampToHoursWithTz[] = "timestamp_to_hours_with_tz"; static constexpr char kDurationToHours[] = "duration_to_hours"; static constexpr char kTimestampToMinutes[] = "timestamp_to_minutes"; static constexpr char kTimestampToMinutesWithTz[] = "timestamp_to_minutes_with_tz"; static constexpr char kDurationToMinutes[] = "duration_to_minutes"; static constexpr char kTimestampToSeconds[] = "timestamp_to_seconds"; static constexpr char kTimestampToSecondsWithTz[] = "timestamp_to_seconds_tz"; static constexpr char kDurationToSeconds[] = "duration_to_seconds"; static constexpr char kTimestampToMilliseconds[] = "timestamp_to_milliseconds"; static constexpr char 
kTimestampToMillisecondsWithTz[] = "timestamp_to_milliseconds_with_tz"; static constexpr char kDurationToMilliseconds[] = "duration_to_milliseconds"; static constexpr char kToDyn[] = "to_dyn"; static constexpr char kUintToUint[] = "uint64_to_uint64"; static constexpr char kDoubleToUint[] = "double_to_uint64"; static constexpr char kIntToUint[] = "int64_to_uint64"; static constexpr char kStringToUint[] = "string_to_uint64"; static constexpr char kUintToInt[] = "uint64_to_int64"; static constexpr char kDoubleToInt[] = "double_to_int64"; static constexpr char kIntToInt[] = "int64_to_int64"; static constexpr char kStringToInt[] = "string_to_int64"; static constexpr char kTimestampToInt[] = "timestamp_to_int64"; static constexpr char kDurationToInt[] = "duration_to_int64"; static constexpr char kDoubleToDouble[] = "double_to_double"; static constexpr char kUintToDouble[] = "uint64_to_double"; static constexpr char kIntToDouble[] = "int64_to_double"; static constexpr char kStringToDouble[] = "string_to_double"; static constexpr char kBoolToBool[] = "bool_to_bool"; static constexpr char kStringToBool[] = "string_to_bool"; static constexpr char kBytesToBytes[] = "bytes_to_bytes"; static constexpr char kStringToBytes[] = "string_to_bytes"; static constexpr char kStringToString[] = "string_to_string"; static constexpr char kBytesToString[] = "bytes_to_string"; static constexpr char kBoolToString[] = "bool_to_string"; static constexpr char kDoubleToString[] = "double_to_string"; static constexpr char kIntToString[] = "int64_to_string"; static constexpr char kUintToString[] = "uint64_to_string"; static constexpr char kDurationToString[] = "duration_to_string"; static constexpr char kTimestampToString[] = "timestamp_to_string"; static constexpr char kTimestampToTimestamp[] = "timestamp_to_timestamp"; static constexpr char kIntToTimestamp[] = "int64_to_timestamp"; static constexpr char kStringToTimestamp[] = "string_to_timestamp"; static constexpr char kDurationToDuration[] = "duration_to_duration"; static constexpr char kIntToDuration[] = "int64_to_duration"; static constexpr char kStringToDuration[] = "string_to_duration"; static constexpr char kToType[] = "type"; }; absl::Status AddArithmeticOps(TypeCheckerBuilder& builder) { FunctionDecl add_op; add_op.set_name(builtin::kAdd); CEL_RETURN_IF_ERROR(add_op.AddOverload(MakeOverloadDecl( StandardOverloads::kAddInt, IntType(), IntType(), IntType()))); CEL_RETURN_IF_ERROR(add_op.AddOverload( MakeOverloadDecl(StandardOverloads::kAddDouble, DoubleType(), DoubleType(), DoubleType()))); CEL_RETURN_IF_ERROR(add_op.AddOverload(MakeOverloadDecl( StandardOverloads::kAddUint, UintType(), UintType(), UintType()))); CEL_RETURN_IF_ERROR(add_op.AddOverload( MakeOverloadDecl(StandardOverloads::kAddDurationDuration, DurationType(), DurationType(), DurationType()))); CEL_RETURN_IF_ERROR(add_op.AddOverload( MakeOverloadDecl(StandardOverloads::kAddDurationTimestamp, TimestampType(), DurationType(), TimestampType()))); CEL_RETURN_IF_ERROR(add_op.AddOverload( MakeOverloadDecl(StandardOverloads::kAddTimestampDuration, TimestampType(), TimestampType(), DurationType()))); CEL_RETURN_IF_ERROR(add_op.AddOverload(MakeOverloadDecl( StandardOverloads::kAddBytes, BytesType(), BytesType(), BytesType()))); CEL_RETURN_IF_ERROR(add_op.AddOverload( MakeOverloadDecl(StandardOverloads::kAddString, StringType(), StringType(), StringType()))); CEL_RETURN_IF_ERROR(add_op.AddOverload(MakeOverloadDecl( StandardOverloads::kAddList, ListOfA(), ListOfA(), ListOfA()))); 
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(add_op))); FunctionDecl subtract_op; subtract_op.set_name(builtin::kSubtract); CEL_RETURN_IF_ERROR(subtract_op.AddOverload(MakeOverloadDecl( StandardOverloads::kSubtractInt, IntType(), IntType(), IntType()))); CEL_RETURN_IF_ERROR(subtract_op.AddOverload(MakeOverloadDecl( StandardOverloads::kSubtractUint, UintType(), UintType(), UintType()))); CEL_RETURN_IF_ERROR(subtract_op.AddOverload( MakeOverloadDecl(StandardOverloads::kSubtractDouble, DoubleType(), DoubleType(), DoubleType()))); CEL_RETURN_IF_ERROR(subtract_op.AddOverload( MakeOverloadDecl(StandardOverloads::kSubtractDurationDuration, DurationType(), DurationType(), DurationType()))); CEL_RETURN_IF_ERROR(subtract_op.AddOverload( MakeOverloadDecl(StandardOverloads::kSubtractTimestampDuration, TimestampType(), TimestampType(), DurationType()))); CEL_RETURN_IF_ERROR(subtract_op.AddOverload( MakeOverloadDecl(StandardOverloads::kSubtractTimestampTimestamp, DurationType(), TimestampType(), TimestampType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(subtract_op))); FunctionDecl multiply_op; multiply_op.set_name(builtin::kMultiply); CEL_RETURN_IF_ERROR(multiply_op.AddOverload(MakeOverloadDecl( StandardOverloads::kMultiplyInt, IntType(), IntType(), IntType()))); CEL_RETURN_IF_ERROR(multiply_op.AddOverload(MakeOverloadDecl( StandardOverloads::kMultiplyUint, UintType(), UintType(), UintType()))); CEL_RETURN_IF_ERROR(multiply_op.AddOverload( MakeOverloadDecl(StandardOverloads::kMultiplyDouble, DoubleType(), DoubleType(), DoubleType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(multiply_op))); FunctionDecl division_op; division_op.set_name(builtin::kDivide); CEL_RETURN_IF_ERROR(division_op.AddOverload(MakeOverloadDecl( StandardOverloads::kDivideInt, IntType(), IntType(), IntType()))); CEL_RETURN_IF_ERROR(division_op.AddOverload(MakeOverloadDecl( StandardOverloads::kDivideUint, UintType(), UintType(), UintType()))); CEL_RETURN_IF_ERROR(division_op.AddOverload( MakeOverloadDecl(StandardOverloads::kDivideDouble, DoubleType(), DoubleType(), DoubleType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(division_op))); FunctionDecl modulo_op; modulo_op.set_name(builtin::kModulo); CEL_RETURN_IF_ERROR(modulo_op.AddOverload(MakeOverloadDecl( StandardOverloads::kModuloInt, IntType(), IntType(), IntType()))); CEL_RETURN_IF_ERROR(modulo_op.AddOverload(MakeOverloadDecl( StandardOverloads::kModuloUint, UintType(), UintType(), UintType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(modulo_op))); FunctionDecl negate_op; negate_op.set_name(builtin::kNeg); CEL_RETURN_IF_ERROR(negate_op.AddOverload( MakeOverloadDecl(StandardOverloads::kNegateInt, IntType(), IntType()))); CEL_RETURN_IF_ERROR(negate_op.AddOverload(MakeOverloadDecl( StandardOverloads::kNegateDouble, DoubleType(), DoubleType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(negate_op))); return absl::OkStatus(); } absl::Status AddLogicalOps(TypeCheckerBuilder& builder) { FunctionDecl not_op; not_op.set_name(builtin::kNot); CEL_RETURN_IF_ERROR(not_op.AddOverload( MakeOverloadDecl(StandardOverloads::kNot, BoolType(), BoolType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(not_op))); FunctionDecl and_op; and_op.set_name(builtin::kAnd); CEL_RETURN_IF_ERROR(and_op.AddOverload(MakeOverloadDecl( StandardOverloads::kAnd, BoolType(), BoolType(), BoolType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(and_op))); FunctionDecl or_op; or_op.set_name(builtin::kOr); 
CEL_RETURN_IF_ERROR(or_op.AddOverload(MakeOverloadDecl( StandardOverloads::kOr, BoolType(), BoolType(), BoolType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(or_op))); FunctionDecl conditional_op; conditional_op.set_name(builtin::kTernary); CEL_RETURN_IF_ERROR(conditional_op.AddOverload( MakeOverloadDecl(StandardOverloads::kConditional, TypeParamA(), BoolType(), TypeParamA(), TypeParamA()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(conditional_op))); FunctionDecl not_strictly_false; not_strictly_false.set_name(builtin::kNotStrictlyFalse); CEL_RETURN_IF_ERROR(not_strictly_false.AddOverload(MakeOverloadDecl( StandardOverloads::kNotStrictlyFalse, BoolType(), BoolType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(not_strictly_false))); FunctionDecl not_strictly_false_deprecated; not_strictly_false_deprecated.set_name(builtin::kNotStrictlyFalseDeprecated); CEL_RETURN_IF_ERROR(not_strictly_false_deprecated.AddOverload( MakeOverloadDecl(StandardOverloads::kNotStrictlyFalseDeprecated, BoolType(), BoolType()))); CEL_RETURN_IF_ERROR( builder.AddFunction(std::move(not_strictly_false_deprecated))); return absl::OkStatus(); } absl::Status AddTypeConversions(TypeCheckerBuilder& builder) { FunctionDecl to_dyn; to_dyn.set_name(builtin::kDyn); CEL_RETURN_IF_ERROR(to_dyn.AddOverload( MakeOverloadDecl(StandardOverloads::kToDyn, DynType(), TypeParamA()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_dyn))); FunctionDecl to_uint; to_uint.set_name(builtin::kUint); CEL_RETURN_IF_ERROR(to_uint.AddOverload(MakeOverloadDecl( StandardOverloads::kUintToUint, UintType(), UintType()))); CEL_RETURN_IF_ERROR(to_uint.AddOverload( MakeOverloadDecl(StandardOverloads::kIntToUint, UintType(), IntType()))); CEL_RETURN_IF_ERROR(to_uint.AddOverload(MakeOverloadDecl( StandardOverloads::kDoubleToUint, UintType(), DoubleType()))); CEL_RETURN_IF_ERROR(to_uint.AddOverload(MakeOverloadDecl( StandardOverloads::kStringToUint, UintType(), StringType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_uint))); FunctionDecl to_int; to_int.set_name(builtin::kInt); CEL_RETURN_IF_ERROR(to_int.AddOverload( MakeOverloadDecl(StandardOverloads::kIntToInt, IntType(), IntType()))); CEL_RETURN_IF_ERROR(to_int.AddOverload( MakeOverloadDecl(StandardOverloads::kUintToInt, IntType(), UintType()))); CEL_RETURN_IF_ERROR(to_int.AddOverload(MakeOverloadDecl( StandardOverloads::kDoubleToInt, IntType(), DoubleType()))); CEL_RETURN_IF_ERROR(to_int.AddOverload(MakeOverloadDecl( StandardOverloads::kStringToInt, IntType(), StringType()))); CEL_RETURN_IF_ERROR(to_int.AddOverload(MakeOverloadDecl( StandardOverloads::kTimestampToInt, IntType(), TimestampType()))); CEL_RETURN_IF_ERROR(to_int.AddOverload(MakeOverloadDecl( StandardOverloads::kDurationToInt, IntType(), DurationType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_int))); FunctionDecl to_double; to_double.set_name(builtin::kDouble); CEL_RETURN_IF_ERROR(to_double.AddOverload(MakeOverloadDecl( StandardOverloads::kDoubleToDouble, DoubleType(), DoubleType()))); CEL_RETURN_IF_ERROR(to_double.AddOverload(MakeOverloadDecl( StandardOverloads::kIntToDouble, DoubleType(), IntType()))); CEL_RETURN_IF_ERROR(to_double.AddOverload(MakeOverloadDecl( StandardOverloads::kUintToDouble, DoubleType(), UintType()))); CEL_RETURN_IF_ERROR(to_double.AddOverload(MakeOverloadDecl( StandardOverloads::kStringToDouble, DoubleType(), StringType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_double))); FunctionDecl to_bool; to_bool.set_name("bool"); 
CEL_RETURN_IF_ERROR(to_bool.AddOverload(MakeOverloadDecl( StandardOverloads::kBoolToBool, BoolType(), BoolType()))); CEL_RETURN_IF_ERROR(to_bool.AddOverload(MakeOverloadDecl( StandardOverloads::kStringToBool, BoolType(), StringType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_bool))); FunctionDecl to_string; to_string.set_name(builtin::kString); CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl( StandardOverloads::kStringToString, StringType(), StringType()))); CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl( StandardOverloads::kBytesToString, StringType(), BytesType()))); CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl( StandardOverloads::kBoolToString, StringType(), BoolType()))); CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl( StandardOverloads::kDoubleToString, StringType(), DoubleType()))); CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl( StandardOverloads::kIntToString, StringType(), IntType()))); CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl( StandardOverloads::kUintToString, StringType(), UintType()))); CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl( StandardOverloads::kTimestampToString, StringType(), TimestampType()))); CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl( StandardOverloads::kDurationToString, StringType(), DurationType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_string))); FunctionDecl to_bytes; to_bytes.set_name(builtin::kBytes); CEL_RETURN_IF_ERROR(to_bytes.AddOverload(MakeOverloadDecl( StandardOverloads::kBytesToBytes, BytesType(), BytesType()))); CEL_RETURN_IF_ERROR(to_bytes.AddOverload(MakeOverloadDecl( StandardOverloads::kStringToBytes, BytesType(), StringType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_bytes))); FunctionDecl to_timestamp; to_timestamp.set_name(builtin::kTimestamp); CEL_RETURN_IF_ERROR(to_timestamp.AddOverload( MakeOverloadDecl(StandardOverloads::kTimestampToTimestamp, TimestampType(), TimestampType()))); CEL_RETURN_IF_ERROR(to_timestamp.AddOverload(MakeOverloadDecl( StandardOverloads::kStringToTimestamp, TimestampType(), StringType()))); CEL_RETURN_IF_ERROR(to_timestamp.AddOverload(MakeOverloadDecl( StandardOverloads::kIntToTimestamp, TimestampType(), IntType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_timestamp))); FunctionDecl to_duration; to_duration.set_name(builtin::kDuration); CEL_RETURN_IF_ERROR(to_duration.AddOverload(MakeOverloadDecl( StandardOverloads::kDurationToDuration, DurationType(), DurationType()))); CEL_RETURN_IF_ERROR(to_duration.AddOverload(MakeOverloadDecl( StandardOverloads::kStringToDuration, DurationType(), StringType()))); CEL_RETURN_IF_ERROR(to_duration.AddOverload(MakeOverloadDecl( StandardOverloads::kIntToDuration, DurationType(), IntType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_duration))); FunctionDecl to_type; to_type.set_name(builtin::kType); CEL_RETURN_IF_ERROR(to_type.AddOverload(MakeOverloadDecl( StandardOverloads::kToType, Type(TypeOfA()), TypeParamA()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_type))); return absl::OkStatus(); } absl::Status AddEqualityOps(TypeCheckerBuilder& builder) { FunctionDecl equals_op; equals_op.set_name(builtin::kEqual); CEL_RETURN_IF_ERROR(equals_op.AddOverload(MakeOverloadDecl( StandardOverloads::kEquals, BoolType(), TypeParamA(), TypeParamA()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(equals_op))); FunctionDecl not_equals_op; not_equals_op.set_name(builtin::kInequal); 
CEL_RETURN_IF_ERROR(not_equals_op.AddOverload(MakeOverloadDecl( StandardOverloads::kNotEquals, BoolType(), TypeParamA(), TypeParamA()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(not_equals_op))); return absl::OkStatus(); } absl::Status AddConatainerOps(TypeCheckerBuilder& builder) { FunctionDecl index; index.set_name(builtin::kIndex); CEL_RETURN_IF_ERROR(index.AddOverload(MakeOverloadDecl( StandardOverloads::kIndexList, TypeParamA(), ListOfA(), IntType()))); CEL_RETURN_IF_ERROR(index.AddOverload(MakeOverloadDecl( StandardOverloads::kIndexMap, TypeParamB(), MapOfAB(), TypeParamA()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(index))); FunctionDecl in_op; in_op.set_name(builtin::kIn); CEL_RETURN_IF_ERROR(in_op.AddOverload(MakeOverloadDecl( StandardOverloads::kInList, BoolType(), TypeParamA(), ListOfA()))); CEL_RETURN_IF_ERROR(in_op.AddOverload(MakeOverloadDecl( StandardOverloads::kInMap, BoolType(), TypeParamA(), MapOfAB()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(in_op))); FunctionDecl in_function_deprecated; in_function_deprecated.set_name(builtin::kInFunction); CEL_RETURN_IF_ERROR(in_function_deprecated.AddOverload(MakeOverloadDecl( StandardOverloads::kInList, BoolType(), TypeParamA(), ListOfA()))); CEL_RETURN_IF_ERROR(in_function_deprecated.AddOverload(MakeOverloadDecl( StandardOverloads::kInMap, BoolType(), TypeParamA(), MapOfAB()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(in_function_deprecated))); FunctionDecl in_op_deprecated; in_op_deprecated.set_name(builtin::kInDeprecated); CEL_RETURN_IF_ERROR(in_op_deprecated.AddOverload(MakeOverloadDecl( StandardOverloads::kInList, BoolType(), TypeParamA(), ListOfA()))); CEL_RETURN_IF_ERROR(in_op_deprecated.AddOverload(MakeOverloadDecl( StandardOverloads::kInMap, BoolType(), TypeParamA(), MapOfAB()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(in_op_deprecated))); FunctionDecl size; size.set_name(builtin::kSize); CEL_RETURN_IF_ERROR(size.AddOverload( MakeOverloadDecl(StandardOverloads::kSizeList, IntType(), ListOfA()))); CEL_RETURN_IF_ERROR(size.AddOverload(MakeMemberOverloadDecl( StandardOverloads::kSizeListMember, IntType(), ListOfA()))); CEL_RETURN_IF_ERROR(size.AddOverload( MakeOverloadDecl(StandardOverloads::kSizeMap, IntType(), MapOfAB()))); CEL_RETURN_IF_ERROR(size.AddOverload(MakeMemberOverloadDecl( StandardOverloads::kSizeMapMember, IntType(), MapOfAB()))); CEL_RETURN_IF_ERROR(size.AddOverload( MakeOverloadDecl(StandardOverloads::kSizeBytes, IntType(), BytesType()))); CEL_RETURN_IF_ERROR(size.AddOverload(MakeMemberOverloadDecl( StandardOverloads::kSizeBytesMember, IntType(), BytesType()))); CEL_RETURN_IF_ERROR(size.AddOverload(MakeOverloadDecl( StandardOverloads::kSizeString, IntType(), StringType()))); CEL_RETURN_IF_ERROR(size.AddOverload(MakeMemberOverloadDecl( StandardOverloads::kSizeStringMember, IntType(), StringType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(size))); return absl::OkStatus(); } absl::Status AddRelationOps(TypeCheckerBuilder& builder) { FunctionDecl less_op; less_op.set_name(builtin::kLess); CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl( StandardOverloads::kLessInt, BoolType(), IntType(), IntType()))); CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl( StandardOverloads::kLessUint, BoolType(), UintType(), UintType()))); CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl( StandardOverloads::kLessDouble, BoolType(), DoubleType(), DoubleType()))); CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl( StandardOverloads::kLessBool, 
BoolType(), BoolType(), BoolType()))); CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl( StandardOverloads::kLessString, BoolType(), StringType(), StringType()))); CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl( StandardOverloads::kLessBytes, BoolType(), BytesType(), BytesType()))); CEL_RETURN_IF_ERROR(less_op.AddOverload( MakeOverloadDecl(StandardOverloads::kLessDuration, BoolType(), DurationType(), DurationType()))); CEL_RETURN_IF_ERROR(less_op.AddOverload( MakeOverloadDecl(StandardOverloads::kLessTimestamp, BoolType(), TimestampType(), TimestampType()))); FunctionDecl greater_op; greater_op.set_name(builtin::kGreater); CEL_RETURN_IF_ERROR(greater_op.AddOverload(MakeOverloadDecl( StandardOverloads::kGreaterInt, BoolType(), IntType(), IntType()))); CEL_RETURN_IF_ERROR(greater_op.AddOverload(MakeOverloadDecl( StandardOverloads::kGreaterUint, BoolType(), UintType(), UintType()))); CEL_RETURN_IF_ERROR(greater_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterDouble, BoolType(), DoubleType(), DoubleType()))); CEL_RETURN_IF_ERROR(greater_op.AddOverload(MakeOverloadDecl( StandardOverloads::kGreaterBool, BoolType(), BoolType(), BoolType()))); CEL_RETURN_IF_ERROR(greater_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterString, BoolType(), StringType(), StringType()))); CEL_RETURN_IF_ERROR(greater_op.AddOverload(MakeOverloadDecl( StandardOverloads::kGreaterBytes, BoolType(), BytesType(), BytesType()))); CEL_RETURN_IF_ERROR(greater_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterDuration, BoolType(), DurationType(), DurationType()))); CEL_RETURN_IF_ERROR(greater_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterTimestamp, BoolType(), TimestampType(), TimestampType()))); FunctionDecl less_equals_op; less_equals_op.set_name(builtin::kLessOrEqual); CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(MakeOverloadDecl( StandardOverloads::kLessEqualsInt, BoolType(), IntType(), IntType()))); CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(MakeOverloadDecl( StandardOverloads::kLessEqualsUint, BoolType(), UintType(), UintType()))); CEL_RETURN_IF_ERROR(less_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kLessEqualsDouble, BoolType(), DoubleType(), DoubleType()))); CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(MakeOverloadDecl( StandardOverloads::kLessEqualsBool, BoolType(), BoolType(), BoolType()))); CEL_RETURN_IF_ERROR(less_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kLessEqualsString, BoolType(), StringType(), StringType()))); CEL_RETURN_IF_ERROR(less_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kLessEqualsBytes, BoolType(), BytesType(), BytesType()))); CEL_RETURN_IF_ERROR(less_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kLessEqualsDuration, BoolType(), DurationType(), DurationType()))); CEL_RETURN_IF_ERROR(less_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kLessEqualsTimestamp, BoolType(), TimestampType(), TimestampType()))); FunctionDecl greater_equals_op; greater_equals_op.set_name(builtin::kGreaterOrEqual); CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(MakeOverloadDecl( StandardOverloads::kGreaterEqualsInt, BoolType(), IntType(), IntType()))); CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterEqualsUint, BoolType(), UintType(), UintType()))); CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterEqualsDouble, BoolType(), DoubleType(), DoubleType()))); 
CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterEqualsBool, BoolType(), BoolType(), BoolType()))); CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterEqualsString, BoolType(), StringType(), StringType()))); CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterEqualsBytes, BoolType(), BytesType(), BytesType()))); CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterEqualsDuration, BoolType(), DurationType(), DurationType()))); CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterEqualsTimestamp, BoolType(), TimestampType(), TimestampType()))); if (builder.options().enable_cross_numeric_comparisons) { CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl( StandardOverloads::kLessIntUint, BoolType(), IntType(), UintType()))); CEL_RETURN_IF_ERROR(less_op.AddOverload( MakeOverloadDecl(StandardOverloads::kLessIntDouble, BoolType(), IntType(), DoubleType()))); CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl( StandardOverloads::kLessUintInt, BoolType(), UintType(), IntType()))); CEL_RETURN_IF_ERROR(less_op.AddOverload( MakeOverloadDecl(StandardOverloads::kLessUintDouble, BoolType(), UintType(), DoubleType()))); CEL_RETURN_IF_ERROR(less_op.AddOverload( MakeOverloadDecl(StandardOverloads::kLessDoubleInt, BoolType(), DoubleType(), IntType()))); CEL_RETURN_IF_ERROR(less_op.AddOverload( MakeOverloadDecl(StandardOverloads::kLessDoubleUint, BoolType(), DoubleType(), UintType()))); CEL_RETURN_IF_ERROR(greater_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterIntUint, BoolType(), IntType(), UintType()))); CEL_RETURN_IF_ERROR(greater_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterIntDouble, BoolType(), IntType(), DoubleType()))); CEL_RETURN_IF_ERROR(greater_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterUintInt, BoolType(), UintType(), IntType()))); CEL_RETURN_IF_ERROR(greater_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterUintDouble, BoolType(), UintType(), DoubleType()))); CEL_RETURN_IF_ERROR(greater_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterDoubleInt, BoolType(), DoubleType(), IntType()))); CEL_RETURN_IF_ERROR(greater_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterDoubleUint, BoolType(), DoubleType(), UintType()))); CEL_RETURN_IF_ERROR(less_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kLessEqualsIntUint, BoolType(), IntType(), UintType()))); CEL_RETURN_IF_ERROR(less_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kLessEqualsIntDouble, BoolType(), IntType(), DoubleType()))); CEL_RETURN_IF_ERROR(less_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kLessEqualsUintInt, BoolType(), UintType(), IntType()))); CEL_RETURN_IF_ERROR(less_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kLessEqualsUintDouble, BoolType(), UintType(), DoubleType()))); CEL_RETURN_IF_ERROR(less_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kLessEqualsDoubleInt, BoolType(), DoubleType(), IntType()))); CEL_RETURN_IF_ERROR(less_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kLessEqualsDoubleUint, BoolType(), DoubleType(), UintType()))); CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterEqualsIntUint, BoolType(), IntType(), UintType()))); CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload( 
MakeOverloadDecl(StandardOverloads::kGreaterEqualsIntDouble, BoolType(), IntType(), DoubleType()))); CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterEqualsUintInt, BoolType(), UintType(), IntType()))); CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterEqualsUintDouble, BoolType(), UintType(), DoubleType()))); CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterEqualsDoubleInt, BoolType(), DoubleType(), IntType()))); CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload( MakeOverloadDecl(StandardOverloads::kGreaterEqualsDoubleUint, BoolType(), DoubleType(), UintType()))); } CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(less_op))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(greater_op))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(less_equals_op))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(greater_equals_op))); return absl::OkStatus(); } absl::Status AddStringFunctions(TypeCheckerBuilder& builder) { FunctionDecl contains; contains.set_name(builtin::kStringContains); CEL_RETURN_IF_ERROR(contains.AddOverload( MakeMemberOverloadDecl(StandardOverloads::kContainsString, BoolType(), StringType(), StringType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(contains))); FunctionDecl starts_with; starts_with.set_name(builtin::kStringStartsWith); CEL_RETURN_IF_ERROR(starts_with.AddOverload( MakeMemberOverloadDecl(StandardOverloads::kStartsWithString, BoolType(), StringType(), StringType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(starts_with))); FunctionDecl ends_with; ends_with.set_name(builtin::kStringEndsWith); CEL_RETURN_IF_ERROR(ends_with.AddOverload( MakeMemberOverloadDecl(StandardOverloads::kEndsWithString, BoolType(), StringType(), StringType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(ends_with))); return absl::OkStatus(); } absl::Status AddRegexFunctions(TypeCheckerBuilder& builder) { FunctionDecl matches; matches.set_name(builtin::kRegexMatch); CEL_RETURN_IF_ERROR(matches.AddOverload( MakeMemberOverloadDecl(StandardOverloads::kMatchesMember, BoolType(), StringType(), StringType()))); CEL_RETURN_IF_ERROR(matches.AddOverload(MakeOverloadDecl( StandardOverloads::kMatches, BoolType(), StringType(), StringType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(matches))); return absl::OkStatus(); } absl::Status AddTimeFunctions(TypeCheckerBuilder& builder) { FunctionDecl get_full_year; get_full_year.set_name(builtin::kFullYear); CEL_RETURN_IF_ERROR(get_full_year.AddOverload(MakeMemberOverloadDecl( StandardOverloads::kTimestampToYear, IntType(), TimestampType()))); CEL_RETURN_IF_ERROR(get_full_year.AddOverload( MakeMemberOverloadDecl(StandardOverloads::kTimestampToYearWithTz, IntType(), TimestampType(), StringType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_full_year))); FunctionDecl get_month; get_month.set_name(builtin::kMonth); CEL_RETURN_IF_ERROR(get_month.AddOverload(MakeMemberOverloadDecl( StandardOverloads::kTimestampToMonth, IntType(), TimestampType()))); CEL_RETURN_IF_ERROR(get_month.AddOverload( MakeMemberOverloadDecl(StandardOverloads::kTimestampToMonthWithTz, IntType(), TimestampType(), StringType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_month))); FunctionDecl get_day_of_year; get_day_of_year.set_name(builtin::kDayOfYear); CEL_RETURN_IF_ERROR(get_day_of_year.AddOverload(MakeMemberOverloadDecl( StandardOverloads::kTimestampToDayOfYear, IntType(), 
TimestampType()))); CEL_RETURN_IF_ERROR(get_day_of_year.AddOverload( MakeMemberOverloadDecl(StandardOverloads::kTimestampToDayOfYearWithTz, IntType(), TimestampType(), StringType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_day_of_year))); FunctionDecl get_day_of_month; get_day_of_month.set_name(builtin::kDayOfMonth); CEL_RETURN_IF_ERROR(get_day_of_month.AddOverload(MakeMemberOverloadDecl( StandardOverloads::kTimestampToDayOfMonth, IntType(), TimestampType()))); CEL_RETURN_IF_ERROR(get_day_of_month.AddOverload( MakeMemberOverloadDecl(StandardOverloads::kTimestampToDayOfMonthWithTz, IntType(), TimestampType(), StringType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_day_of_month))); FunctionDecl get_date; get_date.set_name(builtin::kDate); CEL_RETURN_IF_ERROR(get_date.AddOverload(MakeMemberOverloadDecl( StandardOverloads::kTimestampToDate, IntType(), TimestampType()))); CEL_RETURN_IF_ERROR(get_date.AddOverload( MakeMemberOverloadDecl(StandardOverloads::kTimestampToDateWithTz, IntType(), TimestampType(), StringType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_date))); FunctionDecl get_day_of_week; get_day_of_week.set_name(builtin::kDayOfWeek); CEL_RETURN_IF_ERROR(get_day_of_week.AddOverload(MakeMemberOverloadDecl( StandardOverloads::kTimestampToDayOfWeek, IntType(), TimestampType()))); CEL_RETURN_IF_ERROR(get_day_of_week.AddOverload( MakeMemberOverloadDecl(StandardOverloads::kTimestampToDayOfWeekWithTz, IntType(), TimestampType(), StringType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_day_of_week))); FunctionDecl get_hours; get_hours.set_name(builtin::kHours); CEL_RETURN_IF_ERROR(get_hours.AddOverload(MakeMemberOverloadDecl( StandardOverloads::kTimestampToHours, IntType(), TimestampType()))); CEL_RETURN_IF_ERROR(get_hours.AddOverload( MakeMemberOverloadDecl(StandardOverloads::kTimestampToHoursWithTz, IntType(), TimestampType(), StringType()))); CEL_RETURN_IF_ERROR(get_hours.AddOverload(MakeMemberOverloadDecl( StandardOverloads::kDurationToHours, IntType(), DurationType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_hours))); FunctionDecl get_minutes; get_minutes.set_name(builtin::kMinutes); CEL_RETURN_IF_ERROR(get_minutes.AddOverload(MakeMemberOverloadDecl( StandardOverloads::kTimestampToMinutes, IntType(), TimestampType()))); CEL_RETURN_IF_ERROR(get_minutes.AddOverload( MakeMemberOverloadDecl(StandardOverloads::kTimestampToMinutesWithTz, IntType(), TimestampType(), StringType()))); CEL_RETURN_IF_ERROR(get_minutes.AddOverload(MakeMemberOverloadDecl( StandardOverloads::kDurationToMinutes, IntType(), DurationType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_minutes))); FunctionDecl get_seconds; get_seconds.set_name(builtin::kSeconds); CEL_RETURN_IF_ERROR(get_seconds.AddOverload(MakeMemberOverloadDecl( StandardOverloads::kTimestampToSeconds, IntType(), TimestampType()))); CEL_RETURN_IF_ERROR(get_seconds.AddOverload( MakeMemberOverloadDecl(StandardOverloads::kTimestampToSecondsWithTz, IntType(), TimestampType(), StringType()))); CEL_RETURN_IF_ERROR(get_seconds.AddOverload(MakeMemberOverloadDecl( StandardOverloads::kDurationToSeconds, IntType(), DurationType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_seconds))); FunctionDecl get_milliseconds; get_milliseconds.set_name(builtin::kMilliseconds); CEL_RETURN_IF_ERROR(get_milliseconds.AddOverload( MakeMemberOverloadDecl(StandardOverloads::kTimestampToMilliseconds, IntType(), TimestampType()))); CEL_RETURN_IF_ERROR(get_milliseconds.AddOverload( 
MakeMemberOverloadDecl(StandardOverloads::kTimestampToMillisecondsWithTz, IntType(), TimestampType(), StringType()))); CEL_RETURN_IF_ERROR(get_milliseconds.AddOverload(MakeMemberOverloadDecl( StandardOverloads::kDurationToMilliseconds, IntType(), DurationType()))); CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_milliseconds))); return absl::OkStatus(); } absl::Status AddTypeConstantVariables(TypeCheckerBuilder& builder) { CEL_RETURN_IF_ERROR( builder.AddVariable(MakeVariableDecl("bool", TypeBoolType()))); CEL_RETURN_IF_ERROR( builder.AddVariable(MakeVariableDecl("null_type", TypeNullType()))); CEL_RETURN_IF_ERROR( builder.AddVariable(MakeVariableDecl(builtin::kInt, TypeIntType()))); CEL_RETURN_IF_ERROR( builder.AddVariable(MakeVariableDecl(builtin::kUint, TypeUintType()))); CEL_RETURN_IF_ERROR(builder.AddVariable( MakeVariableDecl(builtin::kDouble, TypeDoubleType()))); CEL_RETURN_IF_ERROR(builder.AddVariable( MakeVariableDecl(builtin::kString, TypeStringType()))); CEL_RETURN_IF_ERROR( builder.AddVariable(MakeVariableDecl(builtin::kBytes, TypeBytesType()))); CEL_RETURN_IF_ERROR(builder.AddVariable( MakeVariableDecl(builtin::kDuration, TypeDurationType()))); CEL_RETURN_IF_ERROR(builder.AddVariable( MakeVariableDecl(builtin::kTimestamp, TypeTimestampType()))); CEL_RETURN_IF_ERROR( builder.AddVariable(MakeVariableDecl("list", TypeListType()))); CEL_RETURN_IF_ERROR( builder.AddVariable(MakeVariableDecl("map", TypeMapType()))); CEL_RETURN_IF_ERROR(builder.AddVariable(MakeVariableDecl("type", TypeOfA()))); return absl::OkStatus(); } absl::Status AddEnumConstants(TypeCheckerBuilder& builder) { VariableDecl pb_null; pb_null.set_name("google.protobuf.NullValue.NULL_VALUE"); pb_null.set_type(NullType()); pb_null.set_value(Constant(nullptr)); CEL_RETURN_IF_ERROR(builder.AddVariable(std::move(pb_null))); return absl::OkStatus(); } absl::Status AddStandardLibraryDecls(TypeCheckerBuilder& builder) { CEL_RETURN_IF_ERROR(AddLogicalOps(builder)); CEL_RETURN_IF_ERROR(AddArithmeticOps(builder)); CEL_RETURN_IF_ERROR(AddTypeConversions(builder)); CEL_RETURN_IF_ERROR(AddEqualityOps(builder)); CEL_RETURN_IF_ERROR(AddConatainerOps(builder)); CEL_RETURN_IF_ERROR(AddRelationOps(builder)); CEL_RETURN_IF_ERROR(AddStringFunctions(builder)); CEL_RETURN_IF_ERROR(AddRegexFunctions(builder)); CEL_RETURN_IF_ERROR(AddTimeFunctions(builder)); CEL_RETURN_IF_ERROR(AddTypeConstantVariables(builder)); CEL_RETURN_IF_ERROR(AddEnumConstants(builder)); return absl::OkStatus(); } } CheckerLibrary StandardLibrary() { return {"stdlib", AddStandardLibraryDecls}; } }
#include "checker/standard_library.h" #include <memory> #include <string> #include <utility> #include "absl/status/status.h" #include "absl/status/status_matchers.h" #include "base/ast_internal/ast_impl.h" #include "base/ast_internal/expr.h" #include "checker/internal/test_ast_helpers.h" #include "checker/type_checker.h" #include "checker/type_checker_builder.h" #include "checker/validation_result.h" #include "common/ast.h" #include "common/constant.h" #include "internal/testing.h" namespace cel { namespace { using ::absl_testing::IsOk; using ::absl_testing::StatusIs; using ::cel::ast_internal::AstImpl; using ::cel::ast_internal::Reference; using ::testing::IsEmpty; using ::testing::Pointee; using ::testing::Property; TEST(StandardLibraryTest, StandardLibraryAddsDecls) { TypeCheckerBuilder builder; EXPECT_THAT(builder.AddLibrary(StandardLibrary()), IsOk()); EXPECT_THAT(std::move(builder).Build(), IsOk()); } TEST(StandardLibraryTest, StandardLibraryErrorsIfAddedTwice) { TypeCheckerBuilder builder; EXPECT_THAT(builder.AddLibrary(StandardLibrary()), IsOk()); EXPECT_THAT(builder.AddLibrary(StandardLibrary()), StatusIs(absl::StatusCode::kAlreadyExists)); } class StandardLibraryDefinitionsTest : public ::testing::Test { public: void SetUp() override { TypeCheckerBuilder builder; ASSERT_THAT(builder.AddLibrary(StandardLibrary()), IsOk()); ASSERT_OK_AND_ASSIGN(stdlib_type_checker_, std::move(builder).Build()); } protected: std::unique_ptr<TypeChecker> stdlib_type_checker_; }; class StdlibTypeVarDefinitionTest : public StandardLibraryDefinitionsTest, public testing::WithParamInterface<std::string> {}; TEST_P(StdlibTypeVarDefinitionTest, DefinesTypeConstants) { auto ast = std::make_unique<AstImpl>(); ast->root_expr().mutable_ident_expr().set_name(GetParam()); ast->root_expr().set_id(1); ASSERT_OK_AND_ASSIGN(ValidationResult result, stdlib_type_checker_->Check(std::move(ast))); EXPECT_THAT(result.GetIssues(), IsEmpty()); ASSERT_OK_AND_ASSIGN(std::unique_ptr<Ast> checked_ast, result.ReleaseAst()); const auto& checked_impl = AstImpl::CastFromPublicAst(*checked_ast); EXPECT_THAT(checked_impl.GetReference(1), Pointee(Property(&Reference::name, GetParam()))); } INSTANTIATE_TEST_SUITE_P( StdlibTypeVarDefinitions, StdlibTypeVarDefinitionTest, ::testing::Values("bool", "int", "uint", "double", "string", "bytes", "list", "map", "duration", "timestamp", "null_type"), [](const auto& info) -> std::string { return info.param; }); TEST_F(StandardLibraryDefinitionsTest, DefinesProtoStructNull) { auto ast = std::make_unique<AstImpl>(); auto& enumerator = ast->root_expr(); enumerator.set_id(4); enumerator.mutable_select_expr().set_field("NULL_VALUE"); auto& enumeration = enumerator.mutable_select_expr().mutable_operand(); enumeration.set_id(3); enumeration.mutable_select_expr().set_field("NullValue"); auto& protobuf = enumeration.mutable_select_expr().mutable_operand(); protobuf.set_id(2); protobuf.mutable_select_expr().set_field("protobuf"); auto& google = protobuf.mutable_select_expr().mutable_operand(); google.set_id(1); google.mutable_ident_expr().set_name("google"); ASSERT_OK_AND_ASSIGN(ValidationResult result, stdlib_type_checker_->Check(std::move(ast))); EXPECT_THAT(result.GetIssues(), IsEmpty()); ASSERT_OK_AND_ASSIGN(std::unique_ptr<Ast> checked_ast, result.ReleaseAst()); const auto& checked_impl = AstImpl::CastFromPublicAst(*checked_ast); EXPECT_THAT(checked_impl.GetReference(4), Pointee(Property(&Reference::name, "google.protobuf.NullValue.NULL_VALUE"))); } struct DefinitionsTestCase { std::string expr; 
bool type_check_success = true; CheckerOptions options; }; class StdLibDefinitionsTest : public ::testing::TestWithParam<DefinitionsTestCase> { public: void SetUp() override { TypeCheckerBuilder builder; ASSERT_THAT(builder.AddLibrary(StandardLibrary()), IsOk()); ASSERT_OK_AND_ASSIGN(stdlib_type_checker_, std::move(builder).Build()); } protected: std::unique_ptr<TypeChecker> stdlib_type_checker_; }; TEST_P(StdLibDefinitionsTest, Runner) { TypeCheckerBuilder builder(GetParam().options); ASSERT_THAT(builder.AddLibrary(StandardLibrary()), IsOk()); ASSERT_OK_AND_ASSIGN(std::unique_ptr<TypeChecker> type_checker, std::move(builder).Build()); ASSERT_OK_AND_ASSIGN(std::unique_ptr<Ast> ast, checker_internal::MakeTestParsedAst(GetParam().expr)); ASSERT_OK_AND_ASSIGN(auto result, type_checker->Check(std::move(ast))); EXPECT_EQ(result.IsValid(), GetParam().type_check_success); } INSTANTIATE_TEST_SUITE_P( Strings, StdLibDefinitionsTest, ::testing::Values(DefinitionsTestCase{ "'123'.size()", }, DefinitionsTestCase{ "size('123')", }, DefinitionsTestCase{ "'123' + '123'", }, DefinitionsTestCase{ "'123' + '123'", }, DefinitionsTestCase{ "'123' + '123'", }, DefinitionsTestCase{ "'123'.endsWith('123')", }, DefinitionsTestCase{ "'123'.startsWith('123')", }, DefinitionsTestCase{ "'123'.contains('123')", }, DefinitionsTestCase{ "'123'.matches(r'123')", }, DefinitionsTestCase{ "matches('123', r'123')", })); INSTANTIATE_TEST_SUITE_P(TypeCasts, StdLibDefinitionsTest, ::testing::Values(DefinitionsTestCase{ "int(1)", }, DefinitionsTestCase{ "uint(1)", }, DefinitionsTestCase{ "double(1)", }, DefinitionsTestCase{ "string(1)", }, DefinitionsTestCase{ "bool('true')", }, DefinitionsTestCase{ "timestamp(0)", }, DefinitionsTestCase{ "duration('1s')", })); INSTANTIATE_TEST_SUITE_P(Arithmetic, StdLibDefinitionsTest, ::testing::Values(DefinitionsTestCase{ "1 + 2", }, DefinitionsTestCase{ "1 - 2", }, DefinitionsTestCase{ "1 / 2", }, DefinitionsTestCase{ "1 * 2", }, DefinitionsTestCase{ "2 % 1", }, DefinitionsTestCase{ "-1", })); INSTANTIATE_TEST_SUITE_P( TimeArithmetic, StdLibDefinitionsTest, ::testing::Values(DefinitionsTestCase{ "timestamp(0) + duration('1s')", }, DefinitionsTestCase{ "timestamp(0) - duration('1s')", }, DefinitionsTestCase{ "timestamp(0) - timestamp(0)", }, DefinitionsTestCase{ "duration('1s') + duration('1s')", }, DefinitionsTestCase{ "duration('1s') - duration('1s')", })); INSTANTIATE_TEST_SUITE_P(NumericComparisons, StdLibDefinitionsTest, ::testing::Values(DefinitionsTestCase{ "1 > 2", }, DefinitionsTestCase{ "1 < 2", }, DefinitionsTestCase{ "1 >= 2", }, DefinitionsTestCase{ "1 <= 2", })); INSTANTIATE_TEST_SUITE_P( CrossNumericComparisons, StdLibDefinitionsTest, ::testing::Values( DefinitionsTestCase{ "1u < 2", true, {.enable_cross_numeric_comparisons = true}}, DefinitionsTestCase{ "1u > 2", true, {.enable_cross_numeric_comparisons = true}}, DefinitionsTestCase{ "1u <= 2", true, {.enable_cross_numeric_comparisons = true}}, DefinitionsTestCase{ "1u >= 2", true, {.enable_cross_numeric_comparisons = true}})); INSTANTIATE_TEST_SUITE_P( TimeComparisons, StdLibDefinitionsTest, ::testing::Values(DefinitionsTestCase{ "duration('1s') < duration('1s')", }, DefinitionsTestCase{ "duration('1s') > duration('1s')", }, DefinitionsTestCase{ "duration('1s') <= duration('1s')", }, DefinitionsTestCase{ "duration('1s') >= duration('1s')", }, DefinitionsTestCase{ "timestamp(0) < timestamp(0)", }, DefinitionsTestCase{ "timestamp(0) > timestamp(0)", }, DefinitionsTestCase{ "timestamp(0) <= timestamp(0)", }, DefinitionsTestCase{ 
"timestamp(0) >= timestamp(0)", })); INSTANTIATE_TEST_SUITE_P( TimeAccessors, StdLibDefinitionsTest, ::testing::Values( DefinitionsTestCase{ "timestamp(0).getFullYear()", }, DefinitionsTestCase{ "timestamp(0).getFullYear('-08:00')", }, DefinitionsTestCase{ "timestamp(0).getMonth()", }, DefinitionsTestCase{ "timestamp(0).getMonth('-08:00')", }, DefinitionsTestCase{ "timestamp(0).getDayOfYear()", }, DefinitionsTestCase{ "timestamp(0).getDayOfYear('-08:00')", }, DefinitionsTestCase{ "timestamp(0).getDate()", }, DefinitionsTestCase{ "timestamp(0).getDate('-08:00')", }, DefinitionsTestCase{ "timestamp(0).getDayOfWeek()", }, DefinitionsTestCase{ "timestamp(0).getDayOfWeek('-08:00')", }, DefinitionsTestCase{ "timestamp(0).getHours()", }, DefinitionsTestCase{ "duration('1s').getHours()", }, DefinitionsTestCase{ "timestamp(0).getHours('-08:00')", }, DefinitionsTestCase{ "timestamp(0).getMinutes()", }, DefinitionsTestCase{ "duration('1s').getMinutes()", }, DefinitionsTestCase{ "timestamp(0).getMinutes('-08:00')", }, DefinitionsTestCase{ "timestamp(0).getSeconds()", }, DefinitionsTestCase{ "duration('1s').getSeconds()", }, DefinitionsTestCase{ "timestamp(0).getSeconds('-08:00')", }, DefinitionsTestCase{ "timestamp(0).getMilliseconds()", }, DefinitionsTestCase{ "duration('1s').getMilliseconds()", }, DefinitionsTestCase{ "timestamp(0).getMilliseconds('-08:00')", })); INSTANTIATE_TEST_SUITE_P(Logic, StdLibDefinitionsTest, ::testing::Values(DefinitionsTestCase{ "true || false", }, DefinitionsTestCase{ "true && false", }, DefinitionsTestCase{ "!true", })); } }
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/checker/standard_library.cc
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/checker/standard_library_test.cc
4552db5798fb0853b131b783d8875794334fae7f
c51d0205-c7bd-4898-90c7-d1f12f00a9d1
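The cel-cpp record above shows the declaration pattern used throughout standard_library.cc: populate a FunctionDecl with named overloads via MakeOverloadDecl or MakeMemberOverloadDecl, hand it to the TypeCheckerBuilder, and expose the whole bundle as a CheckerLibrary. A hedged sketch of a custom library following that same pattern — the CEL function name `greet` and overload id `greet_string` are hypothetical, not part of the standard library:

```cpp
#include <utility>

#include "absl/status/status.h"
#include "checker/type_checker_builder.h"
#include "common/decl.h"
#include "common/type.h"
#include "internal/status_macros.h"

namespace {

// Declares greet(string) -> string for the type checker. Both the function
// name and the overload id are made up for illustration.
absl::Status AddGreetDecls(cel::TypeCheckerBuilder& builder) {
  cel::FunctionDecl greet;
  greet.set_name("greet");
  CEL_RETURN_IF_ERROR(greet.AddOverload(cel::MakeOverloadDecl(
      "greet_string", cel::StringType(), cel::StringType())));
  return builder.AddFunction(std::move(greet));
}

}  // namespace

// Packaged the same way StandardLibrary() returns {"stdlib", ...} above.
cel::CheckerLibrary GreetLibrary() { return {"greet", AddGreetDecls}; }
```

As the StandardLibraryErrorsIfAddedTwice test above demonstrates, AddLibrary rejects a second registration of the same library id with kAlreadyExists, so library ids should be unique per builder.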
cpp
tensorflow/tensorflow
non_max_suppression_op
tensorflow/core/kernels/image/non_max_suppression_op.cc
tensorflow/core/kernels/image/non_max_suppression_op_test.cc
#define EIGEN_USE_THREADS #include "tensorflow/core/kernels/image/non_max_suppression_op.h" #include <cmath> #include <functional> #include <limits> #include <queue> #include <vector> #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/logging.h" namespace tensorflow { namespace { typedef Eigen::ThreadPoolDevice CPUDevice; static inline void CheckScoreSizes(OpKernelContext* context, int num_boxes, const Tensor& scores) { OP_REQUIRES(context, scores.dims() == 1, errors::InvalidArgument( "scores must be 1-D", scores.shape().DebugString(), " (Shape must be rank 1 but is rank ", scores.dims(), ")")); OP_REQUIRES( context, scores.dim_size(0) == num_boxes, errors::InvalidArgument("scores has incompatible shape (Dimensions must " "be equal, but are ", num_boxes, " and ", scores.dim_size(0), ")")); } static inline void ParseAndCheckOverlapSizes(OpKernelContext* context, const Tensor& overlaps, int* num_boxes) { OP_REQUIRES(context, overlaps.dims() == 2, errors::InvalidArgument("overlaps must be 2-D", overlaps.shape().DebugString())); *num_boxes = overlaps.dim_size(0); OP_REQUIRES(context, overlaps.dim_size(1) == *num_boxes, errors::InvalidArgument("overlaps must be square", overlaps.shape().DebugString())); } static inline void ParseAndCheckBoxSizes(OpKernelContext* context, const Tensor& boxes, int* num_boxes) { OP_REQUIRES(context, boxes.dims() == 2, errors::InvalidArgument( "boxes must be 2-D", boxes.shape().DebugString(), " (Shape must be rank 2 but is rank ", boxes.dims(), ")")); *num_boxes = boxes.dim_size(0); OP_REQUIRES(context, boxes.dim_size(1) == 4, errors::InvalidArgument("boxes must have 4 columns (Dimension " "must be 4 but is ", boxes.dim_size(1), ")")); } static inline void CheckCombinedNMSScoreSizes(OpKernelContext* context, int num_boxes, const Tensor& scores) { OP_REQUIRES(context, scores.dims() == 3, errors::InvalidArgument("scores must be 3-D", scores.shape().DebugString())); OP_REQUIRES(context, scores.dim_size(1) == num_boxes, errors::InvalidArgument("scores has incompatible shape")); } static inline void ParseAndCheckCombinedNMSBoxSizes(OpKernelContext* context, const Tensor& boxes, int* num_boxes, const int num_classes) { OP_REQUIRES(context, boxes.dims() == 4, errors::InvalidArgument("boxes must be 4-D", boxes.shape().DebugString())); bool box_check = boxes.dim_size(2) == 1 || boxes.dim_size(2) == num_classes; OP_REQUIRES(context, box_check, errors::InvalidArgument( "third dimension of boxes must be either 1 or num classes")); *num_boxes = boxes.dim_size(1); OP_REQUIRES(context, boxes.dim_size(3) == 4, errors::InvalidArgument("boxes must have 4 columns")); } template <typename T> static inline float IOU(typename TTypes<T, 2>::ConstTensor boxes, int i, int j) { const float ymin_i = Eigen::numext::mini<float>( static_cast<float>(boxes(i, 0)), static_cast<float>(boxes(i, 2))); const float xmin_i = Eigen::numext::mini<float>( static_cast<float>(boxes(i, 1)), static_cast<float>(boxes(i, 3))); const float ymax_i = Eigen::numext::maxi<float>( static_cast<float>(boxes(i, 0)), static_cast<float>(boxes(i, 2))); const float xmax_i = Eigen::numext::maxi<float>( static_cast<float>(boxes(i, 1)), static_cast<float>(boxes(i, 
3))); const float ymin_j = Eigen::numext::mini<float>( static_cast<float>(boxes(j, 0)), static_cast<float>(boxes(j, 2))); const float xmin_j = Eigen::numext::mini<float>( static_cast<float>(boxes(j, 1)), static_cast<float>(boxes(j, 3))); const float ymax_j = Eigen::numext::maxi<float>( static_cast<float>(boxes(j, 0)), static_cast<float>(boxes(j, 2))); const float xmax_j = Eigen::numext::maxi<float>( static_cast<float>(boxes(j, 1)), static_cast<float>(boxes(j, 3))); const float area_i = (ymax_i - ymin_i) * (xmax_i - xmin_i); const float area_j = (ymax_j - ymin_j) * (xmax_j - xmin_j); if (area_i <= 0 || area_j <= 0) { return 0.0; } const float intersection_ymin = Eigen::numext::maxi<float>(ymin_i, ymin_j); const float intersection_xmin = Eigen::numext::maxi<float>(xmin_i, xmin_j); const float intersection_ymax = Eigen::numext::mini<float>(ymax_i, ymax_j); const float intersection_xmax = Eigen::numext::mini<float>(xmax_i, xmax_j); const float intersection_area = Eigen::numext::maxi<float>(intersection_ymax - intersection_ymin, 0.0) * Eigen::numext::maxi<float>(intersection_xmax - intersection_xmin, 0.0); return intersection_area / (area_i + area_j - intersection_area); } static inline float IOU(const float* boxes, int i, int j) { const float ymin_i = Eigen::numext::mini<float>(boxes[i], boxes[i + 2]); const float xmin_i = Eigen::numext::mini<float>(boxes[i + 1], boxes[i + 3]); const float ymax_i = Eigen::numext::maxi<float>(boxes[i], boxes[i + 2]); const float xmax_i = Eigen::numext::maxi<float>(boxes[i + 1], boxes[i + 3]); const float ymin_j = Eigen::numext::mini<float>(boxes[j], boxes[j + 2]); const float xmin_j = Eigen::numext::mini<float>(boxes[j + 1], boxes[j + 3]); const float ymax_j = Eigen::numext::maxi<float>(boxes[j], boxes[j + 2]); const float xmax_j = Eigen::numext::maxi<float>(boxes[j + 1], boxes[j + 3]); const float area_i = (ymax_i - ymin_i) * (xmax_i - xmin_i); const float area_j = (ymax_j - ymin_j) * (xmax_j - xmin_j); if (area_i <= 0 || area_j <= 0) { return 0.0; } const float intersection_ymin = Eigen::numext::maxi<float>(ymin_i, ymin_j); const float intersection_xmin = Eigen::numext::maxi<float>(xmin_i, xmin_j); const float intersection_ymax = Eigen::numext::mini<float>(ymax_i, ymax_j); const float intersection_xmax = Eigen::numext::mini<float>(xmax_i, xmax_j); const float intersection_area = Eigen::numext::maxi<float>(intersection_ymax - intersection_ymin, 0.0) * Eigen::numext::maxi<float>(intersection_xmax - intersection_xmin, 0.0); return intersection_area / (area_i + area_j - intersection_area); } template <typename T> static inline T Overlap(typename TTypes<T, 2>::ConstTensor overlaps, int i, int j) { return overlaps(i, j); } template <typename T> static inline std::function<float(int, int)> CreateIOUSimilarityFn( const Tensor& boxes) { typename TTypes<T, 2>::ConstTensor boxes_data = boxes.tensor<T, 2>(); return std::bind(&IOU<T>, boxes_data, std::placeholders::_1, std::placeholders::_2); } template <typename T> static inline std::function<T(int, int)> CreateOverlapSimilarityFn( const Tensor& overlaps) { typename TTypes<T, 2>::ConstTensor overlaps_data = overlaps.tensor<float, 2>(); return std::bind(&Overlap<T>, overlaps_data, std::placeholders::_1, std::placeholders::_2); } template <typename T> void DoNonMaxSuppressionOp(OpKernelContext* context, const Tensor& scores, int num_boxes, const Tensor& max_output_size, const T similarity_threshold, const T score_threshold, const T soft_nms_sigma, const std::function<float(int, int)>& similarity_fn, bool 
return_scores_tensor = false, bool pad_to_max_output_size = false, int* ptr_num_valid_outputs = nullptr) { const int output_size = max_output_size.scalar<int>()(); OP_REQUIRES(context, output_size >= 0, errors::InvalidArgument("output size must be non-negative")); std::vector<T> scores_data(num_boxes); std::copy_n(scores.flat<T>().data(), num_boxes, scores_data.begin()); struct Candidate { int box_index; T score; int suppress_begin_index; }; auto cmp = [](const Candidate bs_i, const Candidate bs_j) { return ((bs_i.score == bs_j.score) && (bs_i.box_index > bs_j.box_index)) || bs_i.score < bs_j.score; }; std::priority_queue<Candidate, std::deque<Candidate>, decltype(cmp)> candidate_priority_queue(cmp); for (int i = 0; i < scores_data.size(); ++i) { if (scores_data[i] > score_threshold) { candidate_priority_queue.emplace(Candidate({i, scores_data[i], 0})); } } T scale = static_cast<T>(0.0); bool is_soft_nms = soft_nms_sigma > static_cast<T>(0.0); if (is_soft_nms) { scale = static_cast<T>(-0.5) / soft_nms_sigma; } auto suppress_weight = [similarity_threshold, scale, is_soft_nms](const T sim) { const T weight = Eigen::numext::exp<T>(scale * sim * sim); return is_soft_nms || sim <= similarity_threshold ? weight : static_cast<T>(0.0); }; std::vector<int> selected; std::vector<T> selected_scores; float similarity; T original_score; Candidate next_candidate; while (selected.size() < output_size && !candidate_priority_queue.empty()) { next_candidate = candidate_priority_queue.top(); original_score = next_candidate.score; candidate_priority_queue.pop(); bool should_hard_suppress = false; for (int j = static_cast<int>(selected.size()) - 1; j >= next_candidate.suppress_begin_index; --j) { similarity = similarity_fn(next_candidate.box_index, selected[j]); next_candidate.score *= suppress_weight(static_cast<T>(similarity)); if (!is_soft_nms && static_cast<T>(similarity) > similarity_threshold) { should_hard_suppress = true; break; } if (next_candidate.score <= score_threshold) break; } next_candidate.suppress_begin_index = selected.size(); if (!should_hard_suppress) { if (next_candidate.score == original_score) { selected.push_back(next_candidate.box_index); selected_scores.push_back(next_candidate.score); continue; } if (next_candidate.score > score_threshold) { candidate_priority_queue.push(next_candidate); } } } int num_valid_outputs = selected.size(); if (pad_to_max_output_size) { selected.resize(output_size, 0); selected_scores.resize(output_size, static_cast<T>(0)); } if (ptr_num_valid_outputs) { *ptr_num_valid_outputs = num_valid_outputs; } Tensor* output_indices = nullptr; TensorShape output_shape({static_cast<int>(selected.size())}); OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output_indices)); TTypes<int, 1>::Tensor output_indices_data = output_indices->tensor<int, 1>(); std::copy_n(selected.begin(), selected.size(), output_indices_data.data()); if (return_scores_tensor) { Tensor* output_scores = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, output_shape, &output_scores)); typename TTypes<T, 1>::Tensor output_scores_data = output_scores->tensor<T, 1>(); std::copy_n(selected_scores.begin(), selected_scores.size(), output_scores_data.data()); } } struct ResultCandidate { int box_index; float score; int class_idx; float box_coord[4]; }; void DoNMSPerClass(int batch_idx, int class_idx, const float* boxes_data, const float* scores_data, int num_boxes, int q, int num_classes, const int size_per_class, const float score_threshold, const float iou_threshold, 
std::vector<ResultCandidate>& result_candidate_vec) { struct Candidate { int box_index; float score; }; auto cmp = [](const Candidate bs_i, const Candidate bs_j) { return bs_i.score < bs_j.score; }; std::priority_queue<Candidate, std::vector<Candidate>, decltype(cmp)> candidate_priority_queue(cmp); float temp_score; for (int i = 0; i < num_boxes; ++i) { temp_score = scores_data[i * num_classes + class_idx]; if (temp_score > score_threshold) { candidate_priority_queue.emplace(Candidate({i, temp_score})); } } std::vector<int> selected; Candidate next_candidate; int candidate_box_data_idx, selected_box_data_idx, class_box_idx; class_box_idx = (q > 1) ? class_idx : 0; float iou; while (selected.size() < size_per_class && !candidate_priority_queue.empty()) { next_candidate = candidate_priority_queue.top(); candidate_priority_queue.pop(); candidate_box_data_idx = (next_candidate.box_index * q + class_box_idx) * 4; bool should_select = true; for (int j = selected.size() - 1; j >= 0; --j) { selected_box_data_idx = (selected[j] * q + class_box_idx) * 4; iou = IOU(boxes_data, candidate_box_data_idx, selected_box_data_idx); if (iou > iou_threshold) { should_select = false; break; } } if (should_select) { result_candidate_vec[selected.size() + size_per_class * class_idx] = { next_candidate.box_index, next_candidate.score, class_idx, {boxes_data[candidate_box_data_idx], boxes_data[candidate_box_data_idx + 1], boxes_data[candidate_box_data_idx + 2], boxes_data[candidate_box_data_idx + 3]}}; selected.push_back(next_candidate.box_index); } } } void SelectResultPerBatch(std::vector<float>& nmsed_boxes, std::vector<float>& nmsed_scores, std::vector<float>& nmsed_classes, std::vector<ResultCandidate>& result_candidate_vec, std::vector<int>& final_valid_detections, const int batch_idx, int total_size_per_batch, bool pad_per_class, int max_size_per_batch, bool clip_boxes, int per_batch_size) { auto rc_cmp = [](const ResultCandidate rc_i, const ResultCandidate rc_j) { return rc_i.score > rc_j.score; }; std::sort(result_candidate_vec.begin(), result_candidate_vec.end(), rc_cmp); int max_detections = 0; int result_candidate_size = std::count_if(result_candidate_vec.begin(), result_candidate_vec.end(), [](ResultCandidate rc) { return rc.box_index > -1; }); if (!pad_per_class) { max_detections = std::min(result_candidate_size, total_size_per_batch); } else { max_detections = std::min(per_batch_size, result_candidate_size); } final_valid_detections[batch_idx] = max_detections; int curr_total_size = max_detections; int result_idx = 0; while (curr_total_size > 0 && result_idx < result_candidate_vec.size()) { ResultCandidate next_candidate = result_candidate_vec[result_idx++]; if (clip_boxes) { const float box_min = 0.0; const float box_max = 1.0; nmsed_boxes.push_back( std::max(std::min(next_candidate.box_coord[0], box_max), box_min)); nmsed_boxes.push_back( std::max(std::min(next_candidate.box_coord[1], box_max), box_min)); nmsed_boxes.push_back( std::max(std::min(next_candidate.box_coord[2], box_max), box_min)); nmsed_boxes.push_back( std::max(std::min(next_candidate.box_coord[3], box_max), box_min)); } else { nmsed_boxes.push_back(next_candidate.box_coord[0]); nmsed_boxes.push_back(next_candidate.box_coord[1]); nmsed_boxes.push_back(next_candidate.box_coord[2]); nmsed_boxes.push_back(next_candidate.box_coord[3]); } nmsed_scores.push_back(next_candidate.score); nmsed_classes.push_back(next_candidate.class_idx); curr_total_size--; } nmsed_boxes.resize(per_batch_size * 4, 0); nmsed_scores.resize(per_batch_size, 0); 
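// Classes get the same zero-padding as boxes and scores, so every batch row
// carries exactly per_batch_size entries; slots past
// final_valid_detections[batch_idx] stay zero.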
nmsed_classes.resize(per_batch_size, 0); } void BatchedNonMaxSuppressionOp( OpKernelContext* context, const Tensor& inp_boxes, const Tensor& inp_scores, int num_boxes, const int max_size_per_class, const int total_size_per_batch, const float score_threshold, const float iou_threshold, bool pad_per_class = false, bool clip_boxes = true) { const int num_batches = inp_boxes.dim_size(0); int num_classes = inp_scores.dim_size(2); int q = inp_boxes.dim_size(2); const float* scores_data = const_cast<float*>(inp_scores.flat<float>().data()); const float* boxes_data = const_cast<float*>(inp_boxes.flat<float>().data()); int boxes_per_batch = num_boxes * q * 4; int scores_per_batch = num_boxes * num_classes; const int size_per_class = std::min(max_size_per_class, num_boxes); std::vector<std::vector<ResultCandidate>> result_candidate_vec( num_batches, std::vector<ResultCandidate>(size_per_class * num_classes, {-1, -1.0, -1, {0.0, 0.0, 0.0, 0.0}})); std::vector<std::vector<float>> nmsed_boxes(num_batches); std::vector<std::vector<float>> nmsed_scores(num_batches); std::vector<std::vector<float>> nmsed_classes(num_batches); std::vector<int> final_valid_detections(num_batches); auto shard_nms = [&](int begin, int end) { for (int idx = begin; idx < end; ++idx) { int batch_idx = idx / num_classes; int class_idx = idx % num_classes; DoNMSPerClass(batch_idx, class_idx, boxes_data + boxes_per_batch * batch_idx, scores_data + scores_per_batch * batch_idx, num_boxes, q, num_classes, size_per_class, score_threshold, iou_threshold, result_candidate_vec[batch_idx]); } }; int length = num_batches * num_classes; int input_bytes = num_boxes * 10 * sizeof(float); int output_bytes = num_boxes * 10 * sizeof(float); int compute_cycles = Eigen::TensorOpCost::AddCost<int>() * num_boxes * 14 + Eigen::TensorOpCost::MulCost<int>() * num_boxes * 9 + Eigen::TensorOpCost::MulCost<float>() * num_boxes * 9 + Eigen::TensorOpCost::AddCost<float>() * num_boxes * 8; const Eigen::TensorOpCost cost(input_bytes, output_bytes, compute_cycles); const CPUDevice& d = context->eigen_device<CPUDevice>(); d.parallelFor(length, cost, shard_nms); int per_batch_size = total_size_per_batch; int max_total_size = static_cast<int>( std::min(static_cast<int64_t>(std::numeric_limits<int>::max()), static_cast<int64_t>(max_size_per_class) * num_classes)); if (pad_per_class) { per_batch_size = std::min(total_size_per_batch, max_total_size); } Tensor* valid_detections_t = nullptr; TensorShape valid_detections_shape({num_batches}); OP_REQUIRES_OK(context, context->allocate_output(3, valid_detections_shape, &valid_detections_t)); auto valid_detections_flat = valid_detections_t->template flat<int>(); auto shard_result = [&](int begin, int end) { for (int batch_idx = begin; batch_idx < end; ++batch_idx) { SelectResultPerBatch( nmsed_boxes[batch_idx], nmsed_scores[batch_idx], nmsed_classes[batch_idx], result_candidate_vec[batch_idx], final_valid_detections, batch_idx, total_size_per_batch, pad_per_class, max_total_size, clip_boxes, per_batch_size); valid_detections_flat(batch_idx) = final_valid_detections[batch_idx]; } }; length = num_batches; input_bytes = num_boxes * 10 * sizeof(float) + per_batch_size * 6 * sizeof(float); output_bytes = num_boxes * 5 * sizeof(float) + per_batch_size * 6 * sizeof(float); compute_cycles = Eigen::TensorOpCost::AddCost<int>() * num_boxes * 5 + Eigen::TensorOpCost::AddCost<float>() * num_boxes * 5; const Eigen::TensorOpCost cost_result(input_bytes, output_bytes, compute_cycles); d.parallelFor(length, cost_result, shard_result); 
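// Per-batch selection done: allocate the nmsed_boxes / nmsed_scores /
// nmsed_classes outputs and copy the per-batch vectors into them in one
// final parallel pass.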
Tensor* nmsed_boxes_t = nullptr; TensorShape boxes_shape({num_batches, per_batch_size, 4}); OP_REQUIRES_OK(context, context->allocate_output(0, boxes_shape, &nmsed_boxes_t)); auto nmsed_boxes_flat = nmsed_boxes_t->template flat<float>(); Tensor* nmsed_scores_t = nullptr; TensorShape scores_shape({num_batches, per_batch_size}); OP_REQUIRES_OK(context, context->allocate_output(1, scores_shape, &nmsed_scores_t)); auto nmsed_scores_flat = nmsed_scores_t->template flat<float>(); Tensor* nmsed_classes_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, scores_shape, &nmsed_classes_t)); auto nmsed_classes_flat = nmsed_classes_t->template flat<float>(); auto shard_copy_result = [&](int begin, int end) { for (int idx = begin; idx < end; ++idx) { int batch_idx = idx / per_batch_size; int j = idx % per_batch_size; nmsed_scores_flat(idx) = nmsed_scores[batch_idx][j]; nmsed_classes_flat(idx) = nmsed_classes[batch_idx][j]; for (int k = 0; k < 4; ++k) { nmsed_boxes_flat(idx * 4 + k) = nmsed_boxes[batch_idx][j * 4 + k]; } } }; length = num_batches * per_batch_size; input_bytes = 6 * sizeof(float); output_bytes = 6 * sizeof(float); compute_cycles = Eigen::TensorOpCost::AddCost<int>() * 2 + Eigen::TensorOpCost::MulCost<int>() * 2 + Eigen::TensorOpCost::DivCost<float>() * 2; const Eigen::TensorOpCost cost_copy_result(input_bytes, output_bytes, compute_cycles); d.parallelFor(length, cost_copy_result, shard_copy_result); } template <typename T> T GetScalar(const Tensor& tensor) { switch (tensor.dtype()) { case DT_FLOAT: return static_cast<T>(tensor.scalar<float>()()); case DT_DOUBLE: return static_cast<T>(tensor.scalar<double>()()); case DT_BFLOAT16: return static_cast<T>(tensor.scalar<Eigen::bfloat16>()()); case DT_HALF: return static_cast<T>(tensor.scalar<Eigen::half>()()); default: DCHECK(false) << "Unsupported type " << tensor.dtype(); break; } return static_cast<T>(0); } } template <typename Device> class NonMaxSuppressionOp : public OpKernel { public: explicit NonMaxSuppressionOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("iou_threshold", &iou_threshold_)); } void Compute(OpKernelContext* context) override { const Tensor& boxes = context->input(0); const Tensor& scores = context->input(1); const Tensor& max_output_size = context->input(2); OP_REQUIRES( context, TensorShapeUtils::IsScalar(max_output_size.shape()), errors::InvalidArgument("max_output_size must be 0-D, got shape ", max_output_size.shape().DebugString())); OP_REQUIRES(context, iou_threshold_ >= 0 && iou_threshold_ <= 1, errors::InvalidArgument("iou_threshold must be in [0, 1]")); int num_boxes = 0; ParseAndCheckBoxSizes(context, boxes, &num_boxes); CheckScoreSizes(context, num_boxes, scores); if (!context->status().ok()) { return; } auto similarity_fn = CreateIOUSimilarityFn<float>(boxes); const float score_threshold_val = std::numeric_limits<float>::lowest(); const float dummy_soft_nms_sigma = static_cast<float>(0.0); DoNonMaxSuppressionOp<float>(context, scores, num_boxes, max_output_size, iou_threshold_, score_threshold_val, dummy_soft_nms_sigma, similarity_fn); } private: float iou_threshold_; }; template <typename Device, typename T> class NonMaxSuppressionV2Op : public OpKernel { public: explicit NonMaxSuppressionV2Op(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor& boxes = context->input(0); const Tensor& scores = context->input(1); const Tensor& max_output_size = context->input(2); OP_REQUIRES( context, 
TensorShapeUtils::IsScalar(max_output_size.shape()), errors::InvalidArgument("max_output_size must be 0-D, got shape ", max_output_size.shape().DebugString())); const Tensor& iou_threshold = context->input(3); OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()), errors::InvalidArgument("iou_threshold must be 0-D, got shape ", iou_threshold.shape().DebugString())); const T iou_threshold_val = GetScalar<T>(iou_threshold); OP_REQUIRES(context, iou_threshold_val >= static_cast<T>(0.0) && iou_threshold_val <= static_cast<T>(1.0), errors::InvalidArgument("iou_threshold must be in [0, 1]")); int num_boxes = 0; ParseAndCheckBoxSizes(context, boxes, &num_boxes); CheckScoreSizes(context, num_boxes, scores); if (!context->status().ok()) { return; } auto similarity_fn = CreateIOUSimilarityFn<T>(boxes); const T score_threshold_val = std::numeric_limits<T>::lowest(); const T dummy_soft_nms_sigma = static_cast<T>(0.0); DoNonMaxSuppressionOp<T>(context, scores, num_boxes, max_output_size, iou_threshold_val, score_threshold_val, dummy_soft_nms_sigma, similarity_fn); } }; template <typename Device, typename T> class NonMaxSuppressionV3Op : public OpKernel { public: explicit NonMaxSuppressionV3Op(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor& boxes = context->input(0); const Tensor& scores = context->input(1); const Tensor& max_output_size = context->input(2); OP_REQUIRES( context, TensorShapeUtils::IsScalar(max_output_size.shape()), errors::InvalidArgument("max_output_size must be 0-D, got shape ", max_output_size.shape().DebugString(), " (Shape must be rank 0 but is ", "rank ", max_output_size.dims(), ")")); const Tensor& iou_threshold = context->input(3); OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()), errors::InvalidArgument("iou_threshold must be 0-D, got shape ", iou_threshold.shape().DebugString(), " (Shape must be rank 0 but is rank ", iou_threshold.dims(), ")")); const T iou_threshold_val = GetScalar<T>(iou_threshold); OP_REQUIRES(context, iou_threshold_val >= static_cast<T>(0.0) && iou_threshold_val <= static_cast<T>(1.0), errors::InvalidArgument("iou_threshold must be in [0, 1]")); const Tensor& score_threshold = context->input(4); OP_REQUIRES( context, TensorShapeUtils::IsScalar(score_threshold.shape()), errors::InvalidArgument("score_threshold must be 0-D, got shape ", score_threshold.shape().DebugString())); const T score_threshold_val = GetScalar<T>(score_threshold); int num_boxes = 0; ParseAndCheckBoxSizes(context, boxes, &num_boxes); CheckScoreSizes(context, num_boxes, scores); if (!context->status().ok()) { return; } auto similarity_fn = CreateIOUSimilarityFn<T>(boxes); const T dummy_soft_nms_sigma = static_cast<T>(0.0); DoNonMaxSuppressionOp<T>(context, scores, num_boxes, max_output_size, iou_threshold_val, score_threshold_val, dummy_soft_nms_sigma, similarity_fn); } }; template <typename Device, typename T> class NonMaxSuppressionV4Op : public OpKernel { public: explicit NonMaxSuppressionV4Op(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("pad_to_max_output_size", &pad_to_max_output_size_)); } void Compute(OpKernelContext* context) override { const Tensor& boxes = context->input(0); const Tensor& scores = context->input(1); const Tensor& max_output_size = context->input(2); OP_REQUIRES( context, TensorShapeUtils::IsScalar(max_output_size.shape()), errors::InvalidArgument("max_output_size must be 0-D, got shape ", 
max_output_size.shape().DebugString())); const Tensor& iou_threshold = context->input(3); OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()), errors::InvalidArgument("iou_threshold must be 0-D, got shape ", iou_threshold.shape().DebugString())); const T iou_threshold_val = GetScalar<T>(iou_threshold); OP_REQUIRES(context, iou_threshold_val >= static_cast<T>(0.0) && iou_threshold_val <= static_cast<T>(1.0), errors::InvalidArgument("iou_threshold must be in [0, 1]")); const Tensor& score_threshold = context->input(4); OP_REQUIRES( context, TensorShapeUtils::IsScalar(score_threshold.shape()), errors::InvalidArgument("score_threshold must be 0-D, got shape ", score_threshold.shape().DebugString())); const T score_threshold_val = GetScalar<T>(score_threshold); int num_boxes = 0; ParseAndCheckBoxSizes(context, boxes, &num_boxes); CheckScoreSizes(context, num_boxes, scores); if (!context->status().ok()) { return; } auto similarity_fn = CreateIOUSimilarityFn<T>(boxes); int num_valid_outputs; bool return_scores_tensor_ = false; const T dummy_soft_nms_sigma = static_cast<T>(0.0); DoNonMaxSuppressionOp<T>( context, scores, num_boxes, max_output_size, iou_threshold_val, score_threshold_val, dummy_soft_nms_sigma, similarity_fn, return_scores_tensor_, pad_to_max_output_size_, &num_valid_outputs); if (!context->status().ok()) { return; } Tensor* num_outputs_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 1, tensorflow::TensorShape{}, &num_outputs_t)); num_outputs_t->scalar<int32>().setConstant(num_valid_outputs); } private: bool pad_to_max_output_size_; }; template <typename Device, typename T> class NonMaxSuppressionV5Op : public OpKernel { public: explicit NonMaxSuppressionV5Op(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("pad_to_max_output_size", &pad_to_max_output_size_)); } void Compute(OpKernelContext* context) override { const Tensor& boxes = context->input(0); const Tensor& scores = context->input(1); const Tensor& max_output_size = context->input(2); OP_REQUIRES( context, TensorShapeUtils::IsScalar(max_output_size.shape()), errors::InvalidArgument("max_output_size must be 0-D, got shape ", max_output_size.shape().DebugString())); const Tensor& iou_threshold = context->input(3); OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()), errors::InvalidArgument("iou_threshold must be 0-D, got shape ", iou_threshold.shape().DebugString())); const T iou_threshold_val = iou_threshold.scalar<T>()(); OP_REQUIRES(context, iou_threshold_val >= static_cast<T>(0.0) && iou_threshold_val <= static_cast<T>(1.0), errors::InvalidArgument("iou_threshold must be in [0, 1]")); const Tensor& score_threshold = context->input(4); OP_REQUIRES( context, TensorShapeUtils::IsScalar(score_threshold.shape()), errors::InvalidArgument("score_threshold must be 0-D, got shape ", score_threshold.shape().DebugString())); const T score_threshold_val = score_threshold.scalar<T>()(); const Tensor& soft_nms_sigma = context->input(5); OP_REQUIRES( context, TensorShapeUtils::IsScalar(soft_nms_sigma.shape()), errors::InvalidArgument("soft_nms_sigma must be 0-D, got shape ", soft_nms_sigma.shape().DebugString())); const T soft_nms_sigma_val = soft_nms_sigma.scalar<T>()(); OP_REQUIRES(context, soft_nms_sigma_val >= static_cast<T>(0.0), errors::InvalidArgument("soft_nms_sigma_val must be >= 0")); int num_boxes = 0; ParseAndCheckBoxSizes(context, boxes, &num_boxes); CheckScoreSizes(context, num_boxes, scores); if (!context->status().ok()) { 
return; } auto similarity_fn = CreateIOUSimilarityFn<T>(boxes); int num_valid_outputs; const bool return_scores_tensor_ = true; DoNonMaxSuppressionOp<T>( context, scores, num_boxes, max_output_size, iou_threshold_val, score_threshold_val, soft_nms_sigma_val, similarity_fn, return_scores_tensor_, pad_to_max_output_size_, &num_valid_outputs); if (!context->status().ok()) { return; } Tensor* num_outputs_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 2, tensorflow::TensorShape{}, &num_outputs_t)); num_outputs_t->scalar<int32>().setConstant(num_valid_outputs); } private: bool pad_to_max_output_size_; }; template <typename Device> class NonMaxSuppressionWithOverlapsOp : public OpKernel { public: explicit NonMaxSuppressionWithOverlapsOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor& overlaps = context->input(0); const Tensor& scores = context->input(1); const Tensor& max_output_size = context->input(2); OP_REQUIRES( context, TensorShapeUtils::IsScalar(max_output_size.shape()), errors::InvalidArgument("max_output_size must be 0-D, got shape ", max_output_size.shape().DebugString())); const Tensor& overlap_threshold = context->input(3); OP_REQUIRES( context, TensorShapeUtils::IsScalar(overlap_threshold.shape()), errors::InvalidArgument("overlap_threshold must be 0-D, got shape ", overlap_threshold.shape().DebugString())); const float overlap_threshold_val = overlap_threshold.scalar<float>()(); const Tensor& score_threshold = context->input(4); OP_REQUIRES( context, TensorShapeUtils::IsScalar(score_threshold.shape()), errors::InvalidArgument("score_threshold must be 0-D, got shape ", score_threshold.shape().DebugString())); const float score_threshold_val = score_threshold.scalar<float>()(); int num_boxes = 0; ParseAndCheckOverlapSizes(context, overlaps, &num_boxes); CheckScoreSizes(context, num_boxes, scores); if (!context->status().ok()) { return; } auto similarity_fn = CreateOverlapSimilarityFn<float>(overlaps); const float dummy_soft_nms_sigma = static_cast<float>(0.0); DoNonMaxSuppressionOp<float>(context, scores, num_boxes, max_output_size, overlap_threshold_val, score_threshold_val, dummy_soft_nms_sigma, similarity_fn); } }; template <typename Device> class CombinedNonMaxSuppressionOp : public OpKernel { public: explicit CombinedNonMaxSuppressionOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("pad_per_class", &pad_per_class_)); OP_REQUIRES_OK(context, context->GetAttr("clip_boxes", &clip_boxes_)); } void Compute(OpKernelContext* context) override { const Tensor& boxes = context->input(0); const Tensor& scores = context->input(1); OP_REQUIRES( context, (boxes.dim_size(0) == scores.dim_size(0)), errors::InvalidArgument("boxes and scores must have same batch size")); const Tensor& max_output_size = context->input(2); OP_REQUIRES( context, TensorShapeUtils::IsScalar(max_output_size.shape()), errors::InvalidArgument("max_size_per_class must be 0-D, got shape ", max_output_size.shape().DebugString())); const int max_size_per_class = max_output_size.scalar<int>()(); OP_REQUIRES(context, max_size_per_class > 0, errors::InvalidArgument("max_size_per_class must be positive")); const Tensor& max_total_size = context->input(3); OP_REQUIRES( context, TensorShapeUtils::IsScalar(max_total_size.shape()), errors::InvalidArgument("max_total_size must be 0-D, got shape ", max_total_size.shape().DebugString())); const int max_total_size_per_batch = 
max_total_size.scalar<int>()(); OP_REQUIRES(context, max_total_size_per_batch > 0, errors::InvalidArgument("max_total_size must be > 0")); if (max_total_size_per_batch > pow(10, 6)) { LOG(WARNING) << "Detected a large value for `max_total_size`. This may " << "cause OOM error. (max_total_size: " << max_total_size.scalar<int>()() << ")"; } const Tensor& iou_threshold = context->input(4); OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()), errors::InvalidArgument("iou_threshold must be 0-D, got shape ", iou_threshold.shape().DebugString())); const float iou_threshold_val = iou_threshold.scalar<float>()(); const Tensor& score_threshold = context->input(5); OP_REQUIRES( context, TensorShapeUtils::IsScalar(score_threshold.shape()), errors::InvalidArgument("score_threshold must be 0-D, got shape ", score_threshold.shape().DebugString())); const float score_threshold_val = score_threshold.scalar<float>()(); OP_REQUIRES(context, iou_threshold_val >= 0 && iou_threshold_val <= 1, errors::InvalidArgument("iou_threshold must be in [0, 1]")); int num_boxes = 0; const int num_classes = scores.dim_size(2); ParseAndCheckCombinedNMSBoxSizes(context, boxes, &num_boxes, num_classes); CheckCombinedNMSScoreSizes(context, num_boxes, scores); if (!context->status().ok()) { return; } BatchedNonMaxSuppressionOp(context, boxes, scores, num_boxes, max_size_per_class, max_total_size_per_batch, score_threshold_val, iou_threshold_val, pad_per_class_, clip_boxes_); } private: bool pad_per_class_; bool clip_boxes_; }; REGISTER_KERNEL_BUILDER(Name("NonMaxSuppression").Device(DEVICE_CPU), NonMaxSuppressionOp<CPUDevice>); REGISTER_KERNEL_BUILDER( Name("NonMaxSuppressionV2").TypeConstraint<float>("T").Device(DEVICE_CPU), NonMaxSuppressionV2Op<CPUDevice, float>); REGISTER_KERNEL_BUILDER(Name("NonMaxSuppressionV2") .TypeConstraint<Eigen::half>("T") .Device(DEVICE_CPU), NonMaxSuppressionV2Op<CPUDevice, Eigen::half>); REGISTER_KERNEL_BUILDER( Name("NonMaxSuppressionV3").TypeConstraint<float>("T").Device(DEVICE_CPU), NonMaxSuppressionV3Op<CPUDevice, float>); REGISTER_KERNEL_BUILDER(Name("NonMaxSuppressionV3") .TypeConstraint<Eigen::half>("T") .Device(DEVICE_CPU), NonMaxSuppressionV3Op<CPUDevice, Eigen::half>); REGISTER_KERNEL_BUILDER( Name("NonMaxSuppressionV4").TypeConstraint<float>("T").Device(DEVICE_CPU), NonMaxSuppressionV4Op<CPUDevice, float>); REGISTER_KERNEL_BUILDER(Name("NonMaxSuppressionV4") .TypeConstraint<Eigen::half>("T") .Device(DEVICE_CPU), NonMaxSuppressionV4Op<CPUDevice, Eigen::half>); REGISTER_KERNEL_BUILDER( Name("NonMaxSuppressionV5").TypeConstraint<float>("T").Device(DEVICE_CPU), NonMaxSuppressionV5Op<CPUDevice, float>); REGISTER_KERNEL_BUILDER(Name("NonMaxSuppressionV5") .TypeConstraint<Eigen::half>("T") .Device(DEVICE_CPU), NonMaxSuppressionV5Op<CPUDevice, Eigen::half>); REGISTER_KERNEL_BUILDER( Name("NonMaxSuppressionWithOverlaps").Device(DEVICE_CPU), NonMaxSuppressionWithOverlapsOp<CPUDevice>); REGISTER_KERNEL_BUILDER(Name("CombinedNonMaxSuppression").Device(DEVICE_CPU), CombinedNonMaxSuppressionOp<CPUDevice>); }
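A tiny self-contained check (plain arithmetic, not repo code) of the suppress_weight rule inside DoNonMaxSuppressionOp above: with soft_nms_sigma > 0 a candidate's score is decayed by exp(-iou^2 / (2 * sigma)) for every selected box it overlaps, while with sigma == 0 the scale term vanishes, exp(...) is 1, and the ternary reduces to the classic hard cutoff at iou_threshold.

#include <cassert>
#include <cmath>
#include <cstdio>

// Mirrors the kernel's suppress_weight lambda: scale = -0.5 / sigma when
// sigma > 0, otherwise 0, with a hard zero past the threshold in hard mode.
float SuppressWeight(float iou, float iou_threshold, float sigma) {
  const bool soft = sigma > 0.f;
  const float scale = soft ? -0.5f / sigma : 0.f;
  const float weight = std::exp(scale * iou * iou);
  return (soft || iou <= iou_threshold) ? weight : 0.f;
}

int main() {
  // Hard NMS: overlap above the threshold removes the candidate outright.
  assert(SuppressWeight(0.7f, 0.5f, /*sigma=*/0.f) == 0.f);
  assert(SuppressWeight(0.3f, 0.5f, /*sigma=*/0.f) == 1.f);
  // Soft NMS (sigma = 0.5): the same overlap only decays the score,
  // here by exp(-0.49) ~= 0.61.
  std::printf("%f\n", SuppressWeight(0.7f, 0.5f, /*sigma=*/0.5f));
  return 0;
}

The fractional expected scores in the TestSelectFromThreeClustersWithSoftNMS case further down come from this decay applied iteratively during selection.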
#include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { class NonMaxSuppressionOpTest : public OpsTestBase { protected: void MakeOp(float iou_threshold) { TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppression") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Attr("iou_threshold", iou_threshold) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); } }; TEST_F(NonMaxSuppressionOpTest, TestSelectFromThreeClusters) { MakeOp(.5); AddInputFromArray<float>( TensorShape({6, 4}), {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f, 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101}); AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f}); AddInputFromArray<int>(TensorShape({}), {3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({3})); test::FillValues<int>(&expected, {3, 0, 5}); test::ExpectTensorEqual<int>(expected, *GetOutput(0)); } TEST_F(NonMaxSuppressionOpTest, TestSelectFromThreeClustersFlippedCoordinates) { MakeOp(.5); AddInputFromArray<float>(TensorShape({6, 4}), {1, 1, 0, 0, 0, 0.1f, 1, 1.1f, 0, .9f, 1, -0.1f, 0, 10, 1, 11, 1, 10.1f, 0, 11.1f, 1, 101, 0, 100}); AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f}); AddInputFromArray<int>(TensorShape({}), {3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({3})); test::FillValues<int>(&expected, {3, 0, 5}); test::ExpectTensorEqual<int>(expected, *GetOutput(0)); } TEST_F(NonMaxSuppressionOpTest, TestSelectAtMostTwoBoxesFromThreeClusters) { MakeOp(.5); AddInputFromArray<float>( TensorShape({6, 4}), {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f, 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101}); AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f}); AddInputFromArray<int>(TensorShape({}), {2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({2})); test::FillValues<int>(&expected, {3, 0}); test::ExpectTensorEqual<int>(expected, *GetOutput(0)); } TEST_F(NonMaxSuppressionOpTest, TestSelectWithNegativeScores) { MakeOp(.5); AddInputFromArray<float>( TensorShape({6, 4}), {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f, 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101}); AddInputFromArray<float>( TensorShape({6}), {.9f - 10.0f, .75f - 10.0f, .6f - 10.0f, .95f - 10.0f, .5f - 10.0f, .3f - 10.0f}); AddInputFromArray<int>(TensorShape({}), {6}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({3})); test::FillValues<int>(&expected, {3, 0, 5}); test::ExpectTensorEqual<int>(expected, *GetOutput(0)); } TEST_F(NonMaxSuppressionOpTest, TestFirstBoxDegenerate) { MakeOp(.5); AddInputFromArray<float>(TensorShape({3, 4}), {0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 3}); AddInputFromArray<float>(TensorShape({3}), {.9f, .75f, .6f}); AddInputFromArray<int>(TensorShape({}), {3}); TF_ASSERT_OK(RunOpKernel()); Tensor 
expected(allocator(), DT_INT32, TensorShape({3})); test::FillValues<int>(&expected, {0, 1, 2}); test::ExpectTensorEqual<int>(expected, *GetOutput(0)); } TEST_F(NonMaxSuppressionOpTest, TestSelectAtMostThirtyBoxesFromThreeClusters) { MakeOp(.5); AddInputFromArray<float>( TensorShape({6, 4}), {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f, 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101}); AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f}); AddInputFromArray<int>(TensorShape({}), {30}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({3})); test::FillValues<int>(&expected, {3, 0, 5}); test::ExpectTensorEqual<int>(expected, *GetOutput(0)); } TEST_F(NonMaxSuppressionOpTest, TestSelectSingleBox) { MakeOp(.5); AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1}); AddInputFromArray<float>(TensorShape({1}), {.9f}); AddInputFromArray<int>(TensorShape({}), {3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({1})); test::FillValues<int>(&expected, {0}); test::ExpectTensorEqual<int>(expected, *GetOutput(0)); } TEST_F(NonMaxSuppressionOpTest, TestSelectFromTenIdenticalBoxes) { MakeOp(.5); int num_boxes = 10; std::vector<float> corners(num_boxes * 4); std::vector<float> scores(num_boxes); for (int i = 0; i < num_boxes; ++i) { corners[i * 4 + 0] = 0; corners[i * 4 + 1] = 0; corners[i * 4 + 2] = 1; corners[i * 4 + 3] = 1; scores[i] = .9; } AddInputFromArray<float>(TensorShape({num_boxes, 4}), corners); AddInputFromArray<float>(TensorShape({num_boxes}), scores); AddInputFromArray<int>(TensorShape({}), {3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({1})); test::FillValues<int>(&expected, {0}); test::ExpectTensorEqual<int>(expected, *GetOutput(0)); } TEST_F(NonMaxSuppressionOpTest, TestInconsistentBoxAndScoreShapes) { MakeOp(.5); AddInputFromArray<float>( TensorShape({6, 4}), {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f, 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101}); AddInputFromArray<float>(TensorShape({5}), {.9f, .75f, .6f, .95f, .5f}); AddInputFromArray<int>(TensorShape({}), {30}); Status s = RunOpKernel(); ASSERT_FALSE(s.ok()); EXPECT_TRUE(absl::StrContains(s.ToString(), "scores has incompatible shape")) << s; } TEST_F(NonMaxSuppressionOpTest, TestInvalidIOUThreshold) { MakeOp(1.2); AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1}); AddInputFromArray<float>(TensorShape({1}), {.9f}); AddInputFromArray<int>(TensorShape({}), {3}); Status s = RunOpKernel(); ASSERT_FALSE(s.ok()); EXPECT_TRUE( absl::StrContains(s.ToString(), "iou_threshold must be in [0, 1]")) << s; } TEST_F(NonMaxSuppressionOpTest, TestEmptyInput) { MakeOp(.5); AddInputFromArray<float>(TensorShape({0, 4}), {}); AddInputFromArray<float>(TensorShape({0}), {}); AddInputFromArray<int>(TensorShape({}), {30}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({0})); test::FillValues<int>(&expected, {}); test::ExpectTensorEqual<int>(expected, *GetOutput(0)); } class NonMaxSuppressionV2OpTest : public OpsTestBase { protected: void MakeOp() { TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppressionV2") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_FLOAT)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); } }; TEST_F(NonMaxSuppressionV2OpTest, TestSelectFromThreeClusters) { MakeOp(); AddInputFromArray<float>( TensorShape({6, 4}), {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f, 0, 10, 1, 11, 0, 
10.1f, 1, 11.1f, 0, 100, 1, 101}); AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f}); AddInputFromArray<int>(TensorShape({}), {3}); AddInputFromArray<float>(TensorShape({}), {.5f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({3})); test::FillValues<int>(&expected, {3, 0, 5}); test::ExpectTensorEqual<int>(expected, *GetOutput(0)); } TEST_F(NonMaxSuppressionV2OpTest, TestSelectFromThreeClustersFlippedCoordinates) { MakeOp(); AddInputFromArray<float>(TensorShape({6, 4}), {1, 1, 0, 0, 0, 0.1f, 1, 1.1f, 0, .9f, 1, -0.1f, 0, 10, 1, 11, 1, 10.1f, 0, 11.1f, 1, 101, 0, 100}); AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f}); AddInputFromArray<int>(TensorShape({}), {3}); AddInputFromArray<float>(TensorShape({}), {.5f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({3})); test::FillValues<int>(&expected, {3, 0, 5}); test::ExpectTensorEqual<int>(expected, *GetOutput(0)); } TEST_F(NonMaxSuppressionV2OpTest, TestSelectAtMostTwoBoxesFromThreeClusters) { MakeOp(); AddInputFromArray<float>( TensorShape({6, 4}), {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f, 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101}); AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f}); AddInputFromArray<int>(TensorShape({}), {2}); AddInputFromArray<float>(TensorShape({}), {.5f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({2})); test::FillValues<int>(&expected, {3, 0}); test::ExpectTensorEqual<int>(expected, *GetOutput(0)); } TEST_F(NonMaxSuppressionV2OpTest, TestSelectAtMostThirtyBoxesFromThreeClusters) { MakeOp(); AddInputFromArray<float>( TensorShape({6, 4}), {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f, 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101}); AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f}); AddInputFromArray<int>(TensorShape({}), {30}); AddInputFromArray<float>(TensorShape({}), {.5f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({3})); test::FillValues<int>(&expected, {3, 0, 5}); test::ExpectTensorEqual<int>(expected, *GetOutput(0)); } TEST_F(NonMaxSuppressionV2OpTest, TestSelectSingleBox) { MakeOp(); AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1}); AddInputFromArray<float>(TensorShape({1}), {.9f}); AddInputFromArray<int>(TensorShape({}), {3}); AddInputFromArray<float>(TensorShape({}), {.5f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({1})); test::FillValues<int>(&expected, {0}); test::ExpectTensorEqual<int>(expected, *GetOutput(0)); } TEST_F(NonMaxSuppressionV2OpTest, TestSelectFromTenIdenticalBoxes) { MakeOp(); int num_boxes = 10; std::vector<float> corners(num_boxes * 4); std::vector<float> scores(num_boxes); for (int i = 0; i < num_boxes; ++i) { corners[i * 4 + 0] = 0; corners[i * 4 + 1] = 0; corners[i * 4 + 2] = 1; corners[i * 4 + 3] = 1; scores[i] = .9; } AddInputFromArray<float>(TensorShape({num_boxes, 4}), corners); AddInputFromArray<float>(TensorShape({num_boxes}), scores); AddInputFromArray<int>(TensorShape({}), {3}); AddInputFromArray<float>(TensorShape({}), {.5f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({1})); test::FillValues<int>(&expected, {0}); test::ExpectTensorEqual<int>(expected, *GetOutput(0)); } TEST_F(NonMaxSuppressionV2OpTest, TestInconsistentBoxAndScoreShapes) { MakeOp(); AddInputFromArray<float>( TensorShape({6, 4}), {0, 0, 1, 1, 0, 0.1f, 1, 
1.1f, 0, -0.1f, 1, 0.9f, 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101}); AddInputFromArray<float>(TensorShape({5}), {.9f, .75f, .6f, .95f, .5f}); AddInputFromArray<int>(TensorShape({}), {30}); AddInputFromArray<float>(TensorShape({}), {.5f}); Status s = RunOpKernel(); ASSERT_FALSE(s.ok()); EXPECT_TRUE(absl::StrContains(s.ToString(), "scores has incompatible shape")) << s; } TEST_F(NonMaxSuppressionV2OpTest, TestInvalidIOUThreshold) { MakeOp(); AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1}); AddInputFromArray<float>(TensorShape({1}), {.9f}); AddInputFromArray<int>(TensorShape({}), {3}); AddInputFromArray<float>(TensorShape({}), {1.2f}); Status s = RunOpKernel(); ASSERT_FALSE(s.ok()); EXPECT_TRUE( absl::StrContains(s.ToString(), "iou_threshold must be in [0, 1]")) << s; } TEST_F(NonMaxSuppressionV2OpTest, TestEmptyInput) { MakeOp(); AddInputFromArray<float>(TensorShape({0, 4}), {}); AddInputFromArray<float>(TensorShape({0}), {}); AddInputFromArray<int>(TensorShape({}), {30}); AddInputFromArray<float>(TensorShape({}), {.5f}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_INT32, TensorShape({0})); test::FillValues<int>(&expected, {}); test::ExpectTensorEqual<int>(expected, *GetOutput(0)); } using NmsValidTypes = ::testing::Types<std::pair<float, float>, std::pair<float, Eigen::half>, std::pair<Eigen::half, Eigen::half>, std::pair<Eigen::half, float> >; template <typename InputAndThresholdTypes> class NonMaxSuppressionV3OpTest : public OpsTestBase { protected: using InputType = typename InputAndThresholdTypes::first_type; using ThresholdType = typename InputAndThresholdTypes::second_type; void MakeOp() { constexpr DataType kInputDataType = DataTypeToEnum<InputType>::value; constexpr DataType kThresholdDataType = DataTypeToEnum<ThresholdType>::value; TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppressionV3") .Input(FakeInput(kInputDataType)) .Input(FakeInput(kInputDataType)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(kThresholdDataType)) .Input(FakeInput(kThresholdDataType)) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); } }; TYPED_TEST_SUITE(NonMaxSuppressionV3OpTest, NmsValidTypes); TYPED_TEST(NonMaxSuppressionV3OpTest, TestSelectFromThreeClusters) { using InputType = typename TestFixture::InputType; using ThresholdType = typename TestFixture::ThresholdType; this->MakeOp(); this->template AddInputFromList<InputType, float>( TensorShape({6, 4}), {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f, 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101}); this->template AddInputFromList<InputType>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f}); this->template AddInputFromList<int>(TensorShape({}), {3}); this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f}); this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f}); TF_ASSERT_OK(this->RunOpKernel()); Tensor expected(this->allocator(), DT_INT32, TensorShape({3})); test::FillValues<int>(&expected, {3, 0, 5}); test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0))); } TYPED_TEST(NonMaxSuppressionV3OpTest, TestSelectFromThreeClustersWithScoreThreshold) { using InputType = typename TestFixture::InputType; using ThresholdType = typename TestFixture::ThresholdType; this->MakeOp(); this->template AddInputFromList<InputType, float>( TensorShape({6, 4}), {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f, 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101}); this->template AddInputFromList<InputType>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f}); this->template 
AddInputFromList<int>(TensorShape({}), {3}); this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.5f}); this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.4f}); TF_ASSERT_OK(this->RunOpKernel()); Tensor expected(this->allocator(), DT_INT32, TensorShape({2})); test::FillValues<int>(&expected, {3, 0}); test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0))); } TYPED_TEST(NonMaxSuppressionV3OpTest, TestSelectFromThreeClustersWithScoreThresholdZeroScores) { using InputType = typename TestFixture::InputType; using ThresholdType = typename TestFixture::ThresholdType; this->MakeOp(); this->template AddInputFromList<InputType, float>( TensorShape({6, 4}), {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f, 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101}); this->template AddInputFromList<InputType, float>(TensorShape({6}), {.1, 0, 0, .3, .2, -5.0}); this->template AddInputFromList<int>(TensorShape({}), {6}); this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f}); this->template AddInputFromList<ThresholdType>(TensorShape({}), {-3.0f}); TF_ASSERT_OK(this->RunOpKernel()); Tensor expected(this->allocator(), DT_INT32, TensorShape({2})); test::FillValues<int>(&expected, {3, 0}); test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0))); } TYPED_TEST(NonMaxSuppressionV3OpTest, TestSelectFromThreeClustersFlippedCoordinates) { using InputType = typename TestFixture::InputType; using ThresholdType = typename TestFixture::ThresholdType; this->MakeOp(); this->template AddInputFromList<InputType, float>( TensorShape({6, 4}), {1, 1, 0, 0, 0, 0.1f, 1, 1.1f, 0, .9f, 1, -0.1f, 0, 10, 1, 11, 1, 10.1f, 0, 11.1f, 1, 101, 0, 100}); this->template AddInputFromList<InputType>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f}); this->template AddInputFromList<int>(TensorShape({}), {3}); this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f}); this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f}); TF_ASSERT_OK(this->RunOpKernel()); Tensor expected(this->allocator(), DT_INT32, TensorShape({3})); test::FillValues<int>(&expected, {3, 0, 5}); test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0))); } TYPED_TEST(NonMaxSuppressionV3OpTest, TestSelectAtMostTwoBoxesFromThreeClusters) { using InputType = typename TestFixture::InputType; using ThresholdType = typename TestFixture::ThresholdType; this->MakeOp(); this->template AddInputFromList<InputType, float>( TensorShape({6, 4}), {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f, 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101}); this->template AddInputFromList<InputType>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f}); this->template AddInputFromList<int>(TensorShape({}), {2}); this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f}); this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f}); TF_ASSERT_OK(this->RunOpKernel()); Tensor expected(this->allocator(), DT_INT32, TensorShape({2})); test::FillValues<int>(&expected, {3, 0}); test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0))); } TYPED_TEST(NonMaxSuppressionV3OpTest, TestSelectAtMostThirtyBoxesFromThreeClusters) { using InputType = typename TestFixture::InputType; using ThresholdType = typename TestFixture::ThresholdType; this->MakeOp(); this->template AddInputFromList<InputType, float>( TensorShape({6, 4}), {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f, 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101}); this->template AddInputFromList<InputType>(TensorShape({6}), {.9f, .75f, .6f, 
.95f, .5f, .3f}); this->template AddInputFromList<int>(TensorShape({}), {30}); this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f}); this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f}); TF_ASSERT_OK(this->RunOpKernel()); Tensor expected(this->allocator(), DT_INT32, TensorShape({3})); test::FillValues<int>(&expected, {3, 0, 5}); test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0))); } TYPED_TEST(NonMaxSuppressionV3OpTest, TestSelectSingleBox) { using InputType = typename TestFixture::InputType; using ThresholdType = typename TestFixture::ThresholdType; this->MakeOp(); this->template AddInputFromList<InputType>(TensorShape({1, 4}), {0, 0, 1, 1}); this->template AddInputFromList<InputType>(TensorShape({1}), {.9f}); this->template AddInputFromList<int>(TensorShape({}), {3}); this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.5}); this->template AddInputFromList<ThresholdType>(TensorShape({}), {0}); TF_ASSERT_OK(this->RunOpKernel()); Tensor expected(this->allocator(), DT_INT32, TensorShape({1})); test::FillValues<int>(&expected, {0}); test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0))); } TYPED_TEST(NonMaxSuppressionV3OpTest, TestSelectFromTenIdenticalBoxes) { using InputType = typename TestFixture::InputType; using ThresholdType = typename TestFixture::ThresholdType; this->MakeOp(); int num_boxes = 10; std::vector<InputType> corners(num_boxes * 4); std::vector<InputType> scores(num_boxes); for (int i = 0; i < num_boxes; ++i) { corners[i * 4 + 0] = static_cast<InputType>(0); corners[i * 4 + 1] = static_cast<InputType>(0); corners[i * 4 + 2] = static_cast<InputType>(1); corners[i * 4 + 3] = static_cast<InputType>(1); scores[i] = static_cast<InputType>(.9); } this->template AddInputFromArray<InputType>(TensorShape({num_boxes, 4}), corners); this->template AddInputFromArray<InputType>(TensorShape({num_boxes}), scores); this->template AddInputFromList<int>(TensorShape({}), {3}); this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f}); this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f}); TF_ASSERT_OK(this->RunOpKernel()); Tensor expected(this->allocator(), DT_INT32, TensorShape({1})); test::FillValues<int>(&expected, {0}); test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0))); } TYPED_TEST(NonMaxSuppressionV3OpTest, TestInconsistentBoxAndScoreShapes) { using InputType = typename TestFixture::InputType; using ThresholdType = typename TestFixture::ThresholdType; this->MakeOp(); this->template AddInputFromList<InputType, float>( TensorShape({6, 4}), {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f, 0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101}); this->template AddInputFromList<InputType>(TensorShape({5}), {.9f, .75f, .6f, .95f, .5f}); this->template AddInputFromList<int>(TensorShape({}), {30}); this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.5}); this->template AddInputFromList<ThresholdType>(TensorShape({}), {0}); Status s = this->RunOpKernel(); ASSERT_FALSE(s.ok()); EXPECT_TRUE(absl::StrContains(s.ToString(), "scores has incompatible shape")) << s; } TYPED_TEST(NonMaxSuppressionV3OpTest, TestInvalidIOUThreshold) { using InputType = typename TestFixture::InputType; using ThresholdType = typename TestFixture::ThresholdType; this->MakeOp(); this->template AddInputFromList<InputType>(TensorShape({1, 4}), {0, 0, 1, 1}); this->template AddInputFromList<InputType>(TensorShape({1}), {.9f}); this->template AddInputFromList<int>(TensorShape({}), {3}); this->template 
  AddInputFromList<ThresholdType>(TensorShape({}), {1.2f});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {0});
  Status s = this->RunOpKernel();
  ASSERT_FALSE(s.ok());
  EXPECT_TRUE(absl::StrContains(s.ToString(), "iou_threshold must be in [0, 1]")) << s;
}

TYPED_TEST(NonMaxSuppressionV3OpTest, TestEmptyInput) {
  using InputType = typename TestFixture::InputType;
  using ThresholdType = typename TestFixture::ThresholdType;
  this->MakeOp();
  this->template AddInputFromArray<InputType>(TensorShape({0, 4}), {});
  this->template AddInputFromArray<InputType>(TensorShape({0}), {});
  this->template AddInputFromList<int>(TensorShape({}), {30});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(this->RunOpKernel());
  Tensor expected(this->allocator(), DT_INT32, TensorShape({0}));
  test::FillValues<int>(&expected, {});
  test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}

template <typename InputAndThresholdTypes>
class NonMaxSuppressionV4OpTest : public OpsTestBase {
 protected:
  using InputType = typename InputAndThresholdTypes::first_type;
  using ThresholdType = typename InputAndThresholdTypes::second_type;

  void MakeOp() {
    constexpr DataType kInputDataType = DataTypeToEnum<InputType>::value;
    constexpr DataType kThresholdDataType = DataTypeToEnum<ThresholdType>::value;
    TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppressionV4")
                     .Input(FakeInput(kInputDataType))
                     .Input(FakeInput(kInputDataType))
                     .Input(FakeInput(DT_INT32))
                     .Input(FakeInput(kThresholdDataType))
                     .Input(FakeInput(kThresholdDataType))
                     .Attr("pad_to_max_output_size", true)
                     .Finalize(node_def()));
    TF_EXPECT_OK(InitOp());
  }
};

TYPED_TEST_SUITE(NonMaxSuppressionV4OpTest, NmsValidTypes);

TYPED_TEST(NonMaxSuppressionV4OpTest, TestSelectFromThreeClustersPadFive) {
  using InputType = typename TestFixture::InputType;
  using ThresholdType = typename TestFixture::ThresholdType;
  this->MakeOp();
  this->template AddInputFromList<InputType, float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  this->template AddInputFromList<InputType>(TensorShape({6}),
                                             {.9f, .75f, .6f, .95f, .5f, .3f});
  this->template AddInputFromList<int>(TensorShape({}), {5});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(this->RunOpKernel());
  const auto expected_indices = test::AsTensor<int>({3, 0, 5, 0, 0});
  test::ExpectTensorEqual<int>(expected_indices, *(this->GetOutput(0)));
  Tensor expected_num_valid = test::AsScalar<int>(3);
  test::ExpectTensorEqual<int>(expected_num_valid, *(this->GetOutput(1)));
}

TYPED_TEST(NonMaxSuppressionV4OpTest, TestSelectFromThreeClustersPadFiveScoreThr) {
  using InputType = typename TestFixture::InputType;
  using ThresholdType = typename TestFixture::ThresholdType;
  this->MakeOp();
  this->template AddInputFromList<InputType, float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  this->template AddInputFromList<InputType>(TensorShape({6}),
                                             {.9f, .75f, .6f, .95f, .5f, .3f});
  this->template AddInputFromList<int>(TensorShape({}), {6});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
  this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.4f});
  TF_ASSERT_OK(this->RunOpKernel());
  const auto expected_indices = test::AsTensor<int>({3, 0, 0, 0, 0, 0});
  test::ExpectTensorEqual<int>(expected_indices, *(this->GetOutput(0)));
  Tensor expected_num_valid = test::AsScalar<int>(2);
  test::ExpectTensorEqual<int>(expected_num_valid, *(this->GetOutput(1)));
}

class NonMaxSuppressionV5OpTest : public OpsTestBase {
 protected:
  void MakeOp() {
    TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppressionV5")
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_INT32))
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Attr("pad_to_max_output_size", true)
                     .Finalize(node_def()));
    TF_EXPECT_OK(InitOp());
  }
};

TEST_F(NonMaxSuppressionV5OpTest, TestSelectFromThreeClustersPadFive) {
  MakeOp();
  AddInputFromArray<float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  AddInputFromArray<int>(TensorShape({}), {5});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(RunOpKernel());
  const auto expected_indices = test::AsTensor<int>({3, 0, 5, 0, 0});
  test::ExpectTensorEqual<int>(expected_indices, *GetOutput(0));
  const auto expected_scores = test::AsTensor<float>({.95f, .9f, .3f, 0.0f, 0.0f});
  test::ExpectTensorNear<float>(expected_scores, *GetOutput(1), 1e-2);
  Tensor expected_num_valid = test::AsScalar<int>(3);
  test::ExpectTensorEqual<int>(expected_num_valid, *GetOutput(2));
}

TEST_F(NonMaxSuppressionV5OpTest, TestSelectFromThreeClustersWithSoftNMS) {
  MakeOp();
  AddInputFromArray<float>(
      TensorShape({6, 4}),
      {0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
       0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  AddInputFromArray<int>(TensorShape({}), {6});
  AddInputFromArray<float>(TensorShape({}), {0.5f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  AddInputFromArray<float>(TensorShape({}), {0.5f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_INT32, TensorShape({6}));
  test::FillValues<int>(&expected, {3, 0, 1, 5, 4, 2});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
  Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({6}));
  test::FillValues<float>(&expected_scores, {0.95, 0.9, 0.384, 0.3, 0.256, 0.197});
  test::ExpectTensorNear<float>(expected_scores, *GetOutput(1), 1e-2);
  Tensor expected_num_valid = test::AsScalar<int>(6);
  test::ExpectTensorEqual<int>(expected_num_valid, *GetOutput(2));
}

class NonMaxSuppressionWithOverlapsOpTest : public OpsTestBase {
 protected:
  void MakeOp() {
    TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op",
                                "NonMaxSuppressionWithOverlaps")
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_INT32))
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Finalize(node_def()));
    TF_EXPECT_OK(InitOp());
  }

  void AddIoUInput(const std::vector<float>& boxes) {
    ASSERT_EQ((boxes.size() % 4), 0);
    size_t num_boxes = boxes.size() / 4;
    std::vector<float> iou_overlaps(num_boxes * num_boxes);
    auto corner_access = [&boxes](size_t box_idx, size_t corner_idx) {
      return boxes[box_idx * 4 + corner_idx];
    };
    for (size_t i = 0; i < num_boxes; ++i) {
      for (size_t j = 0; j < num_boxes; ++j) {
        const float ymin_i = std::min<float>(corner_access(i, 0), corner_access(i, 2));
        const float xmin_i = std::min<float>(corner_access(i, 1), corner_access(i, 3));
        const float ymax_i = std::max<float>(corner_access(i, 0), corner_access(i, 2));
        const float xmax_i = std::max<float>(corner_access(i, 1), corner_access(i, 3));
        const float ymin_j = std::min<float>(corner_access(j, 0), corner_access(j, 2));
        const float xmin_j = std::min<float>(corner_access(j, 1), corner_access(j, 3));
        const float ymax_j = std::max<float>(corner_access(j, 0), corner_access(j, 2));
        const float xmax_j = std::max<float>(corner_access(j, 1), corner_access(j, 3));
        const float area_i = (ymax_i - ymin_i) * (xmax_i - xmin_i);
        const float area_j = (ymax_j - ymin_j) * (xmax_j - xmin_j);
        float iou;
        if (area_i <= 0 || area_j <= 0) {
          iou = 0.0;
        } else {
          const float intersection_ymin = std::max<float>(ymin_i, ymin_j);
          const float intersection_xmin = std::max<float>(xmin_i, xmin_j);
          const float intersection_ymax = std::min<float>(ymax_i, ymax_j);
          const float intersection_xmax = std::min<float>(xmax_i, xmax_j);
          const float intersection_area =
              std::max<float>(intersection_ymax - intersection_ymin, 0.0) *
              std::max<float>(intersection_xmax - intersection_xmin, 0.0);
          iou = intersection_area / (area_i + area_j - intersection_area);
        }
        iou_overlaps[i * num_boxes + j] = iou;
      }
    }
    AddInputFromArray<float>(TensorShape({static_cast<signed>(num_boxes),
                                          static_cast<signed>(num_boxes)}),
                             iou_overlaps);
  }
};

TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestSelectFromThreeClusters) {
  MakeOp();
  AddIoUInput({0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
               0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_INT32, TensorShape({3}));
  test::FillValues<int>(&expected, {3, 0, 5});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

TEST_F(NonMaxSuppressionWithOverlapsOpTest,
       TestSelectFromThreeClustersFlippedCoordinates) {
  MakeOp();
  AddIoUInput({1, 1, 0, 0, 0, 0.1f, 1, 1.1f, 0, .9f, 1, -0.1f,
               0, 10, 1, 11, 1, 10.1f, 0, 11.1f, 1, 101, 0, 100});
  AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_INT32, TensorShape({3}));
  test::FillValues<int>(&expected, {3, 0, 5});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

TEST_F(NonMaxSuppressionWithOverlapsOpTest,
       TestSelectAtMostTwoBoxesFromThreeClusters) {
  MakeOp();
  AddIoUInput({0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
               0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  AddInputFromArray<int>(TensorShape({}), {2});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_INT32, TensorShape({2}));
  test::FillValues<int>(&expected, {3, 0});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

TEST_F(NonMaxSuppressionWithOverlapsOpTest,
       TestSelectAtMostThirtyBoxesFromThreeClusters) {
  MakeOp();
  AddIoUInput({0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
               0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
  AddInputFromArray<int>(TensorShape({}), {30});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_INT32, TensorShape({3}));
  test::FillValues<int>(&expected, {3, 0, 5});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestSelectSingleBox) {
  MakeOp();
  AddIoUInput({0, 0, 1, 1});
  AddInputFromArray<float>(TensorShape({1}), {.9f});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_INT32, TensorShape({1}));
  test::FillValues<int>(&expected, {0});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestSelectFromTenIdenticalBoxes) {
  MakeOp();
  int num_boxes = 10;
  std::vector<float> corners(num_boxes * 4);
  std::vector<float> scores(num_boxes);
  for (int i = 0; i < num_boxes; ++i) {
    corners[i * 4 + 0] = 0;
    corners[i * 4 + 1] = 0;
    corners[i * 4 + 2] = 1;
    corners[i * 4 + 3] = 1;
    scores[i] = .9;
  }
  AddIoUInput(corners);
  AddInputFromArray<float>(TensorShape({num_boxes}), scores);
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_INT32, TensorShape({1}));
  test::FillValues<int>(&expected, {0});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestInconsistentBoxAndScoreShapes) {
  MakeOp();
  AddIoUInput({0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
               0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
  AddInputFromArray<float>(TensorShape({5}), {.9f, .75f, .6f, .95f, .5f});
  AddInputFromArray<int>(TensorShape({}), {30});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  Status s = RunOpKernel();
  ASSERT_FALSE(s.ok());
  EXPECT_TRUE(absl::StrContains(s.ToString(), "scores has incompatible shape")) << s;
}

TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestInvalidOverlapsShape) {
  MakeOp();
  AddInputFromArray<float>(TensorShape({2, 3}), {0, 0, 0, 0, 0, 0});
  AddInputFromArray<float>(TensorShape({2}), {0.5f, 0.5f});
  AddInputFromArray<int>(TensorShape({}), {30});
  AddInputFromArray<float>(TensorShape({}), {0.f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  Status s = RunOpKernel();
  ASSERT_FALSE(s.ok());
  EXPECT_TRUE(absl::StrContains(s.ToString(), "overlaps must be square")) << s;
}

TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestThresholdGreaterOne) {
  MakeOp();
  AddIoUInput({0, 0, 1, 1});
  AddInputFromArray<float>(TensorShape({1}), {.9f});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<float>(TensorShape({}), {1.2f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(RunOpKernel());
}

TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestThresholdSmallerZero) {
  MakeOp();
  AddIoUInput({0, 0, 1, 1});
  AddInputFromArray<float>(TensorShape({1}), {.9f});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<float>(TensorShape({}), {-0.2f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(RunOpKernel());
}

TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestEmptyInput) {
  MakeOp();
  AddIoUInput({});
  AddInputFromArray<float>(TensorShape({0}), {});
  AddInputFromArray<int>(TensorShape({}), {30});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_INT32, TensorShape({0}));
  test::FillValues<int>(&expected, {});
  test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}

class CombinedNonMaxSuppressionOpTest : public OpsTestBase {
 protected:
  void MakeOp(bool pad_per_class = false, bool clip_boxes = true) {
    TF_EXPECT_OK(NodeDefBuilder("combined_non_max_suppression_op",
                                "CombinedNonMaxSuppression")
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_INT32))
                     .Input(FakeInput(DT_INT32))
                     .Input(FakeInput(DT_FLOAT))
                     .Input(FakeInput(DT_FLOAT))
                     .Attr("pad_per_class", pad_per_class)
                     .Attr("clip_boxes", clip_boxes)
                     .Finalize(node_def()));
    TF_EXPECT_OK(InitOp());
  }
};

TEST_F(CombinedNonMaxSuppressionOpTest, TestEmptyInput) {
  MakeOp();
  AddInputFromArray<float>(TensorShape({0, 0, 0, 4}), {});
  AddInputFromArray<float>(TensorShape({0, 0, 0}), {});
  AddInputFromArray<int>(TensorShape({}), {30});
  AddInputFromArray<int>(TensorShape({}), {10});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({0, 10, 4}));
  test::FillValues<float>(&expected_boxes, {});
  test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
  Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({0, 10}));
  test::FillValues<float>(&expected_scores, {});
  test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
  Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({0, 10}));
  test::FillValues<float>(&expected_classes, {});
  test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
  Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({0}));
  test::FillValues<int>(&expected_valid_d, {});
  test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}

TEST_F(CombinedNonMaxSuppressionOpTest, TestSelectFromThreeClusters) {
  MakeOp();
  AddInputFromArray<float>(
      TensorShape({1, 6, 1, 4}),
      {0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
       0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4});
  AddInputFromArray<float>(TensorShape({1, 6, 1}), {.9f, .75f, .6f, .95f, .5f, .3f});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({1, 3, 4}));
  test::FillValues<float>(&expected_boxes,
                          {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0.3, 1, 0.4});
  test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
  Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({1, 3}));
  test::FillValues<float>(&expected_scores, {0.95, 0.9, 0.3});
  test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
  Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({1, 3}));
  test::FillValues<float>(&expected_classes, {0, 0, 0});
  test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
  Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({1}));
  test::FillValues<int>(&expected_valid_d, {3});
  test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}

TEST_F(CombinedNonMaxSuppressionOpTest, TestSelectFromThreeClustersNoBoxClipping) {
  MakeOp(false, false);
  AddInputFromArray<float>(TensorShape({1, 6, 1, 4}),
                           {0, 0, 10, 10, 0, 1, 10, 11, 0, 1, 10, 9,
                            0, 11, 10, 20, 0, 12, 10, 21, 0, 30, 100, 40});
  AddInputFromArray<float>(TensorShape({1, 6, 1}), {.9f, .75f, .6f, .95f, .5f, .3f});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({1, 3, 4}));
  test::FillValues<float>(&expected_boxes,
                          {0, 11, 10, 20, 0, 0, 10, 10, 0, 30, 100, 40});
  test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
  Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({1, 3}));
  test::FillValues<float>(&expected_scores, {0.95, 0.9, 0.3});
  test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
  Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({1, 3}));
  test::FillValues<float>(&expected_classes, {0, 0, 0});
  test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
  Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({1}));
  test::FillValues<int>(&expected_valid_d, {3});
  test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}

TEST_F(CombinedNonMaxSuppressionOpTest, TestSelectFromThreeClustersWithScoreThreshold) {
  MakeOp();
  AddInputFromArray<float>(
      TensorShape({1, 6, 1, 4}),
      {0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
       0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4});
  AddInputFromArray<float>(TensorShape({1, 6, 1}), {.9f, .75f, .6f, .95f, .5f, .3f});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.4f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({1, 3, 4}));
  test::FillValues<float>(&expected_boxes,
                          {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0});
  test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
  Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({1, 3}));
  test::FillValues<float>(&expected_scores, {0.95, 0.9, 0});
  test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
  Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({1, 3}));
  test::FillValues<float>(&expected_classes, {0, 0, 0});
  test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
  Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({1}));
  test::FillValues<int>(&expected_valid_d, {2});
  test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}

TEST_F(CombinedNonMaxSuppressionOpTest,
       TestSelectFromThreeClustersWithScoreThresholdZeroScores) {
  MakeOp();
  AddInputFromArray<float>(
      TensorShape({1, 6, 1, 4}),
      {0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
       0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4});
  AddInputFromArray<float>(TensorShape({1, 6, 1}), {.1f, 0, 0, .3f, .2f, -5.0f});
  AddInputFromArray<int>(TensorShape({}), {4});
  AddInputFromArray<int>(TensorShape({}), {5});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {-3.0f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({1, 5, 4}));
  test::FillValues<float>(&expected_boxes,
                          {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0,
                           0, 0, 0, 0, 0, 0, 0, 0});
  test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
  Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({1, 5}));
  test::FillValues<float>(&expected_scores, {0.3, 0.1, 0, 0, 0});
  test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
  Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({1, 5}));
  test::FillValues<float>(&expected_classes, {0, 0, 0, 0, 0});
  test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
  Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({1}));
  test::FillValues<int>(&expected_valid_d, {2});
  test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}

TEST_F(CombinedNonMaxSuppressionOpTest, TestSelectSingleBox) {
  MakeOp();
  AddInputFromArray<float>(TensorShape({1, 1, 1, 4}), {0, 0, 1, 1});
  AddInputFromArray<float>(TensorShape({1, 1, 1}), {.9f});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<int>(TensorShape({}), {1});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({1, 1, 4}));
  test::FillValues<float>(&expected_boxes, {0, 0, 1, 1});
  test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
  Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({1, 1}));
  test::FillValues<float>(&expected_scores, {0.9});
  test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
  Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({1, 1}));
  test::FillValues<float>(&expected_classes, {0});
  test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
  Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({1}));
  test::FillValues<int>(&expected_valid_d, {1});
  test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}

TEST_F(CombinedNonMaxSuppressionOpTest, TestSelectFromTwoBatchesWithScoreThreshold) {
  MakeOp();
  AddInputFromArray<float>(
      TensorShape({2, 6, 1, 4}),
      {0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
       0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
       0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,
       0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
  AddInputFromArray<float>(
      TensorShape({2, 6, 1}),
      {.9f, .75f, .6f, .95f, .5f, .3f, .9f, .75f, .6f, .95f, .5f, .3f});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.4f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 3, 4}));
  test::FillValues<float>(&expected_boxes,
                          {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0,
                           0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0, 0, 0});
  test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
  Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 3}));
  test::FillValues<float>(&expected_scores, {0.95, 0.9, 0, 0.95, 0.9, 0});
  test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
  Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 3}));
  test::FillValues<float>(&expected_classes, {0, 0, 0, 0, 0, 0});
  test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
  Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
  test::FillValues<int>(&expected_valid_d, {2, 2});
  test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}

TEST_F(CombinedNonMaxSuppressionOpTest, TestSelectFromTwoBatchesTwoClasses) {
  MakeOp();
  AddInputFromArray<float>(
      TensorShape({2, 6, 1, 4}),
      {0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
       0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
       0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,
       0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
  AddInputFromArray<float>(
      TensorShape({2, 6, 2}),
      {0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f,
       0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 3, 4}));
  test::FillValues<float>(&expected_boxes,
                          {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f,
                           0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f});
  test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
  Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 3}));
  test::FillValues<float>(&expected_scores, {0.95, 0.9, 0.75, 0.95, 0.9, 0.75});
  test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
  Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 3}));
  test::FillValues<float>(&expected_classes, {0, 1, 0, 0, 1, 0});
  test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
  Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
  test::FillValues<int>(&expected_valid_d, {3, 3});
  test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}

TEST_F(CombinedNonMaxSuppressionOpTest,
       TestSelectFromTwoBatchesTwoClassesWithScoreThreshold) {
  MakeOp();
  AddInputFromArray<float>(
      TensorShape({2, 6, 1, 4}),
      {0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
       0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
       0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,
       0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
  AddInputFromArray<float>(
      TensorShape({2, 6, 2}),
      {0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f,
       0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.8f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 3, 4}));
  test::FillValues<float>(&expected_boxes,
                          {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0,
                           0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0, 0, 0});
  test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
  Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 3}));
  test::FillValues<float>(&expected_scores, {0.95, 0.9, 0, 0.95, 0.9, 0});
  test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
  Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 3}));
  test::FillValues<float>(&expected_classes, {0, 1, 0, 0, 1, 0});
  test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
  Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
  test::FillValues<int>(&expected_valid_d, {2, 2});
  test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}

TEST_F(CombinedNonMaxSuppressionOpTest,
       TestSelectFromTwoBatchesTwoClassesWithScoreThresholdPaddedTotalSize) {
  MakeOp(true);
  AddInputFromArray<float>(
      TensorShape({2, 6, 1, 4}),
      {0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
       0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
       0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,
       0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
  AddInputFromArray<float>(
      TensorShape({2, 6, 2}),
      {0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f,
       0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});
  AddInputFromArray<int>(TensorShape({}), {10});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.8f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 3, 4}));
  test::FillValues<float>(&expected_boxes,
                          {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0,
                           0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0, 0, 0});
  test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
  Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 3}));
  test::FillValues<float>(&expected_scores, {0.95, 0.9, 0, 0.95, 0.9, 0});
  test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
  Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 3}));
  test::FillValues<float>(&expected_classes, {0, 1, 0, 0, 1, 0});
  test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
  Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
  test::FillValues<int>(&expected_valid_d, {2, 2});
  test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}

TEST_F(CombinedNonMaxSuppressionOpTest,
       TestSelectFromTwoBatchesTwoClassesWithScoreThresholdPaddedPerClass) {
  MakeOp(true);
  AddInputFromArray<float>(
      TensorShape({2, 6, 1, 4}),
      {0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
       0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
       0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,
       0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
  AddInputFromArray<float>(
      TensorShape({2, 6, 2}),
      {0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f,
       0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});
  AddInputFromArray<int>(TensorShape({}), {2});
  AddInputFromArray<int>(TensorShape({}), {50});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.8f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 4, 4}));
  test::FillValues<float>(&expected_boxes,
                          {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0, 0, 0, 0, 0,
                           0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0, 0, 0, 0, 0, 0, 0});
  test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
  Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 4}));
  test::FillValues<float>(&expected_scores, {0.95, 0.9, 0, 0, 0.95, 0.9, 0, 0});
  test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
  Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 4}));
  test::FillValues<float>(&expected_classes, {0, 1, 0, 0, 0, 1, 0, 0});
  test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
  Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
  test::FillValues<int>(&expected_valid_d, {2, 2});
  test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}

TEST_F(CombinedNonMaxSuppressionOpTest, TestSelectFromTwoBatchesTwoClassesTotalSize) {
  MakeOp();
  AddInputFromArray<float>(
      TensorShape({2, 6, 1, 4}),
      {0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
       0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
       0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,
       0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
  AddInputFromArray<float>(
      TensorShape({2, 6, 2}),
      {0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f,
       0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<int>(TensorShape({}), {5});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.1f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 5, 4}));
  test::FillValues<float>(
      &expected_boxes,
      {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f,
       0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
       0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f,
       0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
  test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
  Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 5}));
  test::FillValues<float>(
      &expected_scores,
      {0.95, 0.9, 0.75, 0.5, 0.3, 0.95, 0.9, 0.75, 0.5, 0.3});
  test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
  Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 5}));
  test::FillValues<float>(&expected_classes, {0, 1, 0, 1, 0, 0, 1, 0, 1, 0});
  test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
  Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
  test::FillValues<int>(&expected_valid_d, {5, 5});
  test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}

TEST_F(CombinedNonMaxSuppressionOpTest,
       TestSelectFromTwoBatchesTwoClassesForBoxesAndScores) {
  MakeOp();
  AddInputFromArray<float>(
      TensorShape({2, 6, 2, 4}),
      {0, 0, 0.1, 0.1, 0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, 0.6f, 0.1, 0.7f,
       0, -0.01, 0.1, 0.09f, 0, -0.01, 0.1, 0.09f, 0, 0.11, 0.1, 0.2, 0, 0.11, 0.1, 0.2,
       0, 0.12f, 0.1, 0.21f, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4, 0, 0.3, 1, 0.4,
       0, 0, 0.2, 0.2, 0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, 0.02f, 0.2, 0.22f,
       0, -0.02, 0.2, 0.19f, 0, -0.02, 0.2, 0.19f, 0, 0.21, 0.2, 0.3, 0, 0.21, 0.2, 0.3,
       0, 0.22f, 0.2, 0.31f, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5, 0, 0.4, 1, 0.5});
  AddInputFromArray<float>(
      TensorShape({2, 6, 2}),
      {0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f,
       0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<int>(TensorShape({}), {3});
  AddInputFromArray<float>(TensorShape({}), {.5f});
  AddInputFromArray<float>(TensorShape({}), {0.0f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 3, 4}));
  test::FillValues<float>(&expected_boxes,
                          {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0.6f, 0.1, 0.7f,
                           0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f});
  test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
  Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 3}));
  test::FillValues<float>(&expected_scores, {0.95, 0.9, 0.8, 0.95, 0.9, 0.75});
  test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
  Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 3}));
  test::FillValues<float>(&expected_classes, {0, 1, 1, 0, 1, 0});
  test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
  Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
  test::FillValues<int>(&expected_valid_d, {3, 3});
  test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}

}
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/non_max_suppression_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/non_max_suppression_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
c3e707eb-aa99-42f0-8beb-c5644b98862f
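The tests above repeatedly exercise the same "three clusters" fixture: six boxes in three overlapping groups, scores {.9, .75, .6, .95, .5, .3}, an IoU threshold of 0.5, and the expected selection {3, 0, 5}. Below is a minimal standalone sketch (plain C++, not the TensorFlow kernel or its test API) of the greedy hard-NMS rule those expectations encode; the [y1, x1, y2, x2] box layout and the min/max corner normalization mirror the AddIoUInput helper above.

#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

struct Box { float y1, x1, y2, x2; };

// IoU with min/max corner normalization, as in AddIoUInput above.
float IoU(const Box& a, const Box& b) {
  const float ay1 = std::min(a.y1, a.y2), ay2 = std::max(a.y1, a.y2);
  const float ax1 = std::min(a.x1, a.x2), ax2 = std::max(a.x1, a.x2);
  const float by1 = std::min(b.y1, b.y2), by2 = std::max(b.y1, b.y2);
  const float bx1 = std::min(b.x1, b.x2), bx2 = std::max(b.x1, b.x2);
  const float area_a = (ay2 - ay1) * (ax2 - ax1);
  const float area_b = (by2 - by1) * (bx2 - bx1);
  if (area_a <= 0.f || area_b <= 0.f) return 0.f;
  const float ih = std::max(0.f, std::min(ay2, by2) - std::max(ay1, by1));
  const float iw = std::max(0.f, std::min(ax2, bx2) - std::max(ax1, bx1));
  const float inter = ih * iw;
  return inter / (area_a + area_b - inter);
}

// Greedy hard NMS: repeatedly take the highest-scoring unsuppressed box and
// suppress every other box whose IoU with it exceeds the threshold.
std::vector<int> GreedyNms(const std::vector<Box>& boxes,
                           const std::vector<float>& scores,
                           int max_output, float iou_threshold) {
  std::vector<int> order(boxes.size());
  std::iota(order.begin(), order.end(), 0);
  std::sort(order.begin(), order.end(),
            [&](int i, int j) { return scores[i] > scores[j]; });
  std::vector<bool> suppressed(boxes.size(), false);
  std::vector<int> selected;
  for (int i : order) {
    if (static_cast<int>(selected.size()) >= max_output) break;
    if (suppressed[i]) continue;
    selected.push_back(i);
    for (int j : order) {
      if (j != i && !suppressed[j] && IoU(boxes[i], boxes[j]) > iou_threshold) {
        suppressed[j] = true;
      }
    }
  }
  return selected;
}

int main() {
  // The six "three clusters" boxes from the tests.
  const std::vector<Box> boxes = {{0, 0, 1, 1},        {0, 0.1f, 1, 1.1f},
                                  {0, -0.1f, 1, 0.9f}, {0, 10, 1, 11},
                                  {0, 10.1f, 1, 11.1f}, {0, 100, 1, 101}};
  const std::vector<float> scores = {.9f, .75f, .6f, .95f, .5f, .3f};
  for (int i : GreedyNms(boxes, scores, 3, 0.5f)) std::printf("%d ", i);  // 3 0 5
  std::printf("\n");
}

The padded variants (V4/V5 with pad_to_max_output_size) simply right-pad this index list with zeros up to max_output and report the unpadded count separately, which is why the expectations read {3, 0, 5, 0, 0} with num_valid = 3.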
cpp
tensorflow/tensorflow
device_propagation
tensorflow/core/common_runtime/device_propagation.cc
tensorflow/core/common_runtime/device_propagation_test.cc
#include "tensorflow/core/common_runtime/device_propagation.h" #include <string> #include <utility> #include "absl/container/flat_hash_set.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/graph.h" namespace tensorflow { namespace { const std::string& AssignedOrRequestedDevice(const Node& node) { if (!node.assigned_device_name().empty()) { return node.assigned_device_name(); } return node.requested_device(); } bool UpdateDeviceFromInputs( const device_propagation::NodeFilter& node_filter, const device_propagation::DeviceFilter& device_filter, Node* node) { if (!AssignedOrRequestedDevice(*node).empty() || !node_filter(*node)) { return false; } string proposed_device = ""; Node* proposed_src = nullptr; for (const Edge* e : node->in_edges()) { if (e->IsControlEdge()) { continue; } Node* src = e->src(); const string& src_device = AssignedOrRequestedDevice(*src); if ((node->IsSwitch() && src->IsLoopCond()) || (node->IsMerge() && src->IsEnter())) { continue; } if (!device_filter(src_device)) return false; if (proposed_src == nullptr) { proposed_device = src_device; proposed_src = src; } else if (proposed_device != src_device) { return false; } } if (proposed_src) { node->set_assigned_device_name(proposed_src->assigned_device_name()); node->set_requested_device(proposed_src->requested_device()); return true; } else { return false; } } } void PropagateDevices(const device_propagation::NodeFilter& node_filter, const device_propagation::DeviceFilter& device_filter, Graph* graph) { bool nodes_changed = true; while (nodes_changed) { nodes_changed = false; BreadthFirstTraversal( *graph, {}, [&nodes_changed, &node_filter, &device_filter](Node* node) { nodes_changed |= UpdateDeviceFromInputs(node_filter, device_filter, node); }); } } }
#include "tensorflow/core/common_runtime/device_propagation.h" #include <string> #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/strings/match.h" #include "tensorflow/cc/framework/scope.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/control_flow_ops.h" #include "tensorflow/cc/ops/control_flow_ops_internal.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/lib/core/status_test_util.h" using ::testing::UnorderedElementsAreArray; namespace tensorflow { namespace { const char kTpu0[] = "/job:localhost/replica:0/task:0/device:TPU:0"; const char kTpu1[] = "/job:localhost/replica:0/task:0/device:TPU:1"; const char kTpu2[] = "/job:localhost/replica:0/task:0/device:TPU:2"; const char kGpu0[] = "/job:localhost/replica:0/task:0/device:GPU:0"; bool IsTPUDevice(StringPiece device_name) { return absl::StrContains(device_name, "device:TPU:"); } device_propagation::NodeFilter TargetOps( const absl::flat_hash_set<std::string>& ops) { return [&ops](const Node& n) { return ops.contains(n.type_string()); }; } absl::flat_hash_map<std::string, std::string> GetNodeNameDevices( const Graph& graph) { absl::flat_hash_map<std::string, std::string> node_name_devices; for (const Node* node : graph.nodes()) { if (node->IsSource() || node->IsSink()) { continue; } const string& device = node->assigned_device_name().empty() ? node->requested_device() : node->assigned_device_name(); node_name_devices[node->name()] = device; } return node_name_devices; } TEST(DevicePropagationTest, PropagateTPUDevices) { Scope scope = Scope::NewRootScope().ExitOnError(); auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT); a.node()->set_assigned_device_name(kTpu0); auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT); b.node()->set_assigned_device_name(kTpu1); auto c = ops::Identity(scope.WithOpName("C"), a); auto d = ops::Merge(scope.WithOpName("D"), std::initializer_list<Input>{a, c}); auto e = ops::Merge(scope.WithOpName("E"), std::initializer_list<Input>{b, c}); auto f = ops::Identity(scope.WithOpName("F"), a); f.node()->set_assigned_device_name(kTpu2); Graph graph(OpRegistry::Global()); TF_ASSERT_OK(scope.ToGraph(&graph)); PropagateDevices(TargetOps({"Identity", "Merge"}), IsTPUDevice, &graph); EXPECT_THAT( GetNodeNameDevices(graph), UnorderedElementsAreArray( std::vector<std::pair<std::string, std::string>>{ {"A", kTpu0}, {"B", kTpu1}, {"C", kTpu0}, {"D", kTpu0}, {"E", ""}, {"F", kTpu2}, })); } TEST(DevicePropagationTest, DoNotPropagateToUnsupportedOps) { Scope scope = Scope::NewRootScope().ExitOnError(); auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT); a.node()->set_assigned_device_name(kTpu0); auto b = ops::Identity(scope.WithOpName("B"), a); Graph graph(OpRegistry::Global()); TF_ASSERT_OK(scope.ToGraph(&graph)); PropagateDevices(TargetOps({"Merge"}), IsTPUDevice, &graph); EXPECT_THAT(GetNodeNameDevices(graph), UnorderedElementsAreArray( std::vector<std::pair<std::string, std::string>>{ {"A", kTpu0}, {"B", ""}, })); } TEST(DevicePropagationTest, DoNotPropagateUnmatchedDevices) { Scope scope = Scope::NewRootScope().ExitOnError(); auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT); a.node()->set_assigned_device_name(kGpu0); auto b = ops::Identity(scope.WithOpName("B"), a); Graph graph(OpRegistry::Global()); TF_ASSERT_OK(scope.ToGraph(&graph)); PropagateDevices(TargetOps({"Identity"}), IsTPUDevice, &graph); EXPECT_THAT(GetNodeNameDevices(graph), 
UnorderedElementsAreArray( std::vector<std::pair<std::string, std::string>>{ {"A", kGpu0}, {"B", ""}, })); } TEST(DevicePropagationTest, SwitchOpShouldIgnoreLoopCondOp) { Scope scope = Scope::NewRootScope().ExitOnError(); auto a = ops::Placeholder(scope.WithOpName("A"), DT_BOOL); auto b = ops::LoopCond(scope.WithOpName("B"), a); auto c = ops::Placeholder(scope.WithOpName("C"), DT_FLOAT); c.node()->set_assigned_device_name(kTpu2); auto d = ops::Switch(scope.WithOpName("D"), c, b); Graph graph(OpRegistry::Global()); TF_ASSERT_OK(scope.ToGraph(&graph)); PropagateDevices(TargetOps({"Switch", "LoopCond"}), IsTPUDevice, &graph); EXPECT_THAT( GetNodeNameDevices(graph), UnorderedElementsAreArray(std::vector< std::pair<std::string, std::string>>{ {"A", ""}, {"B", ""}, {"C", kTpu2}, {"D", kTpu2}, })); } TEST(DevicePropagationTest, MergeOpShouldIgnoreEnterOp) { Scope scope = Scope::NewRootScope().ExitOnError(); auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT); auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT); b.node()->set_assigned_device_name(kTpu2); auto c = ops::internal::Enter(scope.WithOpName("C"), a, "Enter"); auto d = ops::NextIteration(scope.WithOpName("D"), b); auto e = ops::Merge(scope.WithOpName("E"), std::initializer_list<Input>{c, d}); Graph graph(OpRegistry::Global()); TF_ASSERT_OK(scope.ToGraph(&graph)); PropagateDevices(TargetOps({"Enter", "Merge", "NextIteration"}), IsTPUDevice, &graph); EXPECT_THAT( GetNodeNameDevices(graph), UnorderedElementsAreArray(std::vector< std::pair<std::string, std::string>>{ {"A", ""}, {"B", kTpu2}, {"C", ""}, {"D", kTpu2}, {"E", kTpu2}, })); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device_propagation.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device_propagation_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
7a493795-3ee7-4cb2-a7b7-639d43aba311
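As a toy illustration of the fixed-point rule that device_propagation.cc implements and the tests above pin down, the sketch below uses a hypothetical ToyNode graph rather than the TensorFlow Graph API, and deliberately omits the control-edge and Switch/LoopCond, Merge/Enter special cases: an unassigned node adopts a device only when every input agrees on a single filter-accepted device, and sweeps repeat until nothing changes.

#include <cstdio>
#include <string>
#include <vector>

struct ToyNode {
  std::string device;       // empty means "unassigned"
  std::vector<int> inputs;  // indices of input nodes
};

// Stand-in for the DeviceFilter (IsTPUDevice in the tests above).
bool Accept(const std::string& d) { return d.find("TPU") != std::string::npos; }

void Propagate(std::vector<ToyNode>& nodes) {
  bool changed = true;
  while (changed) {  // iterate to a fixed point, like PropagateDevices
    changed = false;
    for (ToyNode& n : nodes) {
      if (!n.device.empty() || n.inputs.empty()) continue;
      const std::string& candidate = nodes[n.inputs[0]].device;
      if (candidate.empty() || !Accept(candidate)) continue;
      bool all_match = true;
      for (int i : n.inputs) {
        if (nodes[i].device != candidate) { all_match = false; break; }
      }
      if (all_match) { n.device = candidate; changed = true; }
    }
  }
}

int main() {
  // Mirrors PropagateTPUDevices: E's inputs disagree, so it stays unassigned.
  std::vector<ToyNode> g(5);
  g[0].device = "TPU:0";  // A
  g[1].device = "TPU:1";  // B
  g[2].inputs = {0};      // C <- A        : gets TPU:0
  g[3].inputs = {0, 2};   // D <- A, C     : gets TPU:0
  g[4].inputs = {1, 2};   // E <- B, C     : conflicting inputs, stays empty
  Propagate(g);
  for (const ToyNode& n : g) std::printf("[%s] ", n.device.empty() ? "-" : n.device.c_str());
  std::printf("\n");  // [TPU:0] [TPU:1] [TPU:0] [TPU:0] [-]
}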
cpp
tensorflow/tensorflow
softmax
tensorflow/compiler/tf2tensorrt/convert/ops/softmax.cc
tensorflow/lite/delegates/xnnpack/softmax_test.cc
#if GOOGLE_CUDA && GOOGLE_TENSORRT

#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h"

namespace tensorflow {
namespace tensorrt {
namespace convert {

class ConvertSoftmax : public OpConverterBase<ConvertSoftmax> {
 public:
  explicit ConvertSoftmax(const OpConverterParams *params)
      : OpConverterBase<ConvertSoftmax>(params) {}

  static constexpr std::array<DataType, 3> AllowedDataTypes() {
    return {DataType::DT_FLOAT, DataType::DT_HALF};
  }

  static constexpr std::array<InputArgSpec, 1> InputSpec() {
    return std::array<InputArgSpec, 1>{
        InputArgSpec::Create("logits", TrtInputArg::kTensor)};
  }

  Status Validate() {
    const auto &params = *this->params_;
    const auto &inputs = params.inputs;
    ITensorProxyPtr logits_tensor = inputs.at(0).tensor();
    const int num_trt_dims = logits_tensor->getDimensions().nbDims;
    if (!num_trt_dims && params.use_implicit_batch) {
      return errors::InvalidArgument(
          "TensorRT Softmax cannot apply on the batch dimension");
    }
    return OkStatus();
  }

  Status Convert() {
    const auto &params = *this->params_;
    const auto &inputs = params.inputs;
    const auto &node_def = params.node_def;
    ITensorProxyPtr logits_tensor = inputs.at(0).tensor();
    const int num_trt_dims = logits_tensor->getDimensions().nbDims;
    nvinfer1::ISoftMaxLayer *layer =
        params.converter->network()->addSoftMax(*logits_tensor->trt_tensor());
    TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
    params.converter->SetLayerName(layer, node_def);
    layer->setAxes(1 << (num_trt_dims - 1));
    ITensorProxyPtr output_tensor = layer->getOutput(0);
    params.outputs->push_back(TRT_TensorOrWeights(output_tensor));
    return OkStatus();
  }
};

REGISTER_DEFAULT_TRT_OP_CONVERTER(MakeConverterFunction<ConvertSoftmax>(), "Softmax");

}  // namespace convert
}  // namespace tensorrt
}  // namespace tensorflow

#endif  // GOOGLE_CUDA && GOOGLE_TENSORRT
#include <cstdint>
#include <functional>
#include <memory>
#include <random>

#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/softmax_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"

namespace tflite {
namespace xnnpack {

TEST(Softmax, 4D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng = std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  SoftmaxTester().Shape({batch, height, width, channels}).Test(xnnpack_delegate.get());
}

TEST(Softmax, 3D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng = std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  SoftmaxTester().Shape({batch, width, channels}).Test(xnnpack_delegate.get());
}

TEST(Softmax, 2D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng = std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto channels = shape_rng();
  SoftmaxTester().Shape({batch, channels}).Test(xnnpack_delegate.get());
}

TEST(Softmax, 1D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng = std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  SoftmaxTester().Shape({batch}).Test(xnnpack_delegate.get());
}

TEST(Softmax, DISABLED_Beta) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng = std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  SoftmaxTester()
      .Shape({batch, height, width, channels})
      .Beta(0.1f)
      .Test(xnnpack_delegate.get());
  SoftmaxTester()
      .Shape({batch, height, width, channels})
      .Beta(10.0f)
      .Test(xnnpack_delegate.get());
}

TEST(Softmax, MultiThreading) {
  TfLiteXNNPackDelegateOptions delegate_options = TfLiteXNNPackDelegateOptionsDefault();
  delegate_options.num_threads = 2;
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng = std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  SoftmaxTester().Shape({batch, height, width, channels}).Test(xnnpack_delegate.get());
}

}  // namespace xnnpack
}  // namespace tflite
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/ops/softmax.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/softmax_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
f03a3830-3544-489a-837c-11b8e6cc9e8f
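Both files in this record target the same operation from different backends: the TF-TRT converter applies TensorRT's softmax layer to the innermost axis (the bitmask 1 << (num_trt_dims - 1) selects the last dimension), and the XNNPACK tests additionally cover an optional beta scale on the logits. A minimal standalone sketch of the math being delegated, assuming a single row of logits:

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// softmax(x)_i = exp(beta * x_i) / sum_j exp(beta * x_j), computed with the
// usual max-subtraction for numerical stability.
std::vector<float> Softmax(const std::vector<float>& logits, float beta = 1.0f) {
  float max_logit = logits[0];
  for (float v : logits) max_logit = std::max(max_logit, v);
  std::vector<float> out(logits.size());
  float sum = 0.0f;
  for (size_t i = 0; i < logits.size(); ++i) {
    out[i] = std::exp(beta * (logits[i] - max_logit));
    sum += out[i];
  }
  for (float& v : out) v /= sum;  // normalize so the row sums to 1
  return out;
}

int main() {
  for (float p : Softmax({1.0f, 2.0f, 3.0f})) std::printf("%.3f ", p);
  std::printf("\n");  // 0.090 0.245 0.665
}

For a multi-dimensional tensor, this row-wise computation is repeated over every slice along the last axis, which is exactly what the TensorRT axes bitmask requests.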
cpp
google/quiche
load_balancer_server_id
quiche/quic/load_balancer/load_balancer_server_id.cc
quiche/quic/load_balancer/load_balancer_server_id_test.cc
#include "quiche/quic/load_balancer/load_balancer_server_id.h" #include <array> #include <cstdint> #include <cstring> #include <string> #include "absl/strings/escaping.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "quiche/quic/platform/api/quic_bug_tracker.h" namespace quic { LoadBalancerServerId::LoadBalancerServerId(absl::string_view data) : LoadBalancerServerId(absl::MakeSpan( reinterpret_cast<const uint8_t*>(data.data()), data.length())) {} LoadBalancerServerId::LoadBalancerServerId(absl::Span<const uint8_t> data) : length_(data.length()) { if (length_ == 0 || length_ > kLoadBalancerMaxServerIdLen) { QUIC_BUG(quic_bug_433312504_02) << "Attempted to create LoadBalancerServerId with length " << static_cast<int>(length_); length_ = 0; return; } memcpy(data_.data(), data.data(), data.length()); } void LoadBalancerServerId::set_length(uint8_t length) { QUIC_BUG_IF(quic_bug_599862571_01, length == 0 || length > kLoadBalancerMaxServerIdLen) << "Attempted to set LoadBalancerServerId length to " << static_cast<int>(length); length_ = length; } std::string LoadBalancerServerId::ToString() const { return absl::BytesToHexString( absl::string_view(reinterpret_cast<const char*>(data_.data()), length_)); } }
#include "quiche/quic/load_balancer/load_balancer_server_id.h" #include <cstdint> #include <cstring> #include "absl/hash/hash_testing.h" #include "absl/types/span.h" #include "quiche/quic/platform/api/quic_expect_bug.h" #include "quiche/quic/platform/api/quic_test.h" namespace quic { namespace test { namespace { class LoadBalancerServerIdTest : public QuicTest {}; constexpr uint8_t kRawServerId[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f}; TEST_F(LoadBalancerServerIdTest, CreateReturnsNullIfTooLong) { EXPECT_QUIC_BUG(EXPECT_FALSE(LoadBalancerServerId( absl::Span<const uint8_t>(kRawServerId, 16)) .IsValid()), "Attempted to create LoadBalancerServerId with length 16"); EXPECT_QUIC_BUG( EXPECT_FALSE(LoadBalancerServerId(absl::Span<const uint8_t>()).IsValid()), "Attempted to create LoadBalancerServerId with length 0"); } TEST_F(LoadBalancerServerIdTest, CompareIdenticalExceptLength) { LoadBalancerServerId server_id(absl::Span<const uint8_t>(kRawServerId, 15)); ASSERT_TRUE(server_id.IsValid()); EXPECT_EQ(server_id.length(), 15); LoadBalancerServerId shorter_server_id( absl::Span<const uint8_t>(kRawServerId, 5)); ASSERT_TRUE(shorter_server_id.IsValid()); EXPECT_EQ(shorter_server_id.length(), 5); EXPECT_TRUE(shorter_server_id < server_id); EXPECT_FALSE(server_id < shorter_server_id); EXPECT_FALSE(shorter_server_id == server_id); } TEST_F(LoadBalancerServerIdTest, AccessorFunctions) { LoadBalancerServerId server_id(absl::Span<const uint8_t>(kRawServerId, 5)); EXPECT_TRUE(server_id.IsValid()); EXPECT_EQ(server_id.length(), 5); EXPECT_EQ(memcmp(server_id.data().data(), kRawServerId, 5), 0); EXPECT_EQ(server_id.ToString(), "0001020304"); } TEST_F(LoadBalancerServerIdTest, CompareDifferentServerIds) { LoadBalancerServerId server_id(absl::Span<const uint8_t>(kRawServerId, 5)); ASSERT_TRUE(server_id.IsValid()); LoadBalancerServerId reverse({0x0f, 0x0e, 0x0d, 0x0c, 0x0b}); ASSERT_TRUE(reverse.IsValid()); EXPECT_TRUE(server_id < reverse); LoadBalancerServerId long_server_id( absl::Span<const uint8_t>(kRawServerId, 15)); EXPECT_TRUE(long_server_id < reverse); } TEST_F(LoadBalancerServerIdTest, EqualityOperators) { LoadBalancerServerId server_id(absl::Span<const uint8_t>(kRawServerId, 15)); ASSERT_TRUE(server_id.IsValid()); LoadBalancerServerId shorter_server_id( absl::Span<const uint8_t>(kRawServerId, 5)); ASSERT_TRUE(shorter_server_id.IsValid()); EXPECT_FALSE(server_id == shorter_server_id); LoadBalancerServerId server_id2 = server_id; EXPECT_TRUE(server_id == server_id2); } TEST_F(LoadBalancerServerIdTest, SupportsHash) { LoadBalancerServerId server_id(absl::Span<const uint8_t>(kRawServerId, 15)); ASSERT_TRUE(server_id.IsValid()); LoadBalancerServerId shorter_server_id( absl::Span<const uint8_t>(kRawServerId, 5)); ASSERT_TRUE(shorter_server_id.IsValid()); LoadBalancerServerId different_server_id({0x0f, 0x0e, 0x0d, 0x0c, 0x0b}); ASSERT_TRUE(different_server_id.IsValid()); EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ server_id, shorter_server_id, different_server_id, })); } TEST_F(LoadBalancerServerIdTest, SetLengthInvalid) { LoadBalancerServerId server_id; EXPECT_QUIC_BUG(server_id.set_length(16), "Attempted to set LoadBalancerServerId length to 16"); EXPECT_QUIC_BUG(server_id.set_length(0), "Attempted to set LoadBalancerServerId length to 0"); server_id.set_length(1); EXPECT_EQ(server_id.length(), 1); server_id.set_length(15); EXPECT_EQ(server_id.length(), 15); } } } }
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/load_balancer/load_balancer_server_id.cc
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/load_balancer/load_balancer_server_id_test.cc
6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
642de5a1-8571-42ec-86b3-0c039e9b3c62
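A small standalone sketch of the two invariants the QUICHE tests above pin down, using hypothetical helper names rather than the real class: a server ID is valid only at lengths 1 through kLoadBalancerMaxServerIdLen (15), and ToString() lowercase-hex-encodes exactly length_ bytes, so the first five bytes of kRawServerId render as "0001020304".

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

// Hypothetical restatement; the value 15 comes from the QUICHE source above.
constexpr size_t kMaxServerIdLen = 15;

bool IsValidLength(size_t len) { return len >= 1 && len <= kMaxServerIdLen; }

// Lowercase hex encoding, equivalent to absl::BytesToHexString on the
// id's first `length` bytes.
std::string ToHex(const std::vector<uint8_t>& bytes) {
  static const char kHexDigits[] = "0123456789abcdef";
  std::string out;
  for (uint8_t b : bytes) {
    out.push_back(kHexDigits[b >> 4]);
    out.push_back(kHexDigits[b & 0xf]);
  }
  return out;
}

int main() {
  std::printf("%d %d\n", IsValidLength(16), IsValidLength(5));          // 0 1
  std::printf("%s\n", ToHex({0x00, 0x01, 0x02, 0x03, 0x04}).c_str());   // 0001020304
}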
cpp
tensorflow/tensorflow
device_compilation_profiler
tensorflow/compiler/jit/device_compilation_profiler.cc
tensorflow/compiler/jit/device_compilation_profiler_test.cc
#include "tensorflow/compiler/jit/device_compilation_profiler.h" #include <cstdint> #include <optional> #include <string> #include <utility> #include "absl/strings/str_cat.h" #include "tensorflow/compiler/jit/xla_activity.pb.h" #include "tensorflow/compiler/jit/xla_activity_listener.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/metrics.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" #include "tsl/platform/mutex.h" namespace tensorflow { namespace { bool ShouldBeMegamorphic(int64_t compile_count, int64_t execution_count) { const int64_t kCompileThreshold = 10; const int64_t kMinExecutionsPerCompile = 50; return compile_count > kCompileThreshold && execution_count < kMinExecutionsPerCompile * compile_count; } void RegisterExecutionForCluster( const NameAttrList& function, DeviceCompilationProfiler::ClusterCompileStats* stats) { ++stats->execution_count; if (!stats->is_megamorphic && ShouldBeMegamorphic(stats->compile_count, stats->execution_count)) { VLOG(1) << "Marking " << function.name() << " as megamorphic, compile_count=" << stats->compile_count << " execution_count=" << stats->execution_count; stats->is_megamorphic = true; } } constexpr int64_t kDefaultCompilationThreshold = 2; constexpr int64_t kMaxNumOngoingCompilations = kNumAsyncDeviceCompilerThreads; } DeviceCompilationProfiler::~DeviceCompilationProfiler() { mutex_lock lock(mu_); cluster_compile_stats_.clear(); } absl::StatusOr<DeviceCompilationProfiler::ClusterCompileStats> DeviceCompilationProfiler::GetCompileStats(const NameAttrList& function) const { mutex_lock lock(mu_); if (auto it = cluster_compile_stats_.find(function.name()); it != cluster_compile_stats_.end()) { return it->second; } return errors::NotFound("Couldn't find compilation stats for cluster: ", function.name()); } void DeviceCompilationProfiler::RegisterExecution( const NameAttrList& function) { mutex_lock lock(mu_); auto it = cluster_compile_stats_.emplace(function.name(), ClusterCompileStats{}) .first; RegisterExecutionForCluster(function, &it->second); } Status DeviceCompilationProfiler::RegisterCompilation( const NameAttrList& function, int64_t compile_time_us, bool used_persistent_cache) { metrics::UpdateXlaCompilationTime(compile_time_us); const std::string& function_name = function.name(); mutex_lock lock(mu_); auto it = cluster_compile_stats_.emplace(function.name(), ClusterCompileStats{}) .first; const uint64 compile_time_s = compile_time_us / 1.0e6; it->second.compile_count++; it->second.cumulative_compile_time_us += compile_time_us; VLOG(1) << "Compiled " << function_name << " " << it->second.compile_count << " times, compile time: " << compile_time_us << " us, cumulative: " << it->second.cumulative_compile_time_us << " us (" << tensorflow::strings::HumanReadableElapsedTime(compile_time_s) << " / " << tensorflow::strings::HumanReadableElapsedTime( it->second.cumulative_compile_time_us / 1.0e6) << ")"; XlaJitCompilationActivity jit_compilation_activity; jit_compilation_activity.set_cluster_name(function_name); jit_compilation_activity.set_compile_count(it->second.compile_count); jit_compilation_activity.set_compile_time_us(compile_time_us); jit_compilation_activity.set_cumulative_compile_time_us( it->second.cumulative_compile_time_us); jit_compilation_activity.set_used_persistent_cache(used_persistent_cache); return BroadcastXlaActivity(std::move(jit_compilation_activity)); } bool DeviceCompilationProfiler::ShouldCompileCluster( const NameAttrList& function, 
DeviceCompileMode compile_mode, int64_t current_request_count) { std::optional<int64_t> compile_threshold; if (compile_mode == DeviceCompileMode::kLazy) { compile_threshold = kDefaultCompilationThreshold; } else if (compile_mode == DeviceCompileMode::kAsync) { compile_threshold = 0; } if (compile_mode == DeviceCompileMode::kStrict) { return true; } mutex_lock lock(mu_); auto [it, cluster_not_found] = cluster_compile_stats_.emplace(function.name(), ClusterCompileStats{}); if (cluster_not_found) { RegisterExecutionForCluster(function, &it->second); } if (it->second.is_megamorphic) { BroadcastOptimizationRemark(XlaOptimizationRemark::MEGAMORPHIC_FUNCTION, function.name()) .IgnoreError(); VLOG(2) << "Not compiling cluster " << function.name() << " because it is megamorphic."; return false; } if (it->second.execution_count == 1) { return true; } if (compile_mode == DeviceCompileMode::kAsync) { if (num_ongoing_compilations_ >= kMaxNumOngoingCompilations) { VLOG(2) << "Not asynchronously compiling cluster " << function.name() << " because of too many ongoing compilations."; return false; } } bool reached_compile_threshold = current_request_count >= *compile_threshold; if (!reached_compile_threshold) { VLOG(2) << "Not compiling cluster " << function.name() << " because it has not reached compile threshold; threshold is " << *compile_threshold << " execution count " << current_request_count << "."; } return reached_compile_threshold; } void DeviceCompilationProfiler::IncrementOngoingAsyncCompilations() { mutex_lock lock(mu_); num_ongoing_compilations_++; } void DeviceCompilationProfiler::DecrementOngoingAsyncCompilations() { mutex_lock lock(mu_); num_ongoing_compilations_--; } int64_t DeviceCompilationProfiler::GetNumOngoingAsyncCompilations() const { mutex_lock lock(mu_); return num_ongoing_compilations_; } std::string DeviceCompilationProfiler::DebugString() const { std::string debug_string = "DeviceCompilationProfiler {\ncluster_compile_stats: {\n"; { mutex_lock lock(mu_); for (const auto& [key, stats] : cluster_compile_stats_) { absl::StrAppend(&debug_string, key, ": ", stats.DebugString(), "\n"); } } absl::StrAppend(&debug_string, "}\nnum_ongoing_compilations=", GetNumOngoingAsyncCompilations(), "\n}\n"); return debug_string; } }
#include "tensorflow/compiler/jit/device_compilation_profiler.h" #include <memory> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/compiler/jit/tests/device_compiler_test_helper.h" #include "tensorflow/compiler/jit/xla_activity.pb.h" #include "tensorflow/core/framework/attr_value.pb.h" namespace tensorflow { namespace { TEST(DeviceCompilationProfilerTest, RegisterExecution) { DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler(); core::ScopedUnref profiler_ref(profiler); NameAttrList function; function.set_name("TestFunc"); for (int i = 0; i < 5; ++i) { profiler->RegisterExecution(function); } TF_ASSERT_OK_AND_ASSIGN(auto stats, profiler->GetCompileStats(function)); EXPECT_EQ(stats.execution_count, 5); } TEST(DeviceCompilationProfilerTest, RegisterCompilation) { DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler(); core::ScopedUnref profiler_ref(profiler); auto listener = std::make_unique<JitCompilationListener>(); auto listener_ptr = listener.get(); RegisterXlaActivityListener(std::move(listener)); NameAttrList function; function.set_name("TestFunc"); std::vector<XlaJitCompilationActivity> expected_activities; for (int i = 0; i < 5; ++i) { EXPECT_TRUE(profiler->RegisterCompilation(function, 4, false).ok()); TF_ASSERT_OK_AND_ASSIGN(auto stats, profiler->GetCompileStats(function)); XlaJitCompilationActivity expected_activity; expected_activity.set_cluster_name(function.name()); expected_activity.set_compile_count(stats.compile_count); expected_activity.set_compile_time_us(4); expected_activity.set_cumulative_compile_time_us( stats.cumulative_compile_time_us); expected_activity.set_used_persistent_cache(false); expected_activities.push_back(expected_activity); } TF_ASSERT_OK_AND_ASSIGN(auto stats, profiler->GetCompileStats(function)); EXPECT_EQ(stats.compile_count, 5); EXPECT_EQ(stats.cumulative_compile_time_us, 5 * 4); const auto& actual_activities = listener_ptr->GetListenerHistory(); EXPECT_EQ(actual_activities.size(), expected_activities.size()); for (size_t i = 0; i < actual_activities.size(); ++i) { EXPECT_EQ(actual_activities[i].SerializeAsString(), expected_activities[i].SerializeAsString()); } } TEST(DeviceCompilationProfilerTest, OngoingAsyncCompilations) { DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler(); core::ScopedUnref profiler_ref(profiler); for (int i = 0; i < 5; ++i) { profiler->IncrementOngoingAsyncCompilations(); } EXPECT_EQ(profiler->GetNumOngoingAsyncCompilations(), 5); for (int i = 0; i < 5; ++i) { profiler->DecrementOngoingAsyncCompilations(); } EXPECT_EQ(profiler->GetNumOngoingAsyncCompilations(), 0); for (int i = 0; i < 5; ++i) { profiler->IncrementOngoingAsyncCompilations(); profiler->DecrementOngoingAsyncCompilations(); } EXPECT_EQ(profiler->GetNumOngoingAsyncCompilations(), 0); } TEST(DeviceCompilationProfilerTest, ShouldCompileClusterNotFound) { DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler(); core::ScopedUnref profiler_ref(profiler); NameAttrList function; function.set_name("TestFunc"); EXPECT_TRUE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0)); EXPECT_TRUE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0)); EXPECT_TRUE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kStrict, 0)); } TEST(DeviceCompilationProfilerTest, ShouldCompileClusterFirstExecution) { DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler(); core::ScopedUnref profiler_ref(profiler); 
NameAttrList function; function.set_name("TestFunc"); profiler->RegisterExecution(function); EXPECT_TRUE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0)); EXPECT_TRUE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0)); } TEST(DeviceCompilationProfilerTest, ShouldCompileClusterMegamorphic) { DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler(); core::ScopedUnref profiler_ref(profiler); NameAttrList function; function.set_name("TestFunc"); const int64_t kCompileThreshold = 10; const int64_t kMinExecutionsPerCompile = 50; for (int i = 0; i < kCompileThreshold + 1; ++i) { EXPECT_TRUE(profiler->RegisterCompilation(function, 1, false).ok()); } profiler->RegisterExecution(function); EXPECT_FALSE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0)); EXPECT_FALSE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0)); TF_ASSERT_OK_AND_ASSIGN(auto stats, profiler->GetCompileStats(function)); EXPECT_TRUE(stats.is_megamorphic); EXPECT_TRUE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kStrict, 0)); for (int i = 0; i < kCompileThreshold * kMinExecutionsPerCompile + 1; ++i) { profiler->RegisterExecution(function); } EXPECT_FALSE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0)); EXPECT_FALSE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0)); TF_ASSERT_OK_AND_ASSIGN(stats, profiler->GetCompileStats(function)); EXPECT_TRUE(stats.is_megamorphic); EXPECT_TRUE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kStrict, 0)); } TEST(DeviceCompilationProfilerTest, ShouldCompileClusterAsync) { DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler(); core::ScopedUnref profiler_ref(profiler); NameAttrList function; function.set_name("TestFunc"); const int64_t kMaxNumOngoingCompilations = 10; for (int i = 0; i < kMaxNumOngoingCompilations; ++i) { profiler->IncrementOngoingAsyncCompilations(); } profiler->RegisterExecution(function); EXPECT_TRUE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0)); profiler->RegisterExecution(function); EXPECT_FALSE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0)); profiler->DecrementOngoingAsyncCompilations(); EXPECT_TRUE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0)); } TEST(DeviceCompilationProfilerTest, ShouldCompileClusterLazy) { DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler(); core::ScopedUnref profiler_ref(profiler); NameAttrList function; function.set_name("TestFunc"); constexpr int64_t kDefaultCompilationThreshold = 2; profiler->RegisterExecution(function); EXPECT_TRUE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0)); profiler->RegisterExecution(function); for (int current_request_count = 0; current_request_count < kDefaultCompilationThreshold; ++current_request_count) { EXPECT_FALSE(profiler->ShouldCompileCluster( function, DeviceCompileMode::kLazy, current_request_count)); } EXPECT_TRUE(profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, kDefaultCompilationThreshold)); } } }
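The tests above pin down the compile-gating policy in ShouldCompileCluster: kStrict always compiles, megamorphic clusters are never recompiled, the first execution always compiles, kAsync additionally caps concurrent compilations, and kLazy waits for a request-count threshold. A minimal standalone sketch of that decision order follows; Mode, Stats, ShouldCompile, and both constants are illustrative stand-ins, not TensorFlow's actual declarations.

#include <cstdint>

enum class Mode { kLazy, kAsync, kStrict };

struct Stats {
  int64_t execution_count = 0;
  bool is_megamorphic = false;  // recompiled too often; give up on this cluster
};

// Mirrors the gate order above: strict -> megamorphic -> first execution ->
// async concurrency cap -> request-count threshold.
bool ShouldCompile(Mode mode, const Stats& stats, int64_t request_count,
                   int64_t ongoing_async_compilations) {
  constexpr int64_t kThreshold = 2;    // lazy mode waits for this many requests
  constexpr int64_t kMaxOngoing = 10;  // async mode caps concurrent compiles
  if (mode == Mode::kStrict) return true;
  if (stats.is_megamorphic) return false;
  if (stats.execution_count == 1) return true;
  if (mode == Mode::kAsync && ongoing_async_compilations >= kMaxOngoing) {
    return false;
  }
  const int64_t threshold = (mode == Mode::kLazy) ? kThreshold : 0;
  return request_count >= threshold;
}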
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compilation_profiler.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compilation_profiler_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
24bb0006-b05f-484b-99d7-4f82b5dc15ee
cpp
google/tensorstore
executor
tensorstore/util/executor.h
tensorstore/util/executor_test.cc
#ifndef TENSORSTORE_UTIL_EXECUTOR_H_ #define TENSORSTORE_UTIL_EXECUTOR_H_ #include <functional> #include <type_traits> #include <utility> #include "absl/base/attributes.h" #include "absl/functional/any_invocable.h" #include "absl/meta/type_traits.h" #include "tensorstore/internal/poly/poly.h" #include "tensorstore/internal/type_traits.h" namespace tensorstore { using ExecutorTask = absl::AnyInvocable<void() &&>; using Executor = poly::Poly<0, true, void(ExecutorTask) const>; class InlineExecutor { public: template <typename Func> void operator()(Func&& func) const { std::forward<Func>(func)(); } }; template <typename ExecutorType, typename FunctionType> class ExecutorBoundFunction { public: using Executor = ExecutorType; using Function = FunctionType; template <typename... T> std::enable_if_t<std::is_invocable_v<Function&, T...>> operator()(T&&... arg) { executor(std::bind(std::move(function), std::forward<T>(arg)...)); } template <typename... T> std::enable_if_t<std::is_invocable_v<const Function&, T...>> operator()( T&&... arg) const { executor(std::bind(function, std::forward<T>(arg)...)); } ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Executor executor; ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Function function; }; template <typename Executor, typename Function> std::enable_if_t< !std::is_same_v<absl::remove_cvref_t<Executor>, InlineExecutor>, ExecutorBoundFunction<absl::remove_cvref_t<Executor>, absl::remove_cvref_t<Function>>> WithExecutor(Executor&& executor, Function&& function) { return {std::forward<Executor>(executor), std::forward<Function>(function)}; } template <typename Executor, typename Function> std::enable_if_t<std::is_same_v<absl::remove_cvref_t<Executor>, InlineExecutor>, Function&&> WithExecutor(Executor&& executor, Function&& function) { return std::forward<Function>(function); } } #endif
#include "tensorstore/util/executor.h" #include <functional> #include <memory> #include <gtest/gtest.h> namespace { using ::tensorstore::Executor; using ::tensorstore::InlineExecutor; using ::tensorstore::WithExecutor; TEST(InlineExecutorTest, Basic) { Executor executor = InlineExecutor{}; bool invoked = false; executor([&] { invoked = true; }); EXPECT_TRUE(invoked); } TEST(WithExecutorTest, NonConst) { InlineExecutor executor; bool invoked = false; struct Func { void operator()(bool* x) const = delete; void operator()(bool* x) { *x = true; } }; auto with_executor = WithExecutor(executor, Func{}); with_executor(&invoked); EXPECT_TRUE(invoked); } TEST(WithExecutorTest, Const) { InlineExecutor executor; bool invoked = false; struct Func { void operator()(bool* x) const { *x = true; } void operator()(bool*) = delete; }; const auto with_executor = WithExecutor(executor, Func{}); with_executor(&invoked); EXPECT_TRUE(invoked); } TEST(ExecutorTest, MoveOnly) { Executor executor = InlineExecutor{}; int value = 0; executor(std::bind([&](const std::unique_ptr<int>& ptr) { value = *ptr; }, std::make_unique<int>(3))); EXPECT_EQ(3, value); } TEST(WithExecutorTest, MoveOnly) { Executor executor = InlineExecutor{}; int value = 0; auto with_executor = WithExecutor( executor, std::bind([&](const std::unique_ptr<int>& ptr) { value = *ptr; }, std::make_unique<int>(3))); with_executor(); EXPECT_EQ(3, value); } }
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/executor.h
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/executor_test.cc
4f887a6430414cd6088e1743555015b10f116d50
e5eb671c-fe51-4224-9402-beab750cbaee
cpp
tensorflow/tensorflow
float8
tensorflow/core/platform/float8.h
third_party/xla/xla/tests/float8_test.cc
#ifndef TENSORFLOW_CORE_PLATFORM_FLOAT8_H_ #define TENSORFLOW_CORE_PLATFORM_FLOAT8_H_ #include "tsl/platform/ml_dtypes.h" namespace tensorflow { typedef tsl::float8_e4m3fn float8_e4m3fn; typedef tsl::float8_e5m2 float8_e5m2; } #endif
#include <cmath> #include <memory> #include <vector> #include <gtest/gtest.h> #include "xla/hlo/builder/xla_builder.h" #include "xla/test.h" #include "xla/tests/client_library_test_base.h" #include "xla/tests/test_macros.h" #include "tsl/platform/ml_dtypes.h" namespace xla { namespace { template <typename T> class Float8Test : public ClientLibraryTestBase {}; using DataTypes = ::testing::Types<tsl::float8_e5m2, tsl::float8_e4m3, tsl::float8_e4m3fn, tsl::float8_e3m4>; TYPED_TEST_SUITE(Float8Test, DataTypes); XLA_TYPED_TEST(Float8Test, ScalarOperation) { XlaBuilder builder(this->TestName()); auto x = ConstantR0<TypeParam>(&builder, static_cast<TypeParam>(2.0f)); auto y = ConstantR0<TypeParam>(&builder, static_cast<TypeParam>(1.0f)); Add(x, y); this->template ComputeAndCompareR0<TypeParam>( &builder, static_cast<TypeParam>(3.0f), {}); } XLA_TYPED_TEST(Float8Test, LogOperation) { XlaBuilder builder(this->TestName()); auto x = ConstantR0<TypeParam>(&builder, static_cast<TypeParam>(4.0f)); Log(x); this->template ComputeAndCompareR0<TypeParam>( &builder, static_cast<TypeParam>(1.387f), {}); } XLA_TYPED_TEST(Float8Test, CompareOperation) { XlaBuilder builder(this->TestName()); auto x = ConstantR1<TypeParam>(&builder, {TypeParam{1.0}, TypeParam{2.0}}); auto y = ConstantR1<TypeParam>(&builder, {TypeParam{1.0}, TypeParam{3.0}}); Eq(x, y); this->template ComputeAndCompareR1<bool>(&builder, {true, false}, {}); } XLA_TYPED_TEST(Float8Test, DotOperation) { XlaBuilder builder(this->TestName()); auto x = ConstantR2<TypeParam>(&builder, {{TypeParam{0.0}, TypeParam{1.0}}, {TypeParam{2.0}, TypeParam{3.0}}}); auto y = ConstantR2<TypeParam>(&builder, {{TypeParam{3.0}, TypeParam{2.0}}, {TypeParam{1.0}, TypeParam{0.0}}}); Dot(x, y); this->template ComputeAndCompareR2<TypeParam>( &builder, {{TypeParam{1.0}, TypeParam{0.0}}, {TypeParam{9.0}, TypeParam{4.0}}}, {}); } XLA_TYPED_TEST(Float8Test, NegateScalar) { XlaBuilder builder(this->TestName()); Neg(ConstantR0<TypeParam>(&builder, static_cast<TypeParam>(2.0f))); this->template ComputeAndCompareR0<TypeParam>( &builder, static_cast<TypeParam>(-2.0f), {}); } } }
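The typedefs in float8.h simply re-export tsl's ml_dtypes float8 formats; the behavioral difference between them is mantissa width. A small sketch of the rounding these tests tolerate, assuming the ml_dtypes conversion operators behave as standard round-to-nearest (the printed values in the comments are approximate expectations, not verified output):

#include <iostream>
#include "tsl/platform/ml_dtypes.h"

int main() {
  const float x = 3.14159f;
  // e4m3fn: 3 mantissa bits, so representable values near 3 step by 0.25.
  auto a = static_cast<tsl::float8_e4m3fn>(x);
  // e5m2: 2 mantissa bits, coarser steps of 0.5 in this range.
  auto b = static_cast<tsl::float8_e5m2>(x);
  std::cout << static_cast<float>(a) << "\n";  // ~3.25
  std::cout << static_cast<float>(b) << "\n";  // ~3.0
  return 0;
}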
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/float8.h
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/float8_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
2c0d47c7-2213-4e44-90ad-237902404512
cpp
google/cel-cpp
timestamp_type
common/types/timestamp_type.h
common/types/timestamp_type_test.cc
#ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_TIMESTAMP_TYPE_H_ #define THIRD_PARTY_CEL_CPP_COMMON_TYPES_TIMESTAMP_TYPE_H_ #include <ostream> #include <string> #include <utility> #include "absl/strings/string_view.h" #include "common/type_kind.h" namespace cel { class Type; class TypeParameters; class TimestampType final { public: static constexpr TypeKind kKind = TypeKind::kTimestamp; static constexpr absl::string_view kName = "google.protobuf.Timestamp"; TimestampType() = default; TimestampType(const TimestampType&) = default; TimestampType(TimestampType&&) = default; TimestampType& operator=(const TimestampType&) = default; TimestampType& operator=(TimestampType&&) = default; static TypeKind kind() { return kKind; } static absl::string_view name() { return kName; } static TypeParameters GetParameters(); static std::string DebugString() { return std::string(name()); } constexpr void swap(TimestampType&) noexcept {} }; inline constexpr void swap(TimestampType& lhs, TimestampType& rhs) noexcept { lhs.swap(rhs); } inline constexpr bool operator==(TimestampType, TimestampType) { return true; } inline constexpr bool operator!=(TimestampType lhs, TimestampType rhs) { return !operator==(lhs, rhs); } template <typename H> H AbslHashValue(H state, TimestampType) { return std::move(state); } inline std::ostream& operator<<(std::ostream& out, const TimestampType& type) { return out << type.DebugString(); } } #endif
#include <sstream> #include "absl/hash/hash.h" #include "common/type.h" #include "internal/testing.h" namespace cel { namespace { TEST(TimestampType, Kind) { EXPECT_EQ(TimestampType().kind(), TimestampType::kKind); EXPECT_EQ(Type(TimestampType()).kind(), TimestampType::kKind); } TEST(TimestampType, Name) { EXPECT_EQ(TimestampType().name(), TimestampType::kName); EXPECT_EQ(Type(TimestampType()).name(), TimestampType::kName); } TEST(TimestampType, DebugString) { { std::ostringstream out; out << TimestampType(); EXPECT_EQ(out.str(), TimestampType::kName); } { std::ostringstream out; out << Type(TimestampType()); EXPECT_EQ(out.str(), TimestampType::kName); } } TEST(TimestampType, Hash) { EXPECT_EQ(absl::HashOf(TimestampType()), absl::HashOf(TimestampType())); } TEST(TimestampType, Equal) { EXPECT_EQ(TimestampType(), TimestampType()); EXPECT_EQ(Type(TimestampType()), TimestampType()); EXPECT_EQ(TimestampType(), Type(TimestampType())); EXPECT_EQ(Type(TimestampType()), Type(TimestampType())); } } }
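TimestampType is an instance of a stateless "unit type" pattern: every value is interchangeable, so equality is constant-true, swap is a no-op, and AbslHashValue passes the hash state through untouched so all values hash identically (exactly what the Hash and Equal tests above assert). A minimal sketch of the same pattern; NullTypeForDemo is an illustrative name, not part of cel-cpp.

#include <utility>
#include "absl/strings/string_view.h"

class NullTypeForDemo final {
 public:
  static constexpr absl::string_view kName = "demo.Null";
  static absl::string_view name() { return kName; }
};

inline constexpr bool operator==(NullTypeForDemo, NullTypeForDemo) {
  return true;  // stateless: any two instances compare equal
}
inline constexpr bool operator!=(NullTypeForDemo lhs, NullTypeForDemo rhs) {
  return !operator==(lhs, rhs);
}
template <typename H>
H AbslHashValue(H state, NullTypeForDemo) {
  return std::move(state);  // nothing to mix in; all values hash the same
}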
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/timestamp_type.h
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/timestamp_type_test.cc
4552db5798fb0853b131b783d8875794334fae7f
34e66ece-9ff8-4b9c-9545-dcb0094f4b53
cpp
tensorflow/tensorflow
partitioning_utils
tensorflow/core/common_runtime/partitioning_utils.cc
tensorflow/core/common_runtime/partitioning_utils_test.cc
#include "tensorflow/core/common_runtime/partitioning_utils.h" #include <algorithm> #include <functional> #include <memory> #include <optional> #include <string> #include <unordered_map> #include <utility> #include "tensorflow/core/common_runtime/arg_ret_placement.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/graph/graph_partition.h" namespace tensorflow { namespace { Status PartitionFunctionGraph( const DeviceSet& device_set, Graph* graph, std::unordered_map<string, GraphDef>* partitions, std::function<string(const Node*)> node_to_loc, std::function<string(const Edge*)> get_tensor_name_attr) { PartitionOptions partition_options; if (node_to_loc != nullptr) { partition_options.node_to_loc = node_to_loc; } else { partition_options.node_to_loc = [](const Node* node) { return node->assigned_device_name(); }; } int64_t edge_name_counter = 0; partition_options.new_name = [&edge_name_counter](const string& prefix) { return strings::StrCat(prefix, "/_", ++edge_name_counter); }; partition_options.get_incarnation = [&device_set](const string& name) -> int64 { const Device* d = device_set.FindDeviceByName(name); if (d == nullptr) { return PartitionOptions::kIllegalIncarnation; } else { return d->attributes().incarnation(); } }; partition_options.control_flow_added = false; partition_options.get_tensor_name_attr = get_tensor_name_attr; partition_options.can_make_destructive_changes = true; return Partition(partition_options, graph, partitions); } struct SendRecvPair { Node* send_node = nullptr; Node* recv_node = nullptr; }; constexpr char kTensorNameAttr[] = "tensor_name"; Status MakeSendRecvDependencyExplicit(Graph* graph) { absl::flat_hash_map<std::string, SendRecvPair> send_recv_pairs; for (Node* node : graph->op_nodes()) { if (node->IsSend() || node->IsRecv()) { auto tensor_name_it = node->def().attr().find(kTensorNameAttr); if (tensor_name_it == node->def().attr().end()) { return errors::Internal( "'", kTensorNameAttr, "' attribute is not found from node: ", node->DebugString()); } if (node->IsSend()) { send_recv_pairs[tensor_name_it->second.s()].send_node = node; } else { send_recv_pairs[tensor_name_it->second.s()].recv_node = node; } } } for (const auto& [tensor_name, send_recv_pair] : send_recv_pairs) { if (send_recv_pair.send_node == nullptr || send_recv_pair.recv_node == nullptr) { return errors::Internal( "No matching Send/Recv nodes found for tensor_name = ", tensor_name); } graph->AddControlEdge(send_recv_pair.send_node, send_recv_pair.recv_node); } return absl::OkStatus(); } } Status PartitionFunctionGraph( const DeviceSet& device_set, std::unique_ptr<Graph> graph, std::unordered_map<string, std::unique_ptr<Graph>>* subgraphs, std::function<string(const Edge*)> get_tensor_name_attr) { std::unordered_map<string, GraphDef> partitions; TF_RETURN_IF_ERROR( PartitionFunctionGraph(device_set, graph.get(), &partitions, nullptr, get_tensor_name_attr)); const OpRegistryInterface* default_registry = graph->flib_def().default_registry(); graph.reset(); for (auto& partition : partitions) { const string& device = partition.first; GraphDef& graph_def = partition.second; auto subgraph = std::make_unique<Graph>(default_registry); GraphConstructorOptions opts; opts.allow_internal_ops = true; opts.expect_device_spec = true; TF_RETURN_IF_ERROR( ConvertGraphDefToGraph(opts, 
std::move(graph_def), subgraph.get())); subgraphs->emplace(device, std::move(subgraph)); } return absl::OkStatus(); } absl::StatusOr<std::unique_ptr<Graph>> InsertTransferOps( const DeviceSet& device_set, std::unique_ptr<Graph> graph) { auto node_to_loc = [](const Node* node) { return node->assigned_device_name(); }; bool has_multiple_devices = false; absl::optional<std::string> location; for (const Node* node : graph->op_nodes()) { if (location) { if (*location != node_to_loc(node)) { has_multiple_devices = true; break; } } else { location = node_to_loc(node); } } if (!has_multiple_devices) { return graph; } auto new_graph = std::make_unique<Graph>(graph->flib_def()); std::unordered_map<string, GraphDef> partitions; TF_RETURN_IF_ERROR(PartitionFunctionGraph(device_set, graph.get(), &partitions, node_to_loc, nullptr)); GraphDef merged_graph_def; if (!partitions.empty()) { auto iter = partitions.begin(); merged_graph_def = std::move(iter->second); while (++iter != partitions.end()) { merged_graph_def.MergeFrom(iter->second); } } GraphConstructorOptions opts; opts.allow_internal_ops = true; opts.expect_device_spec = true; TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(opts, std::move(merged_graph_def), new_graph.get())); TF_RETURN_IF_ERROR(MakeSendRecvDependencyExplicit(new_graph.get())); return std::move(new_graph); } Status UpdateArgAndRetvalMetadata( Graph* graph, std::vector<FunctionArgIndex>* arg_indices, std::vector<int>* ret_indices, std::vector<AllocatorAttributes>* arg_alloc_attrs, std::vector<AllocatorAttributes>* ret_alloc_attrs, bool ints_on_device) { std::vector<std::pair<Node*, FunctionArgIndex>> arg_nodes; std::vector<std::pair<Node*, int>> ret_nodes; const AttrValue* attr_value; for (Node* node : graph->op_nodes()) { if (node->IsArg()) { TF_RETURN_IF_ERROR(node->attrs().Find("index", &attr_value)); int index = static_cast<int>(attr_value->i()); int sub_index = -1; if (node->attrs().Find("sub_index", &attr_value).ok()) { sub_index = static_cast<int>(attr_value->i()); } arg_nodes.emplace_back(node, FunctionArgIndex(index, sub_index)); } else if (node->IsRetval()) { TF_RETURN_IF_ERROR(node->attrs().Find("index", &attr_value)); int index = static_cast<int>(attr_value->i()); ret_nodes.emplace_back(node, index); } } auto arg_comparator = [](std::pair<Node*, FunctionArgIndex> a, std::pair<Node*, FunctionArgIndex> b) { return std::tie(a.second.index, a.second.sub_index) < std::tie(b.second.index, b.second.sub_index); }; std::sort(arg_nodes.begin(), arg_nodes.end(), arg_comparator); auto ret_comparator = [](std::pair<Node*, int> a, std::pair<Node*, int> b) { return a.second < b.second; }; std::sort(ret_nodes.begin(), ret_nodes.end(), ret_comparator); arg_indices->reserve(arg_nodes.size()); for (const auto& pair : arg_nodes) arg_indices->push_back(pair.second); ret_indices->reserve(ret_nodes.size()); for (const auto& pair : ret_nodes) ret_indices->push_back(pair.second); for (int i = 0; i < arg_nodes.size(); ++i) { Node* arg = arg_nodes[i].first; arg->AddAttr("index", i); } if (arg_alloc_attrs != nullptr) { TF_RETURN_IF_ERROR(full_type::SingleDeviceSetAllocAttrsForArgs( arg_nodes, ints_on_device, *arg_alloc_attrs)); } for (int i = 0; i < ret_nodes.size(); ++i) { Node* ret = ret_nodes[i].first; ret->AddAttr("index", i); } if (ret_alloc_attrs) { TF_RETURN_IF_ERROR(full_type::SingleDeviceSetAllocAttrsForRets( ret_nodes, ints_on_device, *ret_alloc_attrs)); } return absl::OkStatus(); } string FunctionNameGenerator::GetName() { while (true) { const string candidate = strings::StrCat(name_, "_", 
counter_++); if (flib_def_->Find(candidate) == nullptr) { return candidate; } } } }
#include "tensorflow/core/common_runtime/partitioning_utils.h" #include <map> #include <memory> #include <string> #include <unordered_map> #include <vector> #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/function_ops.h" #include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/common_runtime/function_testlib.h" #include "tensorflow/core/common_runtime/int32_fulltype.h" #include "tensorflow/core/common_runtime/placer.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/public/session_options.h" namespace tensorflow { namespace { using ::testing::SizeIs; class PartitioningUtilsTest : public ::testing::Test { public: void SetUp() override { SessionOptions options; auto* device_count = options.config.mutable_device_count(); device_count->insert({"CPU", 2}); std::vector<std::unique_ptr<Device>> devices; TF_CHECK_OK(DeviceFactory::AddDevices(options, "/job:a/replica:0/task:0", &devices)); device0_ = devices[0].get(); device1_ = devices[1].get(); device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices)); for (auto d : device_mgr_->ListDevices()) { device_set_.AddDevice(d); } } void SwapGraph(Graph* graph, bool assign_device = false) { Scope s = Scope::NewRootScope(); if (assign_device) { s = s.WithDevice(device0_->name()); } auto x = ops::_Arg(s.WithOpName("x"), DT_FLOAT, 0); auto y = ops::_Arg(s.WithOpName("y"), DT_FLOAT, 1); auto id_x = ops::Identity(s.WithOpName("id_x"), x); auto id_y = ops::Identity(s.WithOpName("id_y"), y); auto dx_retval = ops::_Retval(s.WithOpName("retval1"), id_y, 0); auto dy_retval = ops::_Retval(s.WithOpName("retval2"), id_x, 1); TF_ASSERT_OK(s.ToGraph(graph)); if (assign_device) { FunctionLibraryDefinition flib_def(OpRegistry::Global()); Placer placer(graph, "", &flib_def, &device_set_, device0_); TF_ASSERT_OK(placer.Run()); } } void TwoDeviceSwapGraph(Graph* graph) { Scope s = Scope::NewRootScope(); Scope s1 = s.WithDevice("/job:a/replica:0/task:0/device:CPU:0"); Scope s2 = s.WithDevice("/job:a/replica:0/task:0/device:CPU:1"); auto x = ops::_Arg(s1.WithOpName("x"), DT_FLOAT, 0); auto y = ops::_Arg(s2.WithOpName("y"), DT_FLOAT, 1); auto id_x = ops::Identity(s1.WithOpName("id_x"), x); auto id_y = ops::Identity(s2.WithOpName("id_y"), y); auto dx_retval = ops::_Retval(s2.WithOpName("retval1"), id_y, 0); auto dy_retval = ops::_Retval(s1.WithOpName("retval2"), id_x, 1); TF_ASSERT_OK(s.ToGraph(graph)); FunctionLibraryDefinition flib_def(OpRegistry::Global()); Placer placer(graph, "", &flib_def, &device_set_, device0_); TF_ASSERT_OK(placer.Run()); } void SubGraph(Graph* subgraph, DataType dtype, absl::Span<const int> arg_indices, absl::Span<const int> ret_indices) { Scope s = Scope::NewRootScope(); Scope s1 = s.WithDevice("/job:a/replica:0/task:0/device:CPU:0"); CHECK_EQ(arg_indices.size(), ret_indices.size()); for (size_t i = 0; i < arg_indices.size(); ++i) { auto x = ops::_Arg(s1.WithOpName("x"), dtype, arg_indices[i]); auto id_x = ops::Identity(s1.WithOpName("id_x"), x); auto dx_retval = ops::_Retval(s1.WithOpName("retval1"), id_x, ret_indices[i]); } TF_ASSERT_OK(s.ToGraph(subgraph)); FunctionLibraryDefinition flib_def(OpRegistry::Global()); Placer placer(subgraph, "", &flib_def, &device_set_, device0_); 
TF_ASSERT_OK(placer.Run()); } std::unique_ptr<DeviceMgr> device_mgr_; Device* device0_ = nullptr; Device* device1_ = nullptr; DeviceSet device_set_; }; TEST_F(PartitioningUtilsTest, GraphWithoutAssignedDevicesFails) { std::unique_ptr<Graph> graph = std::make_unique<Graph>(OpRegistry::Global()); SwapGraph(graph.get()); std::unordered_map<string, std::unique_ptr<Graph>> subgraphs; Status status = PartitionFunctionGraph(device_set_, std::move(graph), &subgraphs); ASSERT_TRUE(errors::IsInvalidArgument(status)) << status.ToString(); } TEST_F(PartitioningUtilsTest, OneDevice) { std::unique_ptr<Graph> graph = std::make_unique<Graph>(OpRegistry::Global()); SwapGraph(graph.get(), true); int num_nodes = graph->num_op_nodes(); std::unordered_map<string, std::unique_ptr<Graph>> subgraphs; Status status = PartitionFunctionGraph(device_set_, std::move(graph), &subgraphs); ASSERT_TRUE(status.ok()) << status.ToString(); ASSERT_EQ(1, subgraphs.size()); const auto& pair = *subgraphs.begin(); ASSERT_EQ("/job:a/replica:0/task:0/device:CPU:0", pair.first); ASSERT_EQ(num_nodes, pair.second->num_op_nodes()); } TEST_F(PartitioningUtilsTest, TwoDevices) { std::unique_ptr<Graph> graph = std::make_unique<Graph>(OpRegistry::Global()); TwoDeviceSwapGraph(graph.get()); std::unordered_map<string, std::unique_ptr<Graph>> subgraphs; Status status = PartitionFunctionGraph(device_set_, std::move(graph), &subgraphs); ASSERT_TRUE(status.ok()) << status.ToString(); ASSERT_EQ(2, subgraphs.size()); const auto& part1 = subgraphs["/job:a/replica:0/task:0/device:CPU:0"]; ASSERT_EQ(3, part1->num_op_nodes()); const auto& part2 = subgraphs["/job:a/replica:0/task:0/device:CPU:1"]; ASSERT_EQ(3, part2->num_op_nodes()); } TEST_F(PartitioningUtilsTest, InsertTransferOpsWithOneDevice) { auto graph = std::make_unique<Graph>(OpRegistry::Global()); Scope scope = Scope::NewRootScope().WithDevice(device0_->name()); auto x = ops::_Arg(scope.WithOpName("x"), DT_FLOAT, 0); auto id_x = ops::Identity(scope.WithOpName("id_x"), x); auto ret_x = ops::_Retval(scope.WithOpName("ret_x"), id_x, 0); TF_ASSERT_OK(scope.ToGraph(graph.get())); FunctionLibraryDefinition flib_def(OpRegistry::Global()); Placer placer(graph.get(), "", &flib_def, &device_set_, device0_); TF_ASSERT_OK(placer.Run()); EXPECT_EQ(graph->num_op_nodes(), 3); int send_count = 0, recv_count = 0; for (const auto* op : graph->op_nodes()) { if (op->IsSend()) ++send_count; else if (op->IsRecv()) ++recv_count; } ASSERT_EQ(send_count, 0); ASSERT_EQ(recv_count, 0); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Graph> new_graph, InsertTransferOps(device_set_, std::move(graph))); EXPECT_EQ(new_graph->num_op_nodes(), 3); send_count = recv_count = 0; for (const auto* op : new_graph->op_nodes()) { if (op->IsSend()) ++send_count; else if (op->IsRecv()) ++recv_count; } EXPECT_EQ(send_count, 0); EXPECT_EQ(recv_count, 0); } TEST_F(PartitioningUtilsTest, InsertTransferOpsWithTwoDevices) { auto graph = std::make_unique<Graph>(OpRegistry::Global()); Scope scope = Scope::NewRootScope(); Scope scope1 = scope.WithDevice(device0_->name()); Scope scope2 = scope.WithDevice(device1_->name()); auto x = ops::_Arg(scope1.WithOpName("x"), DT_FLOAT, 0); auto id_x = ops::Identity(scope2.WithOpName("id_x"), x); auto ret_x = ops::_Retval(scope1.WithOpName("ret_x"), id_x, 0); TF_ASSERT_OK(scope.ToGraph(graph.get())); FunctionLibraryDefinition flib_def(OpRegistry::Global()); Placer placer(graph.get(), "", &flib_def, &device_set_, device0_); TF_ASSERT_OK(placer.Run()); EXPECT_EQ(graph->num_op_nodes(), 3); int send_count = 0, 
recv_count = 0; for (const auto* op : graph->op_nodes()) { if (op->IsSend()) ++send_count; else if (op->IsRecv()) ++recv_count; } ASSERT_EQ(send_count, 0); ASSERT_EQ(recv_count, 0); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Graph> new_graph, InsertTransferOps(device_set_, std::move(graph))); EXPECT_EQ(new_graph->num_op_nodes(), 7); send_count = recv_count = 0; auto get_tensor_name_attr = [](const Node* node) -> std::string { auto tensor_name_it = node->def().attr().find("tensor_name"); return tensor_name_it->second.s(); }; absl::flat_hash_map<std::string, std::pair<Node*, Node*>> send_recv_pairs; for (auto* op : new_graph->op_nodes()) { if (op->IsSend()) { ++send_count; send_recv_pairs[get_tensor_name_attr(op)].first = op; } else if (op->IsRecv()) { ++recv_count; send_recv_pairs[get_tensor_name_attr(op)].second = op; } } EXPECT_EQ(send_count, 2); EXPECT_EQ(recv_count, 2); for (const auto& [tensor_name, send_recv_pair] : send_recv_pairs) { ASSERT_TRUE(send_recv_pair.first != nullptr && send_recv_pair.second != nullptr); std::vector<const Edge*> out_edges( send_recv_pair.first->out_edges().begin(), send_recv_pair.first->out_edges().end()); ASSERT_THAT(out_edges, SizeIs(2)); for (const Edge* out_edge : out_edges) { if (out_edge->dst() != new_graph->sink_node()) { EXPECT_TRUE(out_edge->IsControlEdge()); EXPECT_EQ(out_edge->dst(), send_recv_pair.second); } } } } void CheckRetIndices(const std::vector<int>& expected, const std::vector<int>& actual) { ASSERT_EQ(expected.size(), actual.size()); for (int i = 0; i < expected.size(); ++i) { ASSERT_EQ(expected[i], actual[i]) << " at index " << i; } } void CheckArgIndices(const std::vector<FunctionArgIndex>& expected, const std::vector<FunctionArgIndex>& actual) { ASSERT_EQ(expected.size(), actual.size()); for (int i = 0; i < expected.size(); ++i) { ASSERT_EQ(expected[i].index, actual[i].index) << " at index " << i; ASSERT_EQ(expected[i].sub_index, actual[i].sub_index) << " at index " << i; } } void CheckAlloc(const std::vector<bool>& expected, const std::vector<AllocatorAttributes>& actual) { ASSERT_EQ(expected.size(), actual.size()); for (int i = 0; i < expected.size(); ++i) { ASSERT_EQ(expected[i], actual[i].on_host()) << " at index " << i; } } void CheckIndex(const Node& node, int expected_index) { const AttrValue* attr_value; TF_ASSERT_OK(node.attrs().Find("index", &attr_value)); int index = static_cast<int>(attr_value->i()); ASSERT_EQ(expected_index, index); } TEST_F(PartitioningUtilsTest, UpdateArgsAndRets) { auto graph = std::make_unique<Graph>(OpRegistry::Global()); SubGraph(graph.get(), DT_FLOAT, {3}, {5}); std::vector<FunctionArgIndex> arg_indices; std::vector<int> ret_indices; std::vector<AllocatorAttributes> arg_alloc_attrs; std::vector<AllocatorAttributes> ret_alloc_attrs; Status status = UpdateArgAndRetvalMetadata( graph.get(), &arg_indices, &ret_indices, &arg_alloc_attrs, &ret_alloc_attrs, false); ASSERT_TRUE(status.ok()) << status.ToString(); CheckArgIndices({{3, -1}}, arg_indices); CheckRetIndices({5}, ret_indices); CheckAlloc({false}, arg_alloc_attrs); CheckAlloc({false}, ret_alloc_attrs); std::unordered_map<string, Node*> nodes = graph->BuildNodeNameIndex(); ASSERT_EQ(1, nodes.count("x")); CheckIndex(*nodes["x"], 0); ASSERT_EQ(1, nodes.count("retval1")); CheckIndex(*nodes["retval1"], 0); } TEST_F(PartitioningUtilsTest, UpdateArgsAndRetsIntsNotOnDevice) { auto graph = std::make_unique<Graph>(OpRegistry::Global()); SubGraph(graph.get(), DT_INT32, {3}, {5}); std::vector<FunctionArgIndex> arg_indices; std::vector<int> ret_indices; 
std::vector<AllocatorAttributes> arg_alloc_attrs; std::vector<AllocatorAttributes> ret_alloc_attrs; Int32FulltypePass int32_fulltype; TF_ASSERT_OK( int32_fulltype.ProcessGraph(graph.get(), false)); Status status = UpdateArgAndRetvalMetadata( graph.get(), &arg_indices, &ret_indices, &arg_alloc_attrs, &ret_alloc_attrs, false); ASSERT_TRUE(status.ok()) << status.ToString(); CheckAlloc({true}, arg_alloc_attrs); CheckAlloc({true}, ret_alloc_attrs); } TEST_F(PartitioningUtilsTest, UpdateArgsAndRetsIntsOnDevice) { auto graph = std::make_unique<Graph>(OpRegistry::Global()); SubGraph(graph.get(), DT_INT32, {3}, {5}); std::vector<FunctionArgIndex> arg_indices; std::vector<int> ret_indices; std::vector<AllocatorAttributes> arg_alloc_attrs; std::vector<AllocatorAttributes> ret_alloc_attrs; Status status = UpdateArgAndRetvalMetadata( graph.get(), &arg_indices, &ret_indices, &arg_alloc_attrs, &ret_alloc_attrs, true); ASSERT_TRUE(status.ok()) << status.ToString(); CheckAlloc({false}, arg_alloc_attrs); CheckAlloc({false}, ret_alloc_attrs); } TEST_F(PartitioningUtilsTest, UpdateArgsAndRets_Order) { auto graph = std::make_unique<Graph>(OpRegistry::Global()); SubGraph(graph.get(), DT_FLOAT, {9, 7, 5, 3, 1}, {2, 4, 6, 8, 10}); const std::map<int, int> sub_indices = { {7, 2}, {3, 1}, {1, 0}, {5, 2}, {9, 0}}; const AttrValue* attr_value; for (Node* n : graph->op_nodes()) { if (n->IsArg()) { TF_ASSERT_OK(n->attrs().Find("index", &attr_value)); n->AddAttr("sub_index", sub_indices.at(static_cast<int>(attr_value->i()))); } } std::vector<FunctionArgIndex> arg_indices; std::vector<int> ret_indices; std::vector<AllocatorAttributes> arg_alloc_attrs; std::vector<AllocatorAttributes> ret_alloc_attrs; Status status = UpdateArgAndRetvalMetadata( graph.get(), &arg_indices, &ret_indices, &arg_alloc_attrs, &ret_alloc_attrs, false); ASSERT_TRUE(status.ok()) << status.ToString(); CheckArgIndices({{1, 0}, {3, 1}, {5, 2}, {7, 2}, {9, 0}}, arg_indices); CheckRetIndices({2, 4, 6, 8, 10}, ret_indices); CheckAlloc({false, false, false, false, false}, arg_alloc_attrs); CheckAlloc({false, false, false, false, false}, ret_alloc_attrs); } } }
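The core idea these tests exercise is partitioning by a node_to_loc key (the assigned device name), plus the single-device early exit in InsertTransferOps. A standalone sketch of just that grouping step; FakeNode and the device strings are illustrative, not TensorFlow types:

#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

struct FakeNode {
  std::string name;
  std::string assigned_device;  // plays the role of node_to_loc(node)
};

int main() {
  std::vector<FakeNode> nodes = {{"x", "/device:CPU:0"},
                                 {"id_x", "/device:CPU:1"},
                                 {"ret_x", "/device:CPU:0"}};
  // Bucket nodes by location, as Partition does per device.
  std::unordered_map<std::string, std::vector<std::string>> partitions;
  for (const auto& n : nodes) partitions[n.assigned_device].push_back(n.name);
  if (partitions.size() == 1) {
    // Mirrors InsertTransferOps returning the graph untouched.
    std::cout << "single device: no Send/Recv pairs needed\n";
    return 0;
  }
  for (const auto& [device, names] : partitions) {
    std::cout << device << ": " << names.size() << " node(s)\n";
  }
  return 0;
}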
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/partitioning_utils.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/partitioning_utils_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
908bf121-508e-4c4a-9d0d-8904854bb57f
cpp
tensorflow/tensorflow
ifrt_ops_kernel
tensorflow/core/tfrt/mlrt/kernel/ifrt_ops_kernel.cc
tensorflow/core/tfrt/mlrt/kernel/ifrt_ops_kernel_test.cc
#include <cstdint> #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "xla/python/ifrt/future.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/device_base.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/resource_handle.h" #include "tensorflow/core/framework/resource_var.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/tfrt/ifrt/checkpoint_loader.h" #include "tensorflow/core/tfrt/ifrt/ifrt_config.pb.h" #include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.h" #include "tensorflow/core/tfrt/ifrt/ifrt_model_context.h" #include "tensorflow/core/tfrt/ifrt/ifrt_model_restore_context.h" #include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h" #include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h" #include "tensorflow/core/tfrt/mlrt/interpreter/context.h" #include "tensorflow/core/tfrt/mlrt/interpreter/future.h" #include "tensorflow/core/tfrt/mlrt/kernel/context.h" #include "tensorflow/core/tfrt/mlrt/kernel/kernel.h" #include "tensorflow/core/tfrt/utils/fallback_tensor.h" #include "tsl/platform/errors.h" #include "tsl/platform/tstring.h" using tensorflow::ifrt_serving::IfrtModelContext; namespace tensorflow { namespace tf_mlrt { namespace { struct MlrtIfrtRestoreVariableKernel : mlrt::KernelFrame { using KernelFrame::KernelFrame; static constexpr char kName[] = "tf_mlrt.ifrt_restore_variable"; tensorflow::tfrt_stub::FallbackTensor prefix() const { DCHECK_GT(arguments().size(), 3); return arguments()[0].Get<tensorflow::tfrt_stub::FallbackTensor>(); } tensorflow::tfrt_stub::FallbackTensor tensor_names() const { DCHECK_GT(arguments().size(), 3); return arguments()[1].Get<tensorflow::tfrt_stub::FallbackTensor>(); } tensorflow::tfrt_stub::FallbackTensor shape_and_slices() const { DCHECK_GT(arguments().size(), 3); return arguments()[2].Get<tensorflow::tfrt_stub::FallbackTensor>(); } mlrt::bc::Vector<tensorflow::DataType> restored_dtypes() const { return attributes().GetAs<mlrt::bc::Vector<tensorflow::DataType>>(0); } mlrt::bc::Vector<bool> truncate_in_cast() const { return attributes().GetAs<mlrt::bc::Vector<bool>>(1); } std::vector<tensorflow::tfrt_stub::FallbackTensor> var_handles() const { DCHECK_GT(arguments().size(), 3); std::vector<tensorflow::tfrt_stub::FallbackTensor> result; result.reserve(arguments().size() - 3); for (int i = 3; i < arguments().size(); ++i) { result.push_back( arguments()[i].Get<tensorflow::tfrt_stub::FallbackTensor>()); } return result; } Context& context() { return execution_context().GetUserContext<Context>(); } void Invoke(); private: static constexpr int kNumRestoreClusters = 4; absl::Status InvokeHelper(); absl::Status ValidateInput(); }; void MlrtIfrtRestoreVariableKernel::Invoke() { absl::Status status = InvokeHelper(); if (!status.ok()) { execution_context().Fail(std::move(status)); return; } } absl::Status MlrtIfrtRestoreVariableKernel::ValidateInput() { if (prefix().tensor().NumElements() != 1) { return absl::InvalidArgumentError( "The prefix tensor must be a scalar tensor."); } if (!TensorShapeUtils::IsVector(tensor_names().tensor().shape()) || 
!TensorShapeUtils::IsVector(shape_and_slices().tensor().shape())) { return absl::InvalidArgumentError( absl::StrCat("Input tensor_names and shape_and_slices " "should be 1-D tensors, got ", tensor_names().tensor().shape().DebugString(), " and ", shape_and_slices().tensor().shape().DebugString())); } if (tensor_names().tensor().NumElements() != shape_and_slices().tensor().NumElements()) { return absl::InvalidArgumentError( "The tensor_names and shape_and_slices tensors must have the same " "number of elements."); } if (tensor_names().tensor().NumElements() != var_handles().size()) { return absl::InvalidArgumentError( "The tensor_names and var_handles must have the same number of " "elements."); } if (tensor_names().tensor().NumElements() != restored_dtypes().size()) { return absl::InvalidArgumentError( "The tensor_names and restored_dtypes must have the same number of " "elements."); } if (tensor_names().tensor().NumElements() != truncate_in_cast().size()) { return absl::InvalidArgumentError( "The tensor_names and truncate_in_cast must have the same number of " "elements."); } return absl::OkStatus(); } absl::Status MlrtIfrtRestoreVariableKernel::InvokeHelper() { std::optional<ifrt_serving::IfrtModelRestoreContext*> model_restore_context = context() .resource_context() .GetResource<ifrt_serving::IfrtModelRestoreContext>( ifrt_serving::kIfrtModelRestoreContextName); if (!model_restore_context.has_value()) { return absl::InternalError( "Did not find IfrtModelRestoreContext resource."); } if (*model_restore_context == nullptr) { return absl::InternalError("IfrtModelRestoreContext must not be null."); } ifrt_serving::CheckpointLoader* checkpoint_loader = (*model_restore_context)->checkpoint_loader(); if (!checkpoint_loader) { return absl::InternalError("CheckpointLoader must not be null."); } TF_RETURN_IF_ERROR(ValidateInput()); std::vector<tensorflow::DataType> restored_dtypes_vec( restored_dtypes().begin(), restored_dtypes().end()); std::vector<bool> truncate_in_cast_vec(truncate_in_cast().begin(), truncate_in_cast().end()); return checkpoint_loader->Load(prefix(), var_handles(), tensor_names(), shape_and_slices(), restored_dtypes_vec, truncate_in_cast_vec, context()); } class MlrtIfrtLoadVariableKernel : public mlrt::KernelFrame { public: using KernelFrame::KernelFrame; static constexpr char kName[] = "tf_mlrt.ifrt_load_variable"; const tensorflow::Tensor& variable_handler_tensor() const { DCHECK_GE(arguments().size(), 1); const tensorflow::Tensor& ret = arguments()[0].Get<tensorflow::tfrt_stub::FallbackTensor>().tensor(); DCHECK_EQ(ret.NumElements(), 1); return ret; } bool used_by_host() const { DCHECK_EQ(attributes().size(), 1); return attributes().GetAs<bool>(0); } Context& context() { return execution_context().GetUserContext<Context>(); } void Invoke(); private: absl::Status InvokeHelper(); }; void MlrtIfrtLoadVariableKernel::Invoke() { absl::Status status = InvokeHelper(); if (!status.ok()) { execution_context().Fail(std::move(status)); return; } } absl::Status MlrtIfrtLoadVariableKernel::InvokeHelper() { DCHECK_EQ(2, results().size()); std::optional<IfrtModelContext*> ifrt_model_context = context().resource_context().GetResource<IfrtModelContext>( "IfrtModelContext"); if (!ifrt_model_context.has_value()) { return absl::FailedPreconditionError( "LoadVariableOp: failed to fetch IfrtModelContext: "); } auto tensor_promise = mlrt::Promise::Allocate<tensorflow::tfrt_stub::FallbackTensor>(); auto tensor_future = tensor_promise.GetFuture(); ifrt_serving::IfrtRestoreTensorRegistry&
ifrt_restore_tensor_registry = (*ifrt_model_context)->GetRestoreTensorRegistry(); auto& resource_handle = variable_handler_tensor().scalar<ResourceHandle>()(); std::string runtime_name = ifrt_serving::GetRuntimeNameFromVarHandle(resource_handle); if (used_by_host()) { if (ifrt_restore_tensor_registry.SetUsedByHost(runtime_name).ok()) { xla::ifrt::Future<tensorflow::Tensor> restored_tensor_future = ifrt_restore_tensor_registry.GetRestoredTensor(runtime_name); restored_tensor_future.OnReady( [tensor_promise = std::move(tensor_promise)]( absl::StatusOr<tensorflow::Tensor> restored_tensor) mutable { if (!restored_tensor.ok()) { std::move(tensor_promise).SetError(restored_tensor.status()); return; } std::move(tensor_promise) .Set<tensorflow::tfrt_stub::FallbackTensor>( tensorflow::tfrt_stub::FallbackTensor(*restored_tensor)); }); } else { auto resource_manager = context() .fallback_request_state() .device_manager() .HostCPU() ->resource_manager(); DCHECK(resource_manager); Var* variable; TF_RETURN_IF_ERROR(resource_manager->Lookup( resource_handle.container(), resource_handle.name(), &variable)); if (tensorflow::Tensor* t = variable->tensor(); t != nullptr) { std::move(tensor_promise) .Set<tensorflow::tfrt_stub::FallbackTensor>( tensorflow::tfrt_stub::FallbackTensor(*t)); } else { std::move(tensor_promise) .SetError(absl::InternalError( absl::StrCat("Variable ", resource_handle.name(), " is not found in either " "IfrtRestoreTensorRegistry or ResourceManager"))); } } } else { std::move(tensor_promise) .Set<tensorflow::tfrt_stub::FallbackTensor>( tensorflow::tfrt_stub::FallbackTensor()); } tensorflow::Tensor key_tensor(tensorflow::DT_STRING, {}); key_tensor.scalar<tsl::tstring>()() = runtime_name; results()[0].Set(tensorflow::tfrt_stub::FallbackTensor(key_tensor)); results()[1].Set(std::move(tensor_future)); return absl::OkStatus(); } void RegisterTfMlrtIfrtKernels(mlrt::KernelRegistry& registry) { registry.Register<MlrtIfrtLoadVariableKernel>(); registry.Register<MlrtIfrtRestoreVariableKernel>(); } } const bool kUnused = [] { RegisterTfMlrtIfrtKernels(GetTfMlrtOptionalKernelRegistry()); return true; }(); } }
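MlrtIfrtLoadVariableKernel returns its tensor result as a future that is fulfilled (or failed) later, once the restored tensor becomes available. A sketch of that promise/future handoff using the standard library rather than mlrt::Promise, purely to illustrate the pattern:

#include <future>
#include <iostream>
#include <string>
#include <thread>
#include <utility>

int main() {
  std::promise<std::string> tensor_promise;
  std::future<std::string> tensor_future = tensor_promise.get_future();
  // The kernel hands the future back immediately; the producer fulfills the
  // promise asynchronously, much like OnReady setting the promise above.
  std::thread producer([p = std::move(tensor_promise)]() mutable {
    p.set_value("restored tensor contents");
  });
  std::cout << tensor_future.get() << "\n";  // blocks until the promise is set
  producer.join();
  return 0;
}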
#include <cstdint> #include <functional> #include <memory> #include <string> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/strings/substitute.h" #include "absl/synchronization/notification.h" #include "absl/types/span.h" #include "xla/python/ifrt/client.h" #include "xla/python/ifrt/future.h" #include "xla/python/ifrt/test_util.h" #include "xla/tsl/framework/test_util/mock_serving_device_selector.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tensorflow/core/framework/resource_var.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_matcher.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/platform/resource_loader.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h" #include "tensorflow/core/tfrt/fallback/fallback_state.h" #include "tensorflow/core/tfrt/fallback/op_kernel_runner.h" #include "tensorflow/core/tfrt/ifrt/checkpoint_loader.h" #include "tensorflow/core/tfrt/ifrt/ifrt_config.pb.h" #include "tensorflow/core/tfrt/ifrt/ifrt_model_context.h" #include "tensorflow/core/tfrt/ifrt/ifrt_model_restore_context.h" #include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h" #include "tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h" #include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h" #include "tensorflow/core/tfrt/mlrt/bytecode/executable.h" #include "tensorflow/core/tfrt/mlrt/interpreter/builtin_kernels.h" #include "tensorflow/core/tfrt/mlrt/interpreter/context.h" #include "tensorflow/core/tfrt/mlrt/interpreter/execute.h" #include "tensorflow/core/tfrt/mlrt/interpreter/interpreter_testutil.h" #include "tensorflow/core/tfrt/mlrt/interpreter/value.h" #include "tensorflow/core/tfrt/mlrt/kernel/context.h" #include "tensorflow/core/tfrt/mlrt/kernel/kernel.h" #include "tensorflow/core/tfrt/utils/fallback_tensor.h" #include "tsl/platform/env.h" #include "tsl/platform/refcount.h" #include "tsl/platform/status.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" #include "tsl/platform/threadpool.h" #include "tsl/platform/tstring.h" #include "tfrt/host_context/concurrent_work_queue.h" #include "tfrt/host_context/resource_context.h" namespace tensorflow { namespace tf_mlrt { namespace { using tensorflow::test::AsScalar; using tensorflow::test::AsTensor; using tensorflow::test::ExpectEqual; using tensorflow::test::TensorEq; constexpr absl::string_view kContainer = "test"; constexpr absl::string_view kSharedName = "y"; constexpr absl::string_view kVariableRuntimeName = "test__y"; tsl::thread::ThreadPool& GetThreadPool() { constexpr int kMaxParallelism = 16; static tsl::thread::ThreadPool* thread_pool = new tsl::thread::ThreadPool(tsl::Env::Default(), tsl::ThreadOptions(), "IfrtSharding", kMaxParallelism); return *thread_pool; } std::string EncodeRestoreDtypesInt32(int num_outputs) { mlrt::bc::Buffer buffer; mlrt::bc::Allocator allocator(&buffer); auto ctor = mlrt::bc::New<mlrt::bc::Vector<tensorflow::DataType>>( &allocator, num_outputs); for (int i = 0; i < num_outputs; ++i) { ctor.ConstructAt(i, tensorflow::DT_INT32); } return std::string(buffer.data(), buffer.size()); } std::string EncodeTruncateInCast(int num_outputs) { mlrt::bc::Buffer buffer; 
mlrt::bc::Allocator allocator(&buffer); auto ctor = mlrt::bc::New<mlrt::bc::Vector<bool>>(&allocator, num_outputs); for (int i = 0; i < num_outputs; ++i) { ctor.ConstructAt(i, false); } return std::string(buffer.data(), buffer.size()); } mlrt::bc::Buffer CreateExecutableForIfrtRestoreVariableOp( int num_variables = 1) { mlrt::bc::Buffer buffer; mlrt::bc::Allocator allocator(&buffer); auto executable_ctor = mlrt::bc::New<mlrt::bc::Executable>(&allocator); mlrt::testing::SymbolTable kernels; std::vector<std::string> kernel_names = { "tf_mlrt.createop", "tf_mlrt.executeop", "tf_mlrt.ifrt_restore_variable", "return"}; executable_ctor.construct_kernel_names(kernel_names.size()) .Assign(kernel_names); kernels.Def(kernel_names); static constexpr int kNumAttributes = 5; mlrt::testing::AttributeTable attributes(executable_ctor.construct_attributes( kNumAttributes + 2 * (num_variables - 1))); std::string restore_dtypes = EncodeRestoreDtypesInt32(num_variables); attributes.Add("restore_dtypes", restore_dtypes); std::vector<bool> truncate_in_cast(num_variables, false); attributes.Add("truncate_in_cast", EncodeTruncateInCast(num_variables)); for (int i = 0; i < num_variables; ++i) { attributes.Add( absl::StrCat("var_handle_op_node_def", i), absl::Substitute( R"pb(name: "$0" op: "VarHandleOp" device: "/job:localhost/replica:0/task:0/device:CPU:0" attr { key: "container" value { s: "$1" } } attr { key: "shared_name" value { s: "$2" } } attr { key: "dtype" value { type: DT_INT16 } } attr { key: "shape" value { shape { dim { size: 3 } } } } )pb", absl::StrCat("VarHandleOp", i), kContainer, absl::StrCat(kSharedName, i))); attributes.Add(absl::StrCat("var_handle_op_key", i), i); } auto functions_ctor = executable_ctor.construct_functions(1); { auto function_ctor = functions_ctor.ConstructAt(0); function_ctor.construct_name("main"); mlrt::testing::SymbolTable regs; function_ctor.construct_input_regs(3).Assign( regs.Def({"prefix_tensor", "name_tensor", "slice_tensor"})); const int kNumKernels = 4; auto kernels_ctor = function_ctor.construct_kernels(kNumKernels + 2 * (num_variables - 1)); int kernel_index = 0; std::vector<std::string> variable_handle_names; variable_handle_names.reserve(num_variables); for (int i = 0; i < num_variables; ++i) { variable_handle_names.push_back(absl::StrCat("variable_handle", i)); std::string variable_handle_op_node_def = absl::StrCat("var_handle_op_node_def", i); std::string variable_handle_op_key = absl::StrCat("var_handle_op_key", i); { auto createop_ctor = kernels_ctor.ConstructAt(kernel_index); createop_ctor.set_code(kernels.Use("tf_mlrt.createop")); createop_ctor.construct_arguments(0); createop_ctor.construct_results(0); createop_ctor.construct_attributes(2).Assign( {attributes.GetHandle(variable_handle_op_node_def), attributes.GetHandle(variable_handle_op_key)}); kernel_index++; } { auto executeop_ctor = kernels_ctor.ConstructAt(kernel_index); executeop_ctor.set_code(kernels.Use("tf_mlrt.executeop")); executeop_ctor.construct_arguments(0); executeop_ctor.construct_results(1).Assign( {regs.Def(variable_handle_names.back())}); executeop_ctor.construct_attributes(2).Assign( {attributes.GetHandle(variable_handle_op_node_def), attributes.GetHandle(variable_handle_op_key)}); executeop_ctor.construct_last_uses(1).Assign({0}); kernel_index++; } } { std::vector<std::string> args; args.reserve(3 + num_variables); args.push_back("prefix_tensor"); args.push_back("name_tensor"); args.push_back("slice_tensor"); for (int i = 0; i < num_variables; ++i) { 
args.push_back(variable_handle_names[i]); } auto restore_ctor = kernels_ctor.ConstructAt(kernel_index); restore_ctor.set_code(kernels.Use("tf_mlrt.ifrt_restore_variable")); restore_ctor.construct_arguments(args.size()).Assign(regs.Use(args)); restore_ctor.construct_results(0); restore_ctor.construct_attributes(2).Assign( {attributes.GetHandle("restore_dtypes"), attributes.GetHandle("truncate_in_cast")}); kernel_index++; } { auto return_ctor = kernels_ctor.ConstructAt(kernel_index); return_ctor.set_code(kernels.Use("return")); return_ctor.construct_arguments(0); kernel_index++; } function_ctor.set_num_regs(regs.size()); } return buffer; } mlrt::bc::Buffer CreateExecutableForIfrtLoadVariableOp( bool redundant_ifrt_load_variable_op = false, bool used_by_host = false) { mlrt::bc::Buffer buffer; mlrt::bc::Allocator allocator(&buffer); auto executable_ctor = mlrt::bc::New<mlrt::bc::Executable>(&allocator); mlrt::testing::SymbolTable kernels; std::vector<std::string> kernel_names = { "tf_mlrt.createop", "tf_mlrt.executeop", "tf_mlrt.ifrt_load_variable", "return"}; executable_ctor.construct_kernel_names(kernel_names.size()) .Assign(kernel_names); kernels.Def(kernel_names); mlrt::testing::AttributeTable attributes( executable_ctor.construct_attributes(3)); attributes.Add("var_handle_op_node_def", absl::Substitute( R"pb(name: "VarHandleOp" op: "VarHandleOp" device: "/job:localhost/replica:0/task:0/device:CPU:0" attr { key: "container" value { s: "$0" } } attr { key: "shared_name" value { s: "$1" } } attr { key: "dtype" value { type: DT_INT32 } } attr { key: "shape" value { shape { dim { size: 1 } } } } )pb", kContainer, kSharedName)); attributes.Add("var_handle_op_key", 0); attributes.Add("used_by_host", used_by_host); auto functions_ctor = executable_ctor.construct_functions(1); { auto function_ctor = functions_ctor.ConstructAt(0); function_ctor.construct_name("main"); mlrt::testing::SymbolTable regs; function_ctor.construct_output_regs(2).Assign( {regs.Def("output_tensor"), regs.Def("output_future")}); const int kNumKernels = 4 + (redundant_ifrt_load_variable_op ? 1 : 0); auto kernels_ctor = function_ctor.construct_kernels(kNumKernels); int kernel_index = 0; { auto createop_ctor = kernels_ctor.ConstructAt(kernel_index); createop_ctor.set_code(kernels.Use("tf_mlrt.createop")); createop_ctor.construct_arguments(0); createop_ctor.construct_results(0); createop_ctor.construct_attributes(2).Assign( {attributes.GetHandle("var_handle_op_node_def"), attributes.GetHandle("var_handle_op_key")}); kernel_index++; } { auto executeop_ctor = kernels_ctor.ConstructAt(kernel_index); executeop_ctor.set_code(kernels.Use("tf_mlrt.executeop")); executeop_ctor.construct_arguments(0); executeop_ctor.construct_results(1).Assign({regs.Def("variable_handle")}); executeop_ctor.construct_attributes(2).Assign( {attributes.GetHandle("var_handle_op_node_def"), attributes.GetHandle("var_handle_op_key")}); kernel_index++; } { auto kernel_ctor = kernels_ctor.ConstructAt(kernel_index); kernel_ctor.set_code(kernels.Use("tf_mlrt.ifrt_load_variable")); kernel_ctor.construct_results(2).Assign( {regs.Use("output_tensor"), regs.Use("output_future")}); kernel_ctor.construct_arguments(1).Assign({regs.Use("variable_handle")}); kernel_ctor.construct_attributes(1).Assign( {attributes.GetHandle("used_by_host")}); kernel_ctor.construct_last_uses(1).Assign( {redundant_ifrt_load_variable_op ? 
0 : 1}); kernel_index++; } if (redundant_ifrt_load_variable_op) { auto kernel_ctor = kernels_ctor.ConstructAt(kernel_index); kernel_ctor.set_code(kernels.Use("tf_mlrt.ifrt_load_variable")); kernel_ctor.construct_results(2).Assign( {regs.Def("dummy"), regs.Def("dummy_future2")}); kernel_ctor.construct_attributes(1).Assign( {attributes.GetHandle("used_by_host")}); kernel_ctor.construct_arguments(1).Assign({regs.Use("variable_handle")}); kernel_ctor.construct_last_uses(1).Assign({1}); kernel_index++; } { auto kernel_ctor = kernels_ctor.ConstructAt(kernel_index); kernel_ctor.set_code(kernels.Use("return")); kernel_ctor.construct_arguments(2).Assign( {regs.Use("output_tensor"), regs.Use("output_future")}); kernel_index++; } DCHECK_EQ(kernel_index, kNumKernels); function_ctor.set_num_regs(regs.size()); } return buffer; } class KernelTest : public ::testing::Test { protected: void SetUp() override { mlrt::RegisterBuiltinKernels(registry_); RegisterTfMlrtKernels(registry_); execution_work_queue_ = tfrt::CreateMultiThreadedWorkQueue( 4, 4); restore_work_queue_ = tfrt::CreateMultiThreadedWorkQueue( 4, 4); TF_ASSERT_OK_AND_ASSIGN(fallback_state_, tfrt_stub::FallbackState::Create( session_options_, fdef_lib_)); runner_ = [](const std::function<void()>& f) { f(); }; fallback_request_state_ = std::make_unique<tfd::KernelFallbackCompatRequestState>( &runner_, &fallback_state_->device_manager(), 0, &runner_table_, &resource_array_, nullptr, std::nullopt, &fallback_state_->process_function_library_runtime()); TF_ASSERT_OK_AND_ASSIGN(client_, xla::ifrt::test_util::GetClient()); resource_context_ .CreateResource<tensorflow::ifrt_serving::IfrtModelContext>( "IfrtModelContext", client_, ifrt_core_selector_.get(), &GetThreadPool(), nullptr); tf_context_ = std::make_unique<Context>(fallback_request_state_.get(), &resource_context_); ifrt_model_context_ = resource_context_ .GetResource<tensorflow::ifrt_serving::IfrtModelContext>( "IfrtModelContext") .value(); ifrt_model_context_->set_checkpoint_loader_queue(restore_work_queue_.get()); resource_context_ .CreateResource<tensorflow::ifrt_serving::IfrtModelRestoreContext>( ifrt_serving::kIfrtModelRestoreContextName, std::make_unique<tensorflow::ifrt_serving::CheckpointLoader>( &ifrt_model_context_->GetRestoreTensorRegistry(), ifrt_model_context_->checkpoint_loader_queue())); serving_device_selector_ = std::make_unique<tsl::test_util::MockServingDeviceSelector>(); ifrt_core_selector_ = std::make_unique<ifrt_serving::IfrtServingCoreSelector>( serving_device_selector_.get(), client_->addressable_device_count()); } std::unique_ptr<tsl::test_util::MockServingDeviceSelector> serving_device_selector_; std::unique_ptr<ifrt_serving::IfrtServingCoreSelector> ifrt_core_selector_; mlrt::KernelRegistry registry_; std::unique_ptr<tfrt::ConcurrentWorkQueue> execution_work_queue_; std::unique_ptr<tfrt::ConcurrentWorkQueue> restore_work_queue_; tensorflow::SessionOptions session_options_; tensorflow::FunctionDefLibrary fdef_lib_; std::function<void(std::function<void()>)> runner_; tfrt_stub::OpKernelRunnerTable runner_table_; tfd::FallbackResourceArray resource_array_; std::unique_ptr<tfrt_stub::FallbackState> fallback_state_; tfrt::ResourceContext resource_context_; std::shared_ptr<xla::ifrt::Client> client_; std::unique_ptr<tfd::KernelFallbackCompatRequestState> fallback_request_state_; std::unique_ptr<Context> tf_context_; tensorflow::ifrt_serving::IfrtModelContext* ifrt_model_context_; }; TEST_F(KernelTest, IfrtLoadVariableOpCanGetTensorFromResourceManager) { auto buffer = 
CreateExecutableForIfrtLoadVariableOp( false, true); mlrt::bc::Executable executable(buffer.data()); mlrt::LoadedExecutable loaded_executable(executable, registry_); mlrt::ExecutionContext execution_context(&loaded_executable); execution_context.set_work_queue(execution_work_queue_.get()); execution_context.AddUserContext(std::move(tf_context_)); tensorflow::Tensor input_tensor; TF_CHECK_OK(tensorflow::Tensor::BuildTensor(DT_INT32, {}, &input_tensor)); input_tensor.scalar<int32_t>()() = 1234; tsl::core::RefCountPtr<Var> variable(new Var(DT_INT32)); *variable->tensor() = input_tensor; variable->is_initialized = true; ASSERT_OK( fallback_state_->device_manager().HostCPU()->resource_manager()->Create( std::string(kContainer), std::string(kSharedName), &(*variable))); std::vector<mlrt::Value> args; std::vector<uint8_t> last_uses; std::vector<mlrt::Value> results; results.resize(2); absl::Notification notification; execution_context.set_exit_handler( [&notification]() { notification.Notify(); }); execution_context.Call(executable.functions()[0], last_uses, absl::MakeSpan(args), absl::MakeSpan(results)); mlrt::Execute(execution_context); notification.WaitForNotification(); TF_ASSERT_OK(execution_context.status()); ExpectEqual(results[0].Get<tfrt_stub::FallbackTensor>().tensor(), AsScalar(tsl::tstring(kVariableRuntimeName))); auto returned_future = results[1].Get<mlrt::Future>(); ASSERT_TRUE(returned_future.IsReady()); EXPECT_THAT(returned_future.Get<tfrt_stub::FallbackTensor>().tensor(), TensorEq(input_tensor)); } TEST_F(KernelTest, IfrtLoadVariableOp) { auto buffer = CreateExecutableForIfrtLoadVariableOp(); mlrt::bc::Executable executable(buffer.data()); mlrt::LoadedExecutable loaded_executable(executable, registry_); mlrt::ExecutionContext execution_context(&loaded_executable); execution_context.set_work_queue(execution_work_queue_.get()); execution_context.AddUserContext(std::move(tf_context_)); tensorflow::Tensor input_tensor; TF_CHECK_OK(tensorflow::Tensor::BuildTensor(DT_INT32, {}, &input_tensor)); input_tensor.scalar<int32_t>()() = 1234; auto input_tensor_promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise(); auto input_tensor_future = xla::ifrt::Future<tensorflow::Tensor>(input_tensor_promise); ifrt_serving::IfrtRestoreTensorRegistry::RestoredTensorInfo restore_tensor_info{.dtype_and_shape = {.dtype = input_tensor.dtype(), .shape = input_tensor.shape()}, .tensor_future = input_tensor_future}; input_tensor_promise.Set(input_tensor); TF_ASSERT_OK(ifrt_model_context_->GetRestoreTensorRegistry().TryRegister( kVariableRuntimeName, restore_tensor_info)); std::vector<mlrt::Value> args; std::vector<uint8_t> last_uses; std::vector<mlrt::Value> results; results.resize(2); absl::Notification notification; execution_context.set_exit_handler( [&notification]() { notification.Notify(); }); execution_context.Call(executable.functions()[0], last_uses, absl::MakeSpan(args), absl::MakeSpan(results)); mlrt::Execute(execution_context); notification.WaitForNotification(); TF_ASSERT_OK(execution_context.status()); ExpectEqual(results[0].Get<tfrt_stub::FallbackTensor>().tensor(), AsScalar(tsl::tstring(kVariableRuntimeName))); auto returned_future = results[1].Get<mlrt::Future>(); ASSERT_TRUE(returned_future.IsReady()); EXPECT_THAT(returned_future.Get<tfrt_stub::FallbackTensor>().tensor(), TensorEq(tensorflow::Tensor())); } TEST_F(KernelTest, DuplicateIfrtLoadVariableOpShallSucceed) { auto buffer = CreateExecutableForIfrtLoadVariableOp( true); mlrt::bc::Executable executable(buffer.data()); 
mlrt::LoadedExecutable loaded_executable(executable, registry_); mlrt::ExecutionContext execution_context(&loaded_executable); execution_context.set_work_queue(execution_work_queue_.get()); execution_context.AddUserContext(std::move(tf_context_)); tensorflow::Tensor input_tensor; TF_CHECK_OK(tensorflow::Tensor::BuildTensor(DT_INT32, {}, &input_tensor)); input_tensor.scalar<int32_t>()() = 1234; auto input_tensor_promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise(); auto input_tensor_future = xla::ifrt::Future<tensorflow::Tensor>(input_tensor_promise); ifrt_serving::IfrtRestoreTensorRegistry::RestoredTensorInfo restore_tensor_info{.dtype_and_shape = {.dtype = input_tensor.dtype(), .shape = input_tensor.shape()}, .tensor_future = input_tensor_future}; input_tensor_promise.Set(input_tensor); TF_ASSERT_OK(ifrt_model_context_->GetRestoreTensorRegistry().TryRegister( kVariableRuntimeName, restore_tensor_info)); std::vector<mlrt::Value> args; std::vector<uint8_t> last_uses; std::vector<mlrt::Value> results; results.resize(2); absl::Notification notification; execution_context.set_exit_handler( [&notification]() { notification.Notify(); }); execution_context.Call(executable.functions()[0], last_uses, absl::MakeSpan(args), absl::MakeSpan(results)); mlrt::Execute(execution_context); notification.WaitForNotification(); TF_ASSERT_OK(execution_context.status()); ExpectEqual(results[0].Get<tfrt_stub::FallbackTensor>().tensor(), AsScalar(tsl::tstring(kVariableRuntimeName))); auto returned_future = results[1].Get<mlrt::Future>(); ASSERT_TRUE(returned_future.IsReady()); EXPECT_THAT(returned_future.Get<tfrt_stub::FallbackTensor>().tensor(), TensorEq(tensorflow::Tensor())); } TEST_F(KernelTest, IfrtRestoreVariableOp) { std::string checkpoint_prefix = tensorflow::GetDataDependencyFilepath( "tensorflow/core/tfrt/mlrt/kernel/testdata/" "gen_checkpoint_data/variables") + "/variables"; auto buffer = CreateExecutableForIfrtRestoreVariableOp(); mlrt::bc::Executable executable(buffer.data()); mlrt::LoadedExecutable loaded_executable(executable, registry_); mlrt::ExecutionContext execution_context(&loaded_executable); execution_context.set_work_queue(execution_work_queue_.get()); execution_context.AddUserContext(std::move(tf_context_)); xla::ifrt::Future<tensorflow::Tensor> uninitialized_entry = ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor( kVariableRuntimeName); ASSERT_TRUE(uninitialized_entry.IsReady()); EXPECT_THAT(uninitialized_entry.Await().status(), ::tsl::testing::StatusIs(absl::StatusCode::kNotFound)); std::vector<mlrt::Value> args; args.resize(3); tensorflow::Tensor prefix_tensor = AsTensor<tsl::tstring>({tsl::tstring(checkpoint_prefix)}); args.at(0).Set(tfrt_stub::FallbackTensor(std::move(prefix_tensor))); tensorflow::Tensor name_tensor = AsTensor<tsl::tstring>({tsl::tstring("w/.ATTRIBUTES/VARIABLE_VALUE")}); args.at(1).Set(tfrt_stub::FallbackTensor(std::move(name_tensor))); tensorflow::Tensor slice_tensor = AsTensor<tsl::tstring>({tsl::tstring("")}); args.at(2).Set(tfrt_stub::FallbackTensor(std::move(slice_tensor))); std::vector<uint8_t> last_uses = {true, true, true}; std::vector<mlrt::Value> results; absl::Notification notification; execution_context.set_exit_handler( [&notification]() { notification.Notify(); }); execution_context.Call(executable.functions()[0], last_uses, absl::MakeSpan(args), absl::MakeSpan(results)); mlrt::Execute(execution_context); notification.WaitForNotification(); TF_ASSERT_OK(execution_context.status()); xla::ifrt::Future<tensorflow::Tensor> 
restored_future = ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor( absl::StrCat(kVariableRuntimeName, 0)); absl::StatusOr<tensorflow::Tensor> restored_tensor = restored_future.Await(); TF_ASSERT_OK(restored_tensor.status()); EXPECT_THAT(*restored_tensor, TensorEq(AsTensor<int16_t>({1, 2, 3}, {3}))); } TEST_F(KernelTest, IfrtRestoreVariableOp4Variables) { std::string checkpoint_prefix = tensorflow::GetDataDependencyFilepath( "tensorflow/core/tfrt/mlrt/kernel/testdata/" "gen_checkpoint_data/variables") + "/variables"; static constexpr int kNumVariables = 4; auto buffer = CreateExecutableForIfrtRestoreVariableOp(kNumVariables); mlrt::bc::Executable executable(buffer.data()); mlrt::LoadedExecutable loaded_executable(executable, registry_); mlrt::ExecutionContext execution_context(&loaded_executable); execution_context.set_work_queue(execution_work_queue_.get()); execution_context.AddUserContext(std::move(tf_context_)); xla::ifrt::Future<tensorflow::Tensor> uninitialized_entry = ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor( kVariableRuntimeName); ASSERT_TRUE(uninitialized_entry.IsReady()); EXPECT_THAT(uninitialized_entry.Await().status(), ::tsl::testing::StatusIs(absl::StatusCode::kNotFound)); std::vector<mlrt::Value> args; args.resize(3); tensorflow::Tensor prefix_tensor = AsTensor<tsl::tstring>({tsl::tstring(checkpoint_prefix)}); args.at(0).Set(tfrt_stub::FallbackTensor(std::move(prefix_tensor))); tensorflow::Tensor name_tensor = AsTensor<tsl::tstring>({tsl::tstring("w/.ATTRIBUTES/VARIABLE_VALUE"), tsl::tstring("w1/.ATTRIBUTES/VARIABLE_VALUE"), tsl::tstring("w2/.ATTRIBUTES/VARIABLE_VALUE"), tsl::tstring("w3/.ATTRIBUTES/VARIABLE_VALUE")}); args.at(1).Set(tfrt_stub::FallbackTensor(std::move(name_tensor))); tensorflow::Tensor slice_tensor = AsTensor<tsl::tstring>( {tsl::tstring(""), tsl::tstring(""), tsl::tstring(""), tsl::tstring("")}); args.at(2).Set(tfrt_stub::FallbackTensor(std::move(slice_tensor))); std::vector<uint8_t> last_uses = {true, true, true}; std::vector<mlrt::Value> results; absl::Notification notification; execution_context.set_exit_handler( [&notification]() { notification.Notify(); }); execution_context.Call(executable.functions()[0], last_uses, absl::MakeSpan(args), absl::MakeSpan(results)); mlrt::Execute(execution_context); notification.WaitForNotification(); TF_ASSERT_OK(execution_context.status()); xla::ifrt::Future<tensorflow::Tensor> restored_future = ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor( absl::StrCat(kVariableRuntimeName, 0)); absl::StatusOr<tensorflow::Tensor> restored_tensor = restored_future.Await(); TF_ASSERT_OK(restored_tensor.status()); EXPECT_THAT(*restored_tensor, TensorEq(AsTensor<int16_t>({1, 2, 3}, {3}))); xla::ifrt::Future<tensorflow::Tensor> restored_future1 = ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor( absl::StrCat(kVariableRuntimeName, 1)); absl::StatusOr<tensorflow::Tensor> restored_tensor1 = restored_future1.Await(); TF_ASSERT_OK(restored_tensor1.status()); EXPECT_THAT(*restored_tensor1, TensorEq(AsTensor<int16_t>({4, 5, 6}, {3}))); xla::ifrt::Future<tensorflow::Tensor> restored_future2 = ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor( absl::StrCat(kVariableRuntimeName, 2)); absl::StatusOr<tensorflow::Tensor> restored_tensor2 = restored_future2.Await(); TF_ASSERT_OK(restored_tensor2.status()); EXPECT_THAT(*restored_tensor2, TensorEq(AsTensor<int16_t>({7, 8, 9}, {3}))); xla::ifrt::Future<tensorflow::Tensor> restored_future3 = 
ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor( absl::StrCat(kVariableRuntimeName, 3)); absl::StatusOr<tensorflow::Tensor> restored_tensor3 = restored_future3.Await(); TF_ASSERT_OK(restored_tensor3.status()); EXPECT_THAT(*restored_tensor3, TensorEq(AsTensor<int16_t>({10, 11, 12}, {3}))); } TEST_F(KernelTest, IfrtRestoreVariableOpInValidInput) { std::string checkpoint_prefix = tensorflow::GetDataDependencyFilepath( "tensorflow/core/tfrt/mlrt/kernel/testdata/" "gen_checkpoint_data/variables") + "/variables"; static constexpr int kNumVariables = 4; auto buffer = CreateExecutableForIfrtRestoreVariableOp(kNumVariables); mlrt::bc::Executable executable(buffer.data()); mlrt::LoadedExecutable loaded_executable(executable, registry_); mlrt::ExecutionContext execution_context(&loaded_executable); execution_context.set_work_queue(execution_work_queue_.get()); execution_context.AddUserContext(std::move(tf_context_)); xla::ifrt::Future<tensorflow::Tensor> uninitialized_entry = ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor( kVariableRuntimeName); ASSERT_TRUE(uninitialized_entry.IsReady()); EXPECT_THAT(uninitialized_entry.Await().status(), ::tsl::testing::StatusIs(absl::StatusCode::kNotFound)); std::vector<mlrt::Value> args; args.resize(3); tensorflow::Tensor prefix_tensor = AsTensor<tsl::tstring>({tsl::tstring(checkpoint_prefix)}); args.at(0).Set(tfrt_stub::FallbackTensor(std::move(prefix_tensor))); tensorflow::Tensor name_tensor = AsTensor<tsl::tstring>({tsl::tstring("w/.ATTRIBUTES/VARIABLE_VALUE"), tsl::tstring("w1/.ATTRIBUTES/VARIABLE_VALUE"), tsl::tstring("w2/.ATTRIBUTES/VARIABLE_VALUE"), tsl::tstring("w3/.ATTRIBUTES/VARIABLE_VALUE")}); args.at(1).Set(tfrt_stub::FallbackTensor(std::move(name_tensor))); tensorflow::Tensor slice_tensor = AsTensor<tsl::tstring>( {tsl::tstring(""), tsl::tstring(""), tsl::tstring("")}); args.at(2).Set(tfrt_stub::FallbackTensor(std::move(slice_tensor))); std::vector<uint8_t> last_uses = {true, true, true}; std::vector<mlrt::Value> results; absl::Notification notification; execution_context.set_exit_handler( [&notification]() { notification.Notify(); }); execution_context.Call(executable.functions()[0], last_uses, absl::MakeSpan(args), absl::MakeSpan(results)); mlrt::Execute(execution_context); notification.WaitForNotification(); EXPECT_THAT(execution_context.status(), ::tsl::testing::StatusIs(absl::StatusCode::kInvalidArgument)); } } } }
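Every test above repeats the same execution harness; the condensed sketch below pulls it out in one place for readability. It reuses only calls that appear verbatim in the tests (mlrt::bc::Executable, mlrt::LoadedExecutable, mlrt::ExecutionContext, mlrt::Execute), and assumes buffer, registry_, execution_work_queue_, and tf_context_ are initialized as in KernelTest::SetUp().

// Condensed execution harness shared by the kernel tests above.
// Assumes `buffer` holds the serialized bytecode and that `registry_`,
// `execution_work_queue_`, and `tf_context_` come from SetUp().
mlrt::bc::Executable executable(buffer.data());
mlrt::LoadedExecutable loaded_executable(executable, registry_);
mlrt::ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(execution_work_queue_.get());
execution_context.AddUserContext(std::move(tf_context_));

std::vector<mlrt::Value> args;       // kernel inputs (empty in the load-variable tests)
std::vector<uint8_t> last_uses;      // per-argument last-use flags
std::vector<mlrt::Value> results;
results.resize(2);                   // (variable runtime name, tensor future)

absl::Notification notification;
execution_context.set_exit_handler(
    [&notification]() { notification.Notify(); });
execution_context.Call(executable.functions()[0], last_uses,
                       absl::MakeSpan(args), absl::MakeSpan(results));
mlrt::Execute(execution_context);
notification.WaitForNotification();  // block until the exit handler fires
TF_ASSERT_OK(execution_context.status());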
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/kernel/ifrt_ops_kernel.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/kernel/ifrt_ops_kernel_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9fb23443-70bb-4a1a-b176-8ffde412eac7
cpp
tensorflow/tensorflow
clamp
tensorflow/lite/experimental/shlo/legacy/src/clamp.cc
tensorflow/lite/experimental/shlo/legacy/test/clamp_test.cc
#include <algorithm> #include <cstddef> #include <type_traits> #include "absl/status/status.h" #include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h" #include "tensorflow/lite/experimental/shlo/legacy/src/dispatch.h" #include "tensorflow/lite/experimental/shlo/legacy/src/storage.h" #include "tensorflow/lite/experimental/shlo/legacy/src/util.h" namespace stablehlo { namespace { template <typename Value> absl::Status CheckParameters(const Value& min, const Value& operand, const Value& max, Value& result) { if (!(min.rank() == 0 or min.shape() == operand.shape())) { return absl::InvalidArgumentError( "Constraint violation: rank(min) = 0 or shape(min) = shape(operand)"); } else if (!(max.rank() == 0 or max.shape() == operand.shape())) { return absl::InvalidArgumentError( "Constraint violation: rank(max) = 0 or shape(max) = shape(operand)"); } else if (!(min.baseline_element_type() == operand.baseline_element_type() and min.baseline_element_type() == max.baseline_element_type())) { return absl::InvalidArgumentError( "Constraint violation: baseline_element_type(min) = " "baseline_element_type(operand) = baseline_element_type(max)"); } else if (!(operand.baseline_type() == result.baseline_type())) { return absl::InvalidArgumentError( "Constraint violation: baseline_type(operand) = baseline_type(result)"); } if constexpr (std::is_same_v<Value, QuantizedTensor>) { if (!(min.is_per_tensor_quantized() and max.is_per_tensor_quantized() and operand.is_per_tensor_quantized() and result.is_per_tensor_quantized())) { return absl::InvalidArgumentError("Expected per-tensor quantization"); } } if (operand.layout().has_strides() || result.layout().has_strides()) { return absl::InvalidArgumentError("Strides not supported yet"); } return absl::OkStatus(); } template <ElementType storage_type, ElementType expressed_type, typename Value> absl::Status Clamp(const Value& min, const Value& operand, const Value& max, Value& result) { if (auto check = CheckParameters(min, operand, max, result); !check.ok()) { return check; } using S = Storage<storage_type>; const bool min_is_tensor = (min.rank() > 0); const bool max_is_tensor = (max.rank() > 0); const size_t n = result.num_elements(); auto operand_buffer = operand.buffer(); auto min_buffer = min.buffer(); auto max_buffer = max.buffer(); auto result_buffer = result.buffer(); if constexpr (std::is_same_v<Value, Tensor>) { if (storage_type != result.element_type()) { return absl::InvalidArgumentError("Unexpected tensor element type"); } typename S::Type min_value; typename S::Type max_value; for (size_t i = 0; i < n; ++i) { if (min_is_tensor || (i == 0)) { min_value = S::Get(min_buffer, i); } if (max_is_tensor || (i == 0)) { max_value = S::Get(max_buffer, i); } auto operand_value = S::Get(operand_buffer, i); auto result_value = std::min(max_value, std::max(min_value, operand_value)); S::Set(result_buffer, i, result_value); } } else { static_assert(std::is_same_v<Value, QuantizedTensor>); if (storage_type != result.storage_type()) { return absl::InvalidArgumentError("Unexpected storage type"); } else if (expressed_type != result.expressed_type()) { return absl::InvalidArgumentError("Unexpected expressed type"); } using ET = typename Storage<expressed_type>::Type; const QuantizedParameter& min_quant_param = min.type().element_type().parameters(0); const QuantizedParameter& max_quant_param = max.type().element_type().parameters(0); const QuantizedParameter& operand_quant_param = operand.type().element_type().parameters(0); const QuantizedParameter& 
result_quant_param = result.type().element_type().parameters(0); ET result_scale_inv = ET(1.0) / static_cast<ET>(result_quant_param.scale); ET min_expressed; ET max_expressed; for (size_t i = 0; i < n; ++i) { if (min_is_tensor || (i == 0)) { auto min_storage = S::Get(min_buffer, i); min_expressed = Dequantize<storage_type, expressed_type>( min_storage, min_quant_param); } if (max_is_tensor || (i == 0)) { auto max_storage = S::Get(max_buffer, i); max_expressed = Dequantize<storage_type, expressed_type>( max_storage, max_quant_param); } auto operand_storage = S::Get(operand_buffer, i); auto result_storage = DequantizeOpQuantizePartial<storage_type, expressed_type>( operand_storage, operand_quant_param, result_scale_inv, result_quant_param.zero_point, [=](auto x) { return std::min(max_expressed, std::max(min_expressed, x)); }); S::Set(result_buffer, i, result_storage); } if (auto status = CompleteQuantization<storage_type>(result); !status.ok()) { return status; } } return absl::OkStatus(); } } absl::Status Clamp(const Tensor& min, const Tensor& operand, const Tensor& max, Tensor& result) { DISPATCH_INT_FLOAT(Clamp, result.element_type(), min, operand, max, result); } absl::Status Clamp(const QuantizedTensor& min, const QuantizedTensor& operand, const QuantizedTensor& max, QuantizedTensor& result) { DISPATCH_QUANTIZED(Clamp, result.storage_type(), result.expressed_type(), min, operand, max, result); } }
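On the quantized path, Clamp dequantizes min and max (once, when they are scalars), clamps in the expressed domain, and requantizes with the precomputed result_scale_inv. The standalone sketch below restates that per-element arithmetic for an int8 storage type and a float expressed type; it is an illustration of the math only, not the SHLO API, and the real DequantizeOpQuantizePartial/CompleteQuantization helpers may round and saturate differently.

// Illustrative restatement of the quantized clamp arithmetic above
// (not the library API). Assumes int8 storage, float expressed type,
// and a single shared quantization parameter, as in the tests that follow.
#include <algorithm>
#include <cmath>
#include <cstdint>

struct QuantParam {
  float scale;
  int32_t zero_point;
};

// expressed = (storage - zero_point) * scale
inline float DequantizeExample(int8_t storage, const QuantParam& p) {
  return static_cast<float>(storage - p.zero_point) * p.scale;
}

inline int8_t QuantizedClampExample(int8_t operand, float min_expressed,
                                    float max_expressed, const QuantParam& p) {
  const float result_scale_inv = 1.0f / p.scale;  // hoisted, as in the kernel
  const float x = DequantizeExample(operand, p);
  const float clamped = std::min(max_expressed, std::max(min_expressed, x));
  // Requantize: storage = round(expressed / scale) + zero_point.
  // No saturation here, unlike the real helpers (simplifying assumption).
  return static_cast<int8_t>(std::lround(clamped * result_scale_inv) +
                             p.zero_point);
}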
#include <initializer_list> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h" #include "tensorflow/lite/experimental/shlo/legacy/src/debug.h" #include "tensorflow/lite/experimental/shlo/legacy/src/storage.h" #include "tensorflow/lite/experimental/shlo/legacy/test/util.h" namespace stablehlo { namespace testing { template <ElementType element_type> void test(std::initializer_list<DimensionSize>&& shape, std::vector<typename Storage<element_type>::Type>&& min_values, std::vector<typename Storage<element_type>::Type>&& operand_values, std::vector<typename Storage<element_type>::Type>&& max_values, std::vector<typename Storage<element_type>::Type>&& expected_values) { Shape min_shape = (min_values.size() > 1) ? Shape(shape) : Shape(); Tensor min(TensorType(std::move(min_shape), element_type), min_values.data()); Shape max_shape = (max_values.size() > 1) ? Shape(shape) : Shape(); Tensor max(TensorType(std::move(max_shape), element_type), max_values.data()); Tensor operand(TensorType(Shape(shape), element_type), operand_values.data()); Tensor expected(TensorType(Shape(shape), element_type), expected_values.data()); std::vector<typename Storage<element_type>::Type> result_values( expected_values.size()); Tensor result(TensorType(Shape(shape), element_type), result_values.data()); ASSERT_OK(Clamp(min, operand, max, result)); EXPECT_EQ(result, expected) << "min: " << min << "\nmax: " << max << "\noperand: " << operand; } template <ElementType storage_type, ElementType expressed_type> void test( QuantizedParameter&& quantized_parameter, std::initializer_list<DimensionSize>&& shape, std::vector<typename Storage<expressed_type>::Type>&& min_values, std::vector<typename Storage<expressed_type>::Type>&& operand_values, std::vector<typename Storage<expressed_type>::Type>&& max_values, std::vector<typename Storage<expressed_type>::Type>&& expected_values) { auto min_quant_values = QuantizeVector<storage_type, expressed_type>( min_values, quantized_parameter); auto operand_quant_values = QuantizeVector<storage_type, expressed_type>( operand_values, quantized_parameter); auto max_quant_values = QuantizeVector<storage_type, expressed_type>( max_values, quantized_parameter); auto expected_quant_values = QuantizeVector<storage_type, expressed_type>( expected_values, quantized_parameter); std::vector<typename Storage<storage_type>::Type> result_quant_values( expected_quant_values.size()); QuantizedTensorElementType element_type(storage_type, expressed_type, std::move(quantized_parameter)); Shape min_shape = (min_values.size() > 1) ? Shape(shape) : Shape(); QuantizedTensor min( QuantizedTensorType(std::move(min_shape), QuantizedTensorElementType(element_type)), min_quant_values.data()); Shape max_shape = (max_values.size() > 1) ? 
Shape(shape) : Shape(); QuantizedTensor max( QuantizedTensorType(std::move(max_shape), QuantizedTensorElementType(element_type)), max_quant_values.data()); QuantizedTensor operand( QuantizedTensorType(Shape(shape), QuantizedTensorElementType(element_type)), operand_quant_values.data()); QuantizedTensor expected( QuantizedTensorType(Shape(shape), QuantizedTensorElementType(element_type)), expected_quant_values.data()); QuantizedTensor result( QuantizedTensorType(Shape(shape), QuantizedTensorElementType(element_type)), result_quant_values.data()); ASSERT_OK(Clamp(min, operand, max, result)); EXPECT_EQ(result, expected) << "min: " << min << "\nmax: " << max << "\noperand: " << operand; } TEST(Clamp, Unquantized) { test<ElementType::kSI8>({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1}); test<ElementType::kSI16>({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1}); test<ElementType::kSI32>({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1}); test<ElementType::kBF16>({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1}); test<ElementType::kF16>({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1}); test<ElementType::kF32>({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1}); test<ElementType::kSI8>({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2}); test<ElementType::kSI16>({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2}); test<ElementType::kSI32>({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2}); test<ElementType::kBF16>({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2}); test<ElementType::kF16>({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2}); test<ElementType::kF32>({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2}); } TEST(Clamp, Quantized) { test<ElementType::kSI8, ElementType::kBF16>( {.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1}); test<ElementType::kSI8, ElementType::kF16>( {.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1}); test<ElementType::kSI8, ElementType::kF32>( {.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1}); test<ElementType::kSI16, ElementType::kBF16>( {.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1}); test<ElementType::kSI16, ElementType::kF16>( {.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1}); test<ElementType::kSI16, ElementType::kF32>( {.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1}); test<ElementType::kSI32, ElementType::kBF16>( {.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1}); test<ElementType::kSI32, ElementType::kF16>( {.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1}); test<ElementType::kSI32, ElementType::kF32>( {.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1}); test<ElementType::kSI8, ElementType::kBF16>({.scale = 0.1, .zero_point = 0}, {3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2}); test<ElementType::kSI8, ElementType::kF16>({.scale = 0.1, .zero_point = 0}, {3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2}); test<ElementType::kSI8, ElementType::kF32>({.scale = 0.1, .zero_point = 0}, {3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2}); test<ElementType::kSI16, ElementType::kBF16>({.scale = 0.1, .zero_point = 0}, {3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2}); test<ElementType::kSI16, ElementType::kF16>({.scale = 0.1, .zero_point = 0}, {3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2}); test<ElementType::kSI16, ElementType::kF32>({.scale = 0.1, .zero_point = 0}, {3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2}); test<ElementType::kSI32, ElementType::kBF16>({.scale = 0.1, .zero_point = 0}, {3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2}); 
test<ElementType::kSI32, ElementType::kF16>({.scale = 0.1, .zero_point = 0}, {3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2}); test<ElementType::kSI32, ElementType::kF32>({.scale = 0.1, .zero_point = 0}, {3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2}); } } }
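As a concrete trace of the quantized cases above: with scale = 0.1 and zero_point = 0, the operand {-2, 0, 2} is stored as {-20, 0, 20} and the scalar bounds min = 0 and max = 1 as 0 and 10. Each stored value is dequantized back to {-2, 0, 2}, clamped to [0, 1] giving {0, 0, 1}, and requantized to {0, 0, 10}, which is exactly the quantized form of the expected values {0, 0, 1}.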
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/src/clamp.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/test/clamp_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
d760ed97-1225-41c7-86ae-6346991c7cf1
cpp
tensorflow/tensorflow
validator
tensorflow/lite/experimental/acceleration/mini_benchmark/validator.cc
tensorflow/lite/experimental/acceleration/mini_benchmark/validator_test.cc
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h" #include <stdint.h> #include <string.h> #include <time.h> #include <unistd.h> #include <functional> #include <map> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/container/flat_hash_set.h" #include "tensorflow/lite/acceleration/configuration/configuration_generated.h" #include "tensorflow/lite/core/acceleration/configuration/delegate_registry.h" #include "tensorflow/lite/core/acceleration/configuration/stable_delegate_registry.h" #include "tensorflow/lite/core/api/profiler.h" #include "tensorflow/lite/core/c/c_api.h" #include "tensorflow/lite/core/c/c_api_types.h" #include "tensorflow/lite/core/c/common.h" #include "tensorflow/lite/core/interpreter.h" #include "tensorflow/lite/core/interpreter_builder.h" #include "tensorflow/lite/core/kernels/register.h" #include "tensorflow/lite/core/subgraph.h" #include "tensorflow/lite/experimental/acceleration/mini_benchmark/call_register.h" #include "tensorflow/lite/experimental/acceleration/mini_benchmark/constants.h" #include "tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_register.h" #include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h" #include "tensorflow/lite/logger.h" #include "tensorflow/lite/minimal_logging.h" #include "tensorflow/lite/mutable_op_resolver.h" #include "tensorflow/lite/tools/benchmark/register_custom_op.h" #include "tensorflow/lite/tools/model_loader.h" #ifndef TEMP_FAILURE_RETRY #ifdef __ANDROID__ #error "TEMP_FAILURE_RETRY not set although on Android" #else #define TEMP_FAILURE_RETRY(exp) exp #endif #endif namespace tflite { namespace acceleration { namespace { std::unique_ptr<tflite::delegates::DelegatePluginInterface> LoadDelegatePlugin( const std::string& name, const tflite::TFLiteSettings& tflite_settings) { return tflite::delegates::DelegatePluginRegistry::CreateByName( name + "Plugin", tflite_settings); } void AppendTensorDataToVector(const TfLiteTensor* tensor, std::vector<std::vector<char>>& output_vector) { std::vector<char> char_output(TfLiteTensorByteSize(tensor)); memcpy(char_output.data(), TfLiteTensorData(tensor), TfLiteTensorByteSize(tensor)); output_vector.emplace_back(std::move(char_output)); } inline bool HasTensorData(tools::ModelLoader* model_loader, const Subgraph& graph, int index) { const TfLiteTensor* tensor = graph.tensor(index); return tensor->allocation != nullptr || (model_loader->type() == tools::ModelLoader::Type::kPipeModelLoader && tensor->data.data != nullptr); } constexpr int64_t kMicrosInSecond = 1000 * 1000; constexpr int64_t kNanosInMicro = 1000; int64_t ElapsedTimeMicros() { struct timespec ts; #if defined(__ANDROID__) int err = clock_gettime(CLOCK_BOOTTIME, &ts); #elif defined(_WIN32) int err = 1; #else int err = clock_gettime(CLOCK_MONOTONIC, &ts); #endif if (err) { return -1; } return ts.tv_sec * kMicrosInSecond + ts.tv_nsec / kNanosInMicro; } class ValidatorProfiler : public ::tflite::Profiler { public: struct EventData { std::string tag; int64_t start_time_us = -1; int64_t end_time_us = -1; }; const std::vector<EventData>& events() { return events_; } uint32_t BeginEvent(const char* tag, EventType event_type, int64_t event_metadata1, int64_t event_metadata2) override { if (event_type != EventType::DEFAULT) { return 0; } events_.push_back({tag, ElapsedTimeMicros(), -1}); return events_.size(); } void EndEvent(uint32_t event_handle) override { if (event_handle == 0) { return; } events_[event_handle - 1].end_time_us = 
ElapsedTimeMicros(); } private: std::vector<EventData> events_; }; } MinibenchmarkStatus Validator::CheckGoldenOutput(Results* results_out) { if (!interpreter_ || !model_loader_->GetModel()) { return kMinibenchmarkPreconditionNotMet; } if (validation_entrypoint_->inputs().size() <= 1) { return kMinibenchmarkValidationSubgraphHasTooFewInputs; } if (validation_entrypoint_->inputs().size() > validation_entrypoint_->outputs().size()) { return kMinibenchmarkValidationSubgraphHasTooFewOutputs; } if (HasTensorData(model_loader_.get(), *validation_entrypoint_, validation_entrypoint_->inputs()[0])) { return kMinibenchmarkSuccess; } TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Running on CPU to get golden output for comparison."); tflite::InterpreterBuilder(*model_loader_->GetModel(), *resolver_)(&golden_interpreter_); if (!golden_interpreter_) { return kMinibenchmarkInterpreterBuilderFailed; } Subgraph* golden_validation_entrypoint = golden_interpreter_->subgraph(validation_entrypoint_index_); if (golden_validation_entrypoint->AllocateTensors() != kTfLiteOk) { return kMinibenchmarkAllocateTensorsFailed; } for (int i = 0; i < golden_validation_entrypoint->inputs().size() - 1; i++) { TfLiteTensor* input_tensor = golden_validation_entrypoint->tensor( golden_validation_entrypoint->inputs()[i]); memset(input_tensor->data.data, 0, input_tensor->bytes); } if (golden_validation_entrypoint->Invoke() != kTfLiteOk) { return kMinibenchmarkInvokeFailed; } for (int i = 0; i < validation_entrypoint_->inputs().size() - 1; i++) { TfLiteTensor* input_tensor = validation_entrypoint_->tensor(validation_entrypoint_->inputs()[i]); TfLiteTensor* golden_output_tensor = golden_validation_entrypoint->tensor( golden_validation_entrypoint->outputs()[i]); if (input_tensor->bytes != golden_output_tensor->bytes) { return kMinibenchmarkValidationSubgraphInputsDontMatchOutputs; } memcpy(input_tensor->data.data, golden_output_tensor->data.data, golden_output_tensor->bytes); } return kMinibenchmarkSuccess; } MinibenchmarkStatus Validator::LoadDelegate() { if (!compute_settings_) { return kMinibenchmarkPreconditionNotMet; } if (opaque_delegate_) { return kMinibenchmarkSuccess; } Delegate which_delegate = Delegate_NONE; bool is_stable_delegate_path_provided = false; auto tflite_settings = compute_settings_->tflite_settings(); if (tflite_settings) { which_delegate = compute_settings_->tflite_settings()->delegate(); if (tflite_settings->stable_delegate_loader_settings()) { is_stable_delegate_path_provided = tflite_settings->stable_delegate_loader_settings()->delegate_path() && !tflite_settings->stable_delegate_loader_settings() ->delegate_path() ->str() .empty(); } } std::string delegate_name; if (is_stable_delegate_path_provided && which_delegate == Delegate_GPU) { delegate_name = "GpuModule"; } else if (is_stable_delegate_path_provided) { delegate_name = "StableDelegate"; } else { switch (which_delegate) { case Delegate_NONE: return kMinibenchmarkSuccess; case Delegate_NNAPI: delegate_name = "Nnapi"; break; case Delegate_GPU: delegate_name = "Gpu"; break; case Delegate_XNNPACK: delegate_name = "XNNPack"; break; case Delegate_EDGETPU: delegate_name = "EdgeTpu"; break; default: return kMinibenchmarkDelegateNotSupported; } } TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Running mini-benchmark on %s", delegate_name.c_str()); if (!(delegate_plugin_ = LoadDelegatePlugin( delegate_name, *compute_settings_->tflite_settings()))) { return kMinibenchmarkDelegatePluginNotFound; } if (!(delegate_ = delegate_plugin_->Create())) { return 
kMinibenchmarkDelegateCreateFailed; } return kMinibenchmarkSuccess; } MinibenchmarkStatus Validator::LoadOpaqueDelegate() { if (!compute_settings_) { return kMinibenchmarkPreconditionNotMet; } bool is_stable_delegate_name_provided = false; auto tflite_settings = compute_settings_->tflite_settings(); if (!tflite_settings) { return kMinibenchmarkSuccess; } auto stable_delegate_settings = tflite_settings->stable_delegate_loader_settings(); is_stable_delegate_name_provided = stable_delegate_settings && stable_delegate_settings->delegate_name() && !stable_delegate_settings->delegate_name()->str().empty(); if (!is_stable_delegate_name_provided) { return kMinibenchmarkSuccess; } std::string delegate_name = stable_delegate_settings->delegate_name()->str(); TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Running mini-benchmark on %s", delegate_name.c_str()); const TfLiteStableDelegate* stable_delegate = delegates::StableDelegateRegistry::RetrieveStableDelegate(delegate_name); if (!stable_delegate) { TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to load stable delegate plugin %s", delegate_name.c_str()); return kMinibenchmarkDelegatePluginNotFound; } const TfLiteOpaqueDelegatePlugin* delegate_plugin = stable_delegate->delegate_plugin; opaque_delegate_ = TfLiteOpaqueDelegatePtr( delegate_plugin->create(tflite_settings), delegate_plugin->destroy); return kMinibenchmarkSuccess; } MinibenchmarkStatus Validator::CreateInterpreter(int* delegate_error_out, int* delegated_kernels_out) { if (!delegate_error_out || !delegated_kernels_out || !model_loader_->GetModel()) { return kMinibenchmarkPreconditionNotMet; } if (interpreter_) { return kMinibenchmarkSuccess; } *delegate_error_out = 0; if (compute_settings_->tflite_settings() && compute_settings_->tflite_settings()->disable_default_delegates()) { resolver_ = std::make_unique< ::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates>(); } else { resolver_ = std::make_unique<::tflite::ops::builtin::BuiltinOpResolver>(); } resolver_->AddCustom("validation/call", ::tflite::acceleration::ops::Register_CALL(), 1); resolver_->AddCustom( "validation/decode_jpeg", ::tflite::acceleration::decode_jpeg_kernel::Register_DECODE_JPEG(), 1); RegisterSelectedOps(resolver_.get()); tflite::InterpreterBuilder builder(*model_loader_->GetModel(), *resolver_); if (delegate_ != nullptr) { builder.AddDelegate(delegate_.get()); } if (opaque_delegate_ != nullptr) { builder.AddDelegate(opaque_delegate_.get()); } TfLiteStatus status = builder(&interpreter_); if (!interpreter_) { *delegate_error_out = delegate_plugin_ ? 
delegate_plugin_->GetDelegateErrno(delegate_.get()) : 0; TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Creating Interpreter failed with error code %d.", status); return kMinibenchmarkInterpreterBuilderFailed; } main_model_ = interpreter_->subgraph(0); validation_entrypoint_index_ = -1; for (int i = 0; i < interpreter_->subgraphs_size(); i++) { Subgraph* subgraph = interpreter_->subgraph(i); if (subgraph->GetName() == kValidationGraphName) { validation_entrypoint_index_ = i; validation_entrypoint_ = subgraph; } else if (subgraph->GetName() == "VALIDATION:metrics") { has_accuracy_validation_ = true; } } if (!validation_entrypoint_) { return kMinibenchmarkValidationSubgraphNotFound; } if (validation_entrypoint_->inputs().empty()) { return kMinibenchmarkValidationSubgraphHasTooFewInputs; } if (!HasTensorData(model_loader_.get(), *validation_entrypoint_, validation_entrypoint_->inputs().back())) { return kMinibenchmarkValidationInputMissing; } if (validation_entrypoint_->AllocateTensors() != kTfLiteOk) { return kMinibenchmarkAllocateTensorsFailed; } absl::flat_hash_set<int> checked_node_ids; int num_delegated_kernels = 0; for (int i = 0; i < interpreter_->execution_plan().size(); ++i) { int node_id = interpreter_->execution_plan()[i]; if (checked_node_ids.find(node_id) != checked_node_ids.end()) { continue; } const TfLiteNode& node = interpreter_->node_and_registration(node_id)->first; if (node.delegate != nullptr) { num_delegated_kernels++; checked_node_ids.insert(node_id); } } *delegated_kernels_out = num_delegated_kernels; bool fully_delegated = (num_delegated_kernels == 1 && interpreter_->execution_plan().size() == 1); if (!fully_delegated) { TFLITE_LOG_PROD(TFLITE_LOG_WARNING, "The model will be %s executed by the delegate.", num_delegated_kernels > 0 ? "partially" : "not"); } return kMinibenchmarkSuccess; } Validator::Status Validator::RunValidation(Results* results_out) { BenchmarkStage stage = BenchmarkStage_INITIALIZATION; if (!results_out) { return Validator::Status{kMinibenchmarkPreconditionNotMet, stage}; } if (!model_loader_) { return Validator::Status{kMinibenchmarkModelReadFailed, stage}; } if (!model_loader_->Init()) { return Validator::Status{kMinibenchmarkModelInitFailed, stage}; } #define MB_RETURN_IF_ERROR(s, bs) \ { \ MinibenchmarkStatus c = (s); \ if (c != kMinibenchmarkSuccess) return Validator::Status{c, (bs)}; \ } int64_t delegate_load_start_time_us = ElapsedTimeMicros(); MB_RETURN_IF_ERROR(LoadOpaqueDelegate(), stage); MB_RETURN_IF_ERROR(LoadDelegate(), stage); MB_RETURN_IF_ERROR(CreateInterpreter(&results_out->delegate_error, &results_out->delegated_kernels), stage); int64_t delegate_load_end_time_us = ElapsedTimeMicros(); ValidatorProfiler profiler; stage = BenchmarkStage_INFERENCE; if (has_accuracy_validation_) { MB_RETURN_IF_ERROR(CheckGoldenOutput(results_out), stage); } main_model_->SetProfiler(&profiler, 0); TfLiteStatus status = validation_entrypoint_->Invoke(); main_model_->SetProfiler(nullptr, 0); if (status != kTfLiteOk) { MB_RETURN_IF_ERROR(kMinibenchmarkInvokeFailed, stage); } int model_output_size = main_model_->outputs().size(); if (has_accuracy_validation_) { const std::string kMetricPrefix = "metrics/"; const std::string kOk("ok"); for (int i = model_output_size; i < validation_entrypoint_->outputs().size(); i++) { TfLiteTensor* tensor = validation_entrypoint_->tensor(validation_entrypoint_->outputs()[i]); std::string name = tensor->name; if (name.find(kMetricPrefix) != 0) { continue; } name = name.substr(kMetricPrefix.size()); if (kOk == name) { results_out->ok 
= *(tensor->data.b); } else { std::vector<float> values; int count = 1; for (int j = 0; j < tensor->dims->size; j++) { count *= tensor->dims->data[j]; } values.reserve(count); for (int j = 0; j < count; j++) { values.push_back(tensor->data.f[j]); TFLITE_LOG_PROD(TFLITE_LOG_INFO, " %s %.4f", name.c_str(), tensor->data.f[j]); } results_out->metrics[name] = values; } } TFLITE_LOG_PROD(TFLITE_LOG_INFO, " accuracy: %s", results_out->ok ? "ok" : "not ok"); } else { results_out->actual_inference_output.clear(); results_out->actual_inference_output.reserve(model_output_size); for (int i = 0; i < model_output_size; i++) { AppendTensorDataToVector( validation_entrypoint_->tensor(validation_entrypoint_->outputs()[i]), results_out->actual_inference_output); } } results_out->delegate_prep_time_us = (delegate_load_end_time_us == -1 || delegate_load_start_time_us == -1) ? -1 : delegate_load_end_time_us - delegate_load_start_time_us; TFLITE_LOG_PROD(TFLITE_LOG_INFO, " Delegate preparation took %d us", static_cast<int>(results_out->delegate_prep_time_us)); for (const auto& e : profiler.events()) { if (e.tag == "Invoke" && e.start_time_us != -1 && e.end_time_us != -1) { results_out->execution_time_us.push_back(e.end_time_us - e.start_time_us); TFLITE_LOG_PROD(TFLITE_LOG_INFO, " Inference took %d us", static_cast<int>(e.end_time_us - e.start_time_us)); } } #undef MB_RETURN_IF_ERROR return Validator::Status{kMinibenchmarkSuccess}; } int64_t Validator::BootTimeMicros() { return ElapsedTimeMicros(); } int64_t Validator::WallTimeMicros() { struct timespec ts; #ifndef _WIN32 int err = clock_gettime(CLOCK_REALTIME, &ts); #else int err = 1; #endif if (err) { return -1; } return ts.tv_sec * kMicrosInSecond + ts.tv_nsec / kNanosInMicro; } } }
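When accuracy validation is embedded, RunValidation relies on a naming convention for the extra outputs of the validation subgraph: any output tensor past the main model's outputs whose name starts with "metrics/" is a metric, "metrics/ok" carries the scalar boolean verdict written to results_out->ok, and every other metric is read as a float vector into results_out->metrics. A minimal sketch of that convention (illustrative only; the real parsing is inline in RunValidation above, and the helper name here is hypothetical):

// Illustrative restatement of the "metrics/" output-naming convention used
// by Validator::RunValidation above (not a library API; name is made up).
#include <optional>
#include <string>

// Returns the metric name if `tensor_name` names a metric output,
// e.g. "metrics/ok" -> "ok"; otherwise std::nullopt. Mirrors the
// `name.find(kMetricPrefix) != 0` check in the source.
inline std::optional<std::string> MetricName(const std::string& tensor_name) {
  const std::string kMetricPrefix = "metrics/";
  if (tensor_name.find(kMetricPrefix) != 0) return std::nullopt;
  return tensor_name.substr(kMetricPrefix.size());
}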
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h" #include <iostream> #include <map> #include <memory> #include <string> #include <utility> #include <vector> #include <gtest/gtest.h> #include "flatbuffers/flatbuffers.h" #if FLATBUFFERS_LITTLEENDIAN == 0 #include "tensorflow/lite/core/model_builder.h" #endif #include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h" #include "tensorflow/lite/acceleration/configuration/configuration.pb.h" #include "tensorflow/lite/acceleration/configuration/configuration_generated.h" #include "tensorflow/lite/acceleration/configuration/proto_to_flatbuffer.h" #include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_model.h" #include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_validation_model.h" #include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h" #include "tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h" #include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h" #include "tensorflow/lite/tools/model_loader.h" namespace tflite { namespace acceleration { namespace { using flatbuffers::FlatBufferBuilder; constexpr int kOutputTensorSize = 1001; class ValidatorTest : public ::testing::Test { protected: void SetUp() override { std::string validation_model_path = MiniBenchmarkTestHelper::DumpToTempFile( "mobilenet_quant_with_validation.tflite", g_tflite_acceleration_embedded_mobilenet_validation_model, g_tflite_acceleration_embedded_mobilenet_validation_model_len); ASSERT_TRUE(!validation_model_path.empty()); validation_model_loader_ = std::make_unique<tools::PathModelLoader>(validation_model_path); std::string plain_model_path = MiniBenchmarkTestHelper::DumpToTempFile( "mobilenet_quant.tflite", g_tflite_acceleration_embedded_mobilenet_model, g_tflite_acceleration_embedded_mobilenet_model_len); ASSERT_TRUE(!plain_model_path.empty()); plain_model_loader_ = std::make_unique<tools::PathModelLoader>(plain_model_path); compute_settings_fbb_.Finish(CreateComputeSettings(compute_settings_fbb_)); default_compute_settings_ = flatbuffers::GetRoot<ComputeSettings>( compute_settings_fbb_.GetBufferPointer()); } std::unique_ptr<tools::ModelLoader> validation_model_loader_; std::unique_ptr<tools::ModelLoader> plain_model_loader_; FlatBufferBuilder compute_settings_fbb_; const ComputeSettings* default_compute_settings_; }; TEST_F(ValidatorTest, HappyPathOnCpuWithEmbeddedValidation) { ASSERT_TRUE(validation_model_loader_->Init()); Validator validator(std::move(validation_model_loader_), default_compute_settings_); Validator::Results results; Validator::Status validation_run = validator.RunValidation(&results); EXPECT_EQ(validation_run.status, kMinibenchmarkSuccess); EXPECT_EQ(validation_run.stage, BenchmarkStage_UNKNOWN); EXPECT_TRUE(results.ok); EXPECT_GE(results.metrics.size(), 0); EXPECT_EQ(results.delegate_error, 0); EXPECT_TRUE(results.actual_inference_output.empty()); } TEST_F(ValidatorTest, HappyPathOnCpuWithCustomValidation) { ASSERT_TRUE(plain_model_loader_->Init()); ASSERT_TRUE(validation_model_loader_->Init()); const SubGraph* main_model = plain_model_loader_->GetModel()->GetModel()->subgraphs()->Get(0); const int model_output_size = main_model->outputs()->size(); int model_input_byte_size = 1; for (int shape_i : *main_model->tensors()->Get(main_model->inputs()->Get(0))->shape()) { model_input_byte_size *= shape_i; } int batch_size = 5; 
FlatBufferBuilder model_with_input; CustomValidationEmbedder embedder( batch_size, {std::vector<uint8_t>(batch_size * model_input_byte_size, 1)}); EXPECT_EQ(embedder.BuildModel(*plain_model_loader_->GetModel()->GetModel(), model_with_input), kMinibenchmarkSuccess); std::string serialized_str( reinterpret_cast<const char*>(model_with_input.GetBufferPointer()), model_with_input.GetSize()); #if FLATBUFFERS_LITTLEENDIAN == 0 tflite::FlatBufferModel::ByteSwapSerializedModel(&serialized_str, true); #endif std::string model_path = MiniBenchmarkTestHelper::DumpToTempFile( "mobilenet_quant_with_input.tflite", reinterpret_cast<const unsigned char*>(serialized_str.c_str()), serialized_str.size()); ASSERT_TRUE(!model_path.empty()); auto model_loader = std::make_unique<tools::PathModelLoader>(model_path); Validator validator(std::move(model_loader), default_compute_settings_); Validator::Results results; Validator::Status validation_run = validator.RunValidation(&results); EXPECT_EQ(validation_run.status, kMinibenchmarkSuccess); EXPECT_EQ(validation_run.stage, BenchmarkStage_UNKNOWN); EXPECT_FALSE(results.ok); EXPECT_EQ(results.metrics.size(), 0); EXPECT_EQ(results.delegate_error, 0); EXPECT_EQ(results.actual_inference_output.size(), model_output_size); EXPECT_EQ(results.actual_inference_output[0].size(), batch_size * kOutputTensorSize); } TEST_F(ValidatorTest, DelegateNotSupported) { proto::ComputeSettings settings_proto; settings_proto.mutable_tflite_settings()->set_delegate(proto::CORE_ML); flatbuffers::FlatBufferBuilder fbb; const ComputeSettings* settings = ConvertFromProto(settings_proto, &fbb); Validator validator(std::move(validation_model_loader_), settings); Validator::Results results; Validator::Status validation_run = validator.RunValidation(&results); EXPECT_EQ(validation_run.status, kMinibenchmarkDelegateNotSupported); EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION); } TEST_F(ValidatorTest, NoValidationSubgraph) { Validator validator(std::move(plain_model_loader_), default_compute_settings_); Validator::Results results; Validator::Status validation_run = validator.RunValidation(&results); EXPECT_EQ(validation_run.status, kMinibenchmarkValidationSubgraphNotFound); EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION); } TEST_F(ValidatorTest, NoValidationInputData) { ASSERT_TRUE(plain_model_loader_->Init()); FlatBufferBuilder model_with_input; CustomValidationEmbedder embedder(1, {{}}); EXPECT_EQ(embedder.BuildModel(*plain_model_loader_->GetModel()->GetModel(), model_with_input), kMinibenchmarkSuccess); std::string model_path = MiniBenchmarkTestHelper::DumpToTempFile( "mobilenet_quant_with_input.tflite", model_with_input.GetBufferPointer(), model_with_input.GetSize()); ASSERT_TRUE(!model_path.empty()); auto model_loader = std::make_unique<tools::PathModelLoader>(model_path); Validator validator(std::move(model_loader), default_compute_settings_); Validator::Results results; Validator::Status validation_run = validator.RunValidation(&results); EXPECT_EQ(validation_run.status, kMinibenchmarkValidationInputMissing); EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION); } TEST_F(ValidatorTest, InvalidModel) { const std::string dump_path = MiniBenchmarkTestHelper::DumpToTempFile( "foo.tflite", g_tflite_acceleration_embedded_mobilenet_validation_model, g_tflite_acceleration_embedded_mobilenet_validation_model_len - 12000); ASSERT_TRUE(!dump_path.empty()); Validator validator(std::make_unique<tools::PathModelLoader>(dump_path), default_compute_settings_); 
Validator::Results results; Validator::Status validation_run = validator.RunValidation(&results); EXPECT_EQ(validation_run.status, kMinibenchmarkModelInitFailed); EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION); } TEST_F(ValidatorTest, EmptyModelLoader) { Validator validator(nullptr, default_compute_settings_); Validator::Results results; Validator::Status validation_run = validator.RunValidation(&results); EXPECT_EQ(validation_run.status, kMinibenchmarkModelReadFailed); EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/validator.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/validator_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ab1f52e5-859f-4781-a52a-c1677ca42465
cpp
tensorflow/tensorflow
parallel_map_dataset_op
tensorflow/core/kernels/data/parallel_map_dataset_op.cc
tensorflow/core/kernels/data/parallel_map_dataset_op_test.cc
#include "tensorflow/core/kernels/data/parallel_map_dataset_op.h" #include <cstddef> #include <deque> #include <functional> #include <limits> #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include "absl/base/call_once.h" #include "absl/status/status.h" #include "absl/strings/str_format.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h" #include "tensorflow/core/data/dataset_utils.h" #include "tensorflow/core/data/name_utils.h" #include "tensorflow/core/data/stats_utils.h" #include "tensorflow/core/data/unbounded_thread_pool.h" #include "tensorflow/core/framework/dataset.h" #include "tensorflow/core/framework/metrics.h" #include "tensorflow/core/framework/model.h" #include "tensorflow/core/framework/partial_tensor_shape.h" #include "tensorflow/core/framework/stats_aggregator.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/random/random.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/stringprintf.h" #include "tensorflow/core/profiler/lib/traceme.h" #include "tensorflow/core/profiler/lib/traceme_encode.h" #include "tensorflow/core/protobuf/error_codes.pb.h" #include "tsl/platform/logging.h" namespace tensorflow { namespace data { constexpr const char* const ParallelMapDatasetOp::kDatasetType; constexpr const char* const ParallelMapDatasetOp::kInputDataset; constexpr const char* const ParallelMapDatasetOp::kOtherArguments; constexpr const char* const ParallelMapDatasetOp::kNumParallelCalls; constexpr const char* const ParallelMapDatasetOp::kFunc; constexpr const char* const ParallelMapDatasetOp::kTarguments; constexpr const char* const ParallelMapDatasetOp::kOutputTypes; constexpr const char* const ParallelMapDatasetOp::kOutputShapes; constexpr const char* const ParallelMapDatasetOp::kUseInterOpParallelism; constexpr const char* const ParallelMapDatasetOp::kDeterministic; constexpr const char* const ParallelMapDatasetOp::kSloppy; constexpr const char* const ParallelMapDatasetOp::kPreserveCardinality; namespace { constexpr char kParallelMapDatasetV1[] = "ParallelMapDataset"; constexpr char kParallelMapDatasetV2[] = "ParallelMapDatasetV2"; constexpr char kInvocationResults[] = "invocation_results"; constexpr char kSize[] = "size"; constexpr char kEndOfInput[] = "end_of_input"; constexpr char kErrorCode[] = "code"; constexpr char kErrorMessage[] = "error_message"; constexpr int kStatsReportingPeriodMillis = 1000; constexpr int kUnboundedThreadpoolAutotuningFactor = 10; } class ParallelMapDatasetOp::Dataset : public DatasetBase { public: Dataset(OpKernelContext* ctx, const DatasetBase* input, int64_t num_parallel_calls, const DataTypeVector& output_types, const std::vector<PartialTensorShape>& output_shapes, DeterminismPolicy deterministic, std::unique_ptr<CapturedFunction> captured_func, bool preserve_cardinality, bool use_unbounded_threadpool, int op_version) : Dataset(DatasetContext(ctx), input, num_parallel_calls, output_types, output_shapes, deterministic, std::move(captured_func), preserve_cardinality, use_unbounded_threadpool, op_version) {} Dataset(DatasetContext dataset_context, const DatasetBase* input, int64_t num_parallel_calls, const DataTypeVector& output_types, const std::vector<PartialTensorShape>& output_shapes, DeterminismPolicy deterministic, std::unique_ptr<CapturedFunction> 
captured_func, bool preserve_cardinality, bool use_unbounded_threadpool, int op_version) : DatasetBase(std::move(dataset_context)), input_(input), num_parallel_calls_(num_parallel_calls), output_types_(output_types), output_shapes_(output_shapes), deterministic_(deterministic), preserve_cardinality_(preserve_cardinality), use_unbounded_threadpool_(use_unbounded_threadpool), captured_func_(std::move(captured_func)), op_version_(op_version) { input_->Ref(); random_indexing_compatible_ = absl::OkStatus(); if (input_ != nullptr) { random_indexing_compatible_ = input_->RandomIndexingCompatible(); } } ~Dataset() override { input_->Unref(); } std::unique_ptr<IteratorBase> MakeIteratorInternal( const string& prefix) const override { name_utils::IteratorPrefixParams params; params.op_version = op_version_; return std::make_unique<Iterator>(Iterator::Params{ this, name_utils::IteratorPrefix(kDatasetType, prefix, params)}); } const DataTypeVector& output_dtypes() const override { return output_types_; } const std::vector<PartialTensorShape>& output_shapes() const override { return output_shapes_; } string DebugString() const override { name_utils::DatasetDebugStringParams params; params.op_version = op_version_; return name_utils::DatasetDebugString(ParallelMapDatasetOp::kDatasetType, params); } int64_t CardinalityInternal(CardinalityOptions options) const override { if (preserve_cardinality_) { return input_->Cardinality(options); } else { return kUnknownCardinality; } } Status Get(OpKernelContext* ctx, int64 index, std::vector<Tensor>* out_tensors) const override { TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index)); absl::call_once(instantiated_captured_func_once_, [this, ctx] { instantiated_captured_func_status_ = captured_func_->Instantiate( InstantiateCapturedFunctionParams(ctx), &instantiated_captured_func_); }); TF_RETURN_IF_ERROR(instantiated_captured_func_status_); std::vector<Tensor> args; TF_RETURN_IF_ERROR(input_->Get(ctx, index, &args)); return instantiated_captured_func_->RunInstantiated(args, out_tensors); } Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override { inputs->push_back(input_); return absl::OkStatus(); } Status CheckExternalState() const override { TF_RETURN_IF_ERROR(captured_func_->CheckExternalState()); return input_->CheckExternalState(); } absl::Status RandomIndexingCompatible() const override { return random_indexing_compatible_; } protected: Status AsGraphDefInternal(SerializationContext* ctx, DatasetGraphDefBuilder* b, Node** output) const override { Node* input_graph_node = nullptr; TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node)); std::vector<Node*> other_arguments; DataTypeVector other_arguments_types; TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments, &other_arguments_types)); Node* num_parallel_calls = nullptr; if (op_version_ == 1) { TF_RETURN_IF_ERROR(b->AddScalar(static_cast<int32>(num_parallel_calls_), &num_parallel_calls)); } else { TF_RETURN_IF_ERROR( b->AddScalar(num_parallel_calls_, &num_parallel_calls)); } std::vector<std::pair<StringPiece, AttrValue>> attrs; AttrValue f_attr; b->BuildAttrValue(captured_func_->func(), &f_attr); attrs.emplace_back(kFunc, f_attr); AttrValue other_arguments_types_attr; b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr); attrs.emplace_back(kTarguments, other_arguments_types_attr); AttrValue use_inter_op_parallelism_attr; b->BuildAttrValue(captured_func_->use_inter_op_parallelism(), &use_inter_op_parallelism_attr); 
attrs.emplace_back(kUseInterOpParallelism, use_inter_op_parallelism_attr); if (op_version_ == 1) { AttrValue sloppy_attr; b->BuildAttrValue(deterministic_.IsNondeterministic(), &sloppy_attr); attrs.emplace_back(kSloppy, sloppy_attr); } if (op_version_ == 2) { AttrValue deterministic_attr; b->BuildAttrValue(deterministic_.String(), &deterministic_attr); attrs.emplace_back(kDeterministic, deterministic_attr); } AttrValue preserve_cardinality_attr; b->BuildAttrValue(preserve_cardinality_, &preserve_cardinality_attr); attrs.emplace_back(kPreserveCardinality, preserve_cardinality_attr); AttrValue use_unbounded_threadpool_attr; b->BuildAttrValue(use_unbounded_threadpool_, &use_unbounded_threadpool_attr); attrs.emplace_back(kUseUnboundedThreadpool, use_unbounded_threadpool_attr); TF_RETURN_IF_ERROR(b->AddDataset( this, {std::make_pair(0, input_graph_node), std::make_pair(2, num_parallel_calls)}, {std::make_pair(1, other_arguments)}, attrs, output)); return absl::OkStatus(); } private: class Iterator : public DatasetIterator<Dataset> { public: explicit Iterator(const Params& params) : DatasetIterator<Dataset>(params), mu_(std::make_shared<mutex>()), cond_var_(std::make_shared<condition_variable>()), num_parallel_calls_(std::make_shared<model::SharedState>( params.dataset->num_parallel_calls_, mu_, cond_var_)), deterministic_(params.dataset->deterministic_.IsDeterministic() || params.dataset->deterministic_.IsDefault()), preserve_cardinality_(params.dataset->preserve_cardinality_), use_unbounded_threadpool_(params.dataset->use_unbounded_threadpool_), autotune_(params.dataset->num_parallel_calls_ == model::kAutotune) {} ~Iterator() override { CancelThreads(true); input_impl_.reset(); if (deregister_fn_) deregister_fn_(); } bool SymbolicCheckpointCompatible() const override { return deterministic_; } Status Initialize(IteratorContext* ctx) override { mutex_lock l(*mu_); interleave_depth_ = ctx->interleave_depth(); if (use_unbounded_threadpool_) { unbounded_thread_pool_ = std::make_unique<UnboundedThreadPool>( ctx->env(), "tf_data_map_unbounded_thread_pool"); } if (num_parallel_calls_->value == model::kAutotune) { num_parallel_calls_->value = GetAutotuneDefaultParallelism(ctx); } cancellation_manager_ = std::make_unique<CancellationManager>(); TF_RETURN_IF_ERROR(RegisterCancellationCallback( ctx->cancellation_manager(), [this]() { CancelThreads(false); }, &deregister_fn_)); auto params = std::make_unique<IteratorContext::Params>(ctx); params->cancellation_manager = cancellation_manager_.get(); auto iter_ctx = std::make_unique<IteratorContext>(*params); TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator( iter_ctx.get(), this, prefix(), &input_impl_)); ctx->MergeCheckpoint(iter_ctx->checkpoint()); TF_RETURN_IF_ERROR(dataset()->captured_func_->Instantiate( ctx, &instantiated_captured_func_)); if (ctx->warm_start() && !ctx->is_restoring()) { EnsureThreadsStarted(ctx); } return absl::OkStatus(); } Status GetNextInternal(IteratorContext* ctx, std::vector<Tensor>* out_tensors, bool* end_of_sequence) override { std::shared_ptr<InvocationResult> result; { mutex_lock l(*mu_); EnsureThreadsStarted(ctx); while (ShouldWait(&result)) { RecordStop(ctx); cond_var_->wait(l); RecordStart(ctx); } if (cancelled_) { return errors::Cancelled("Iterator was cancelled"); } } RecordStop(ctx); result->notification.WaitForNotification(); RecordStart(ctx); tsl::profiler::TraceMe traceme([&] { return tsl::profiler::TraceMeEncode("ParallelMapConsume", {{"element_id", result->uid}}); }); return ProcessResult(ctx, result, 
out_tensors, end_of_sequence); } protected: std::shared_ptr<model::Node> CreateNode( IteratorContext* ctx, model::Node::Args args) const override { std::shared_ptr<model::Parameter> parameter; double max_parallelism_value = ctx->runner_threadpool_size(); if (use_unbounded_threadpool_) { max_parallelism_value *= kUnboundedThreadpoolAutotuningFactor; } if (num_parallel_calls_ && dataset()->num_parallel_calls_ == model::kAutotune) { parameter = model::MakeParameter( "parallelism", num_parallel_calls_, 1, max_parallelism_value, GetAutotuneDefaultParallelism(ctx)); } else { parameter = model::MakeParameter("parallelism", num_parallel_calls_, 1, max_parallelism_value); } std::optional<int64_t> estimated_element_size = dataset()->GetEstimatedElementSize(); if (!estimated_element_size) { VLOG(2) << absl::StrFormat( "Cannot estimate the size of the output tensor because the " "output shape of node %s(id:%d) is only partially known.", args.name, args.id); } return model::MakeAsyncKnownRatioNode( std::move(args), 1, {std::move(parameter)}, false, estimated_element_size); } Status SaveInternal(SerializationContext* ctx, IteratorStateWriter* writer) override { TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus( dataset()->captured_func_->CheckExternalState())); if (ctx->symbolic_checkpoint()) { return absl::OkStatus(); } mutex_lock l(*mu_); while (num_calls_ > 0) { cond_var_->wait(l); } if (num_calls_ != 0) { return errors::FailedPrecondition( "Unexpected outstanding calls encountered."); } TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_)); TF_RETURN_IF_ERROR(writer->WriteScalar( prefix(), absl::StrCat(kInvocationResults, "_", kSize), invocation_results_.size())); for (size_t i = 0; i < invocation_results_.size(); i++) { const auto& result = *(invocation_results_[i]); std::string element_prefix = absl::StrCat(prefix(), "_", kInvocationResults, "[", i, "]"); TF_RETURN_IF_ERROR( WriteStatusLocked(writer, element_prefix, result.status)); TF_RETURN_IF_ERROR(writer->WriteScalar(element_prefix, kSize, result.return_values.size())); for (size_t j = 0; j < result.return_values.size(); j++) { TF_RETURN_IF_ERROR(writer->WriteTensor(element_prefix, absl::StrCat("[", j, "]"), result.return_values[j])); } TF_RETURN_IF_ERROR( writer->WriteScalar(element_prefix, kEndOfInput, static_cast<int64_t>(result.end_of_input))); } return absl::OkStatus(); } Status RestoreInternal(IteratorContext* ctx, IteratorStateReader* reader) override { mutex_lock l(*mu_); TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_)); DCHECK(invocation_results_.empty()); if (ctx->symbolic_checkpoint()) { return absl::OkStatus(); } int64_t invocation_results_size; TF_RETURN_IF_ERROR(reader->ReadScalar( prefix(), absl::StrCat(kInvocationResults, "_", kSize), &invocation_results_size)); for (size_t i = 0; i < invocation_results_size; i++) { invocation_results_.push_back(std::make_shared<InvocationResult>(ctx)); auto& result = *invocation_results_.back(); std::string element_prefix = absl::StrCat(prefix(), "_", kInvocationResults, "[", i, "]"); TF_RETURN_IF_ERROR( ReadStatusLocked(reader, element_prefix, &result.status)); size_t num_return_values; { int64_t size; TF_RETURN_IF_ERROR(reader->ReadScalar(element_prefix, kSize, &size)); num_return_values = static_cast<size_t>(size); if (num_return_values != size) { return errors::InvalidArgument( element_prefix, ",", kSize, ": ", size, " is not a valid value of type size_t."); } } result.return_values.reserve(num_return_values); for (size_t j = 0; j < num_return_values; j++) { 
result.return_values.emplace_back(); TF_RETURN_IF_ERROR(reader->ReadTensor(ctx->flr(), element_prefix, absl::StrCat("[", j, "]"), &result.return_values.back())); } int64_t end_of_input; TF_RETURN_IF_ERROR( reader->ReadScalar(element_prefix, kEndOfInput, &end_of_input)); result.end_of_input = static_cast<bool>(end_of_input); RecordBufferEnqueue(ctx, result.return_values); result.notification.Notify(); } return absl::OkStatus(); } TraceMeMetadata GetTraceMeMetadata() const override { int64_t parallelism = -1; if (mu_->try_lock()) { parallelism = num_parallel_calls_->value; mu_->unlock(); } data::TraceMeMetadata result; result.push_back( std::make_pair("autotune", autotune_ ? "true" : "false")); result.push_back( std::make_pair("deterministic", deterministic_ ? "true" : "false")); result.push_back( std::make_pair("use_unbounded_threadpool", use_unbounded_threadpool_ ? "true" : "false")); result.push_back(std::make_pair( "parallelism", parallelism == -1 ? kTraceInfoUnavailable : strings::Printf("%lld", static_cast<long long>(parallelism)))); result.push_back(std::make_pair( "interleave_depth", strings::Printf("%lld", static_cast<long long>(interleave_depth_)))); return result; } private: struct InvocationResult { explicit InvocationResult(IteratorContext* ctx) : uid(tensorflow::EnvTime::NowNanos()), checkpoint(MemoryCheckpoint{ctx->id_registry()}) {} Notification notification; Status status; std::vector<Tensor> return_values; bool end_of_input = false; const int64_t uid; MemoryCheckpoint checkpoint; }; void CancelThreads(bool wait) TF_LOCKS_EXCLUDED(mu_) { cancellation_manager_->StartCancel(); mutex_lock l(*mu_); cancelled_ = true; cond_var_->notify_all(); while (wait && num_calls_ > 0) { cond_var_->wait(l); } } void EnsureThreadsStarted(IteratorContext* ctx) TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) { if (!runner_thread_) { auto ctx_copy = std::make_shared<IteratorContext>(*ctx); runner_thread_ = ctx->StartThread( "tf_data_parallel_map", std::bind(&Iterator::RunnerThread, this, ctx_copy)); if (ctx->stats_aggregator()) { stats_thread_ = ctx->StartThread( "tf_data_parallel_map_stats", std::bind(&Iterator::StatsThread, this, ctx_copy)); } } } void CallCompleted(const std::shared_ptr<IteratorContext>& ctx, const std::shared_ptr<InvocationResult>& result) TF_LOCKS_EXCLUDED(*mu_) { mutex_lock l(*mu_); num_calls_--; result->notification.Notify(); cond_var_->notify_all(); } void CallFunction(const std::shared_ptr<IteratorContext>& ctx, const std::shared_ptr<InvocationResult>& result) TF_LOCKS_EXCLUDED(*mu_) { tsl::profiler::TraceMe traceme([&] { return tsl::profiler::TraceMeEncode("ParallelMapProduce", {{"element_id", result->uid}}); }); std::vector<Tensor> input_element; result->status = input_impl_->GetNext(ctx.get(), &input_element, &result->end_of_input); result->checkpoint.Merge(ctx->checkpoint()); if (result->end_of_input || !result->status.ok()) { CallCompleted(ctx, result); return; } auto done = [this, ctx, result](Status status) { if (!status.ok()) { result->status = AddErrorContext(status); } RecordBufferEnqueue(ctx.get(), result->return_values); CallCompleted(ctx, result); }; if (use_unbounded_threadpool_) { auto runner_fn = [this](std::function<void()> fn) { this->unbounded_thread_pool_->Schedule(fn); }; instantiated_captured_func_->RunAsync( runner_fn, ctx->cancellation_manager(), ctx->collective_executor(), std::move(input_element), &result->return_values, done, model_node()); } else if (dataset()->captured_func_->use_inter_op_parallelism()) { instantiated_captured_func_->RunAsync( ctx.get(), 
std::move(input_element), &result->return_values, std::move(done), model_node()); } else { auto fn = std::bind( [this, ctx, result](std::vector<Tensor> input_element) { return instantiated_captured_func_->Run( ctx.get(), std::move(input_element), &result->return_values, model_node()); }, std::move(input_element)); (*ctx->runner())( [this, ctx, fn = std::move(fn), done = std::move(done)]() { Status s; if (IsRecording(ctx.get())) { s = fn(); } else { RecordStart(ctx.get()); s = fn(); RecordStop(ctx.get()); } done(s); }); } } Status ProcessResult(IteratorContext* ctx, const std::shared_ptr<InvocationResult>& result, std::vector<Tensor>* out_tensors, bool* end_of_sequence) TF_LOCKS_EXCLUDED(*mu_) { ctx->MergeCheckpoint(&result->checkpoint); if (!result->end_of_input && result->status.ok()) { *out_tensors = std::move(result->return_values); RecordBufferDequeue(ctx, *out_tensors); *end_of_sequence = false; return absl::OkStatus(); } if (errors::IsOutOfRange(result->status)) { if (preserve_cardinality_) { return errors::InvalidArgument( "Function invocation produced OutOfRangeError: ", result->status.message()); } else { *end_of_sequence = true; return absl::OkStatus(); } } *end_of_sequence = result->end_of_input; return result->status; } void RunnerThread(const std::shared_ptr<IteratorContext>& ctx) TF_LOCKS_EXCLUDED(*mu_) { RecordStart(ctx.get()); auto cleanup = gtl::MakeCleanup([this, ctx] { RecordStop(ctx.get()); }); std::vector<std::shared_ptr<InvocationResult>> new_calls; { tf_shared_lock l(*mu_); new_calls.reserve(num_parallel_calls_->value); } auto busy = [this]() TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) -> bool { int64_t num_parallel_calls = num_parallel_calls_->value; return num_calls_ >= num_parallel_calls || invocation_results_.size() >= num_parallel_calls; }; while (true) { { mutex_lock l(*mu_); while (!cancelled_ && busy()) { RecordStop(ctx.get()); cond_var_->wait(l); RecordStart(ctx.get()); } if (cancelled_) { return; } while (!busy()) { invocation_results_.push_back( std::make_shared<InvocationResult>(ctx.get())); new_calls.push_back(invocation_results_.back()); num_calls_++; } cond_var_->notify_all(); } for (const auto& call : new_calls) { CallFunction(ctx, call); } new_calls.clear(); } } bool ShouldWait(std::shared_ptr<InvocationResult>* result) TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) { if (cancelled_) { return false; } if (!deterministic_) { for (auto it = invocation_results_.begin(); it != invocation_results_.end(); ++it) { if ((*it)->notification.HasBeenNotified() && (it == invocation_results_.begin() || !(*it)->end_of_input)) { std::swap(*result, *it); invocation_results_.erase(it); cond_var_->notify_all(); return false; } } } else if (!invocation_results_.empty()) { std::swap(*result, invocation_results_.front()); invocation_results_.pop_front(); cond_var_->notify_all(); return false; } return true; } void StatsThread(const std::shared_ptr<IteratorContext>& ctx) TF_LOCKS_EXCLUDED(*mu_) { for (int64_t step = 0;; ++step) { int num_calls; int num_parallel_calls; { mutex_lock l(*mu_); if (step != 0 && !cancelled_) { cond_var_->wait_for( l, std::chrono::milliseconds(kStatsReportingPeriodMillis)); } if (cancelled_) { return; } num_calls = num_calls_; num_parallel_calls = num_parallel_calls_->value; } if (num_parallel_calls == 0) { num_parallel_calls = 1; } ctx->stats_aggregator()->AddScalar( stats_utils::ThreadUtilizationScalarName(dataset()->node_name()), static_cast<float>(num_calls) / static_cast<float>(num_parallel_calls), step); } } Status WriteStatusLocked(IteratorStateWriter* writer, 
const std::string& prefix, const Status& status) TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) { TF_RETURN_IF_ERROR( writer->WriteScalar(prefix, absl::StrCat("_", kErrorCode), static_cast<int64_t>(status.code()))); if (!status.ok()) { TF_RETURN_IF_ERROR(writer->WriteScalar(prefix, absl::StrCat("_", kErrorMessage), std::string(status.message()))); } return absl::OkStatus(); } Status ReadStatusLocked(IteratorStateReader* reader, const std::string& prefix, Status* status) TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) { int64_t code_int; TF_RETURN_IF_ERROR( reader->ReadScalar(prefix, absl::StrCat("_", kErrorCode), &code_int)); absl::StatusCode code = static_cast<absl::StatusCode>(code_int); if (code != absl::StatusCode::kOk) { tstring error_message; TF_RETURN_IF_ERROR(reader->ReadScalar( prefix, absl::StrCat("_", kErrorMessage), &error_message)); *status = Status(code, error_message); } else { *status = absl::OkStatus(); } return absl::OkStatus(); } const std::shared_ptr<mutex> mu_; const std::shared_ptr<condition_variable> cond_var_; const std::shared_ptr<model::SharedState> num_parallel_calls_; const bool deterministic_; const bool preserve_cardinality_; const bool use_unbounded_threadpool_; const bool autotune_; int64_t num_calls_ TF_GUARDED_BY(*mu_) = 0; std::unique_ptr<CancellationManager> cancellation_manager_; std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_; std::unique_ptr<IteratorBase> input_impl_; std::deque<std::shared_ptr<InvocationResult>> invocation_results_ TF_GUARDED_BY(*mu_); bool cancelled_ TF_GUARDED_BY(*mu_) = false; std::unique_ptr<Thread> runner_thread_ TF_GUARDED_BY(*mu_); std::unique_ptr<Thread> stats_thread_ TF_GUARDED_BY(*mu_); std::unique_ptr<UnboundedThreadPool> unbounded_thread_pool_; std::function<void()> deregister_fn_; int64 interleave_depth_ = -1; }; const DatasetBase* const input_; const int64_t num_parallel_calls_; const DataTypeVector output_types_; const std::vector<PartialTensorShape> output_shapes_; const DeterminismPolicy deterministic_; const bool preserve_cardinality_; const bool use_unbounded_threadpool_; const std::unique_ptr<CapturedFunction> captured_func_; const int op_version_; mutable absl::once_flag instantiated_captured_func_once_; mutable absl::Status instantiated_captured_func_status_; mutable std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_; absl::Status random_indexing_compatible_; }; ParallelMapDatasetOp::ParallelMapDatasetOp(OpKernelConstruction* ctx) : UnaryDatasetOpKernel(ctx), op_version_(ctx->HasAttr(kSloppy) ? 
1 : 2) { FunctionMetadata::Params params; OP_REQUIRES_OK(ctx, ctx->GetAttr(kUseInterOpParallelism, &params.use_inter_op_parallelism)); OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kFunc, params, &func_metadata_)); OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_)); OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_)); if (op_version_ == 1) { bool sloppy; OP_REQUIRES_OK(ctx, ctx->GetAttr(kSloppy, &sloppy)); if (sloppy) { deterministic_ = DeterminismPolicy(DeterminismPolicy::Type::kNondeterministic); } else { deterministic_ = DeterminismPolicy(DeterminismPolicy::Type::kDefault); } use_unbounded_threadpool_ = false; } if (op_version_ == 2) { std::string deterministic; OP_REQUIRES_OK(ctx, ctx->GetAttr(kDeterministic, &deterministic)); OP_REQUIRES_OK( ctx, DeterminismPolicy::FromString(deterministic, &deterministic_)); OP_REQUIRES_OK( ctx, ctx->GetAttr(kUseUnboundedThreadpool, &use_unbounded_threadpool_)); } OP_REQUIRES_OK(ctx, ctx->GetAttr(kPreserveCardinality, &preserve_cardinality_)); } void ParallelMapDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input, DatasetBase** output) { int64_t num_parallel_calls; if (op_version_ == 1) { int32_t parallel_calls; OP_REQUIRES_OK( ctx, ParseScalarArgument(ctx, kNumParallelCalls, &parallel_calls)); num_parallel_calls = parallel_calls; } if (op_version_ == 2) { OP_REQUIRES_OK( ctx, ParseScalarArgument(ctx, kNumParallelCalls, &num_parallel_calls)); } OP_REQUIRES( ctx, num_parallel_calls > 0 || num_parallel_calls == model::kAutotune, errors::InvalidArgument("num_parallel_calls must be greater than zero.")); std::unique_ptr<CapturedFunction> captured_func; OP_REQUIRES_OK(ctx, CapturedFunction::Create(ctx, func_metadata_, kOtherArguments, &captured_func)); if (num_parallel_calls == model::kAutotune) { metrics::RecordTFDataAutotune(kDatasetType); } *output = new Dataset(ctx, input, num_parallel_calls, output_types_, output_shapes_, deterministic_, std::move(captured_func), preserve_cardinality_, use_unbounded_threadpool_, op_version_); } std::unique_ptr<DatasetBase> MakeDataServiceUncompressDataset( DatasetBase* input, std::unique_ptr<CapturedFunction> captured_function, const DataTypeVector& output_types, const std::vector<PartialTensorShape>& output_shapes) { DatasetContext::Params param; param.type_string = kParallelMapDatasetV2; param.node_name = kParallelMapDatasetV2; return std::make_unique<ParallelMapDatasetOp::Dataset>( DatasetContext(std::move(param)), input, model::kAutotune, output_types, output_shapes, DeterminismPolicy(DeterminismPolicy::Type::kDefault), std::move(captured_function), true, false, 2); } namespace { REGISTER_KERNEL_BUILDER(Name(kParallelMapDatasetV1).Device(DEVICE_CPU), ParallelMapDatasetOp); REGISTER_KERNEL_BUILDER(Name(kParallelMapDatasetV2).Device(DEVICE_CPU), ParallelMapDatasetOp); REGISTER_INPUT_COLOCATION_EXEMPTION(kParallelMapDatasetV1); REGISTER_INPUT_COLOCATION_EXEMPTION(kParallelMapDatasetV2); } } }
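A note on the ordering logic in the iterator above. ShouldWait is where the deterministic attr actually takes effect: in deterministic mode results are always consumed from the front of invocation_results_ in submission order, while in nondeterministic mode any completed result may be returned, except that an end-of-input marker is only honored once it reaches the front of the queue (otherwise earlier, still-in-flight elements would be dropped). Below is a minimal standalone sketch of that selection policy, with a hypothetical Result type standing in for InvocationResult; it is an illustration, not code from the op:

#include <deque>
#include <memory>
#include <optional>

// Hypothetical stand-in for InvocationResult; `ready` plays the role of
// notification.HasBeenNotified().
struct Result {
  bool ready = false;
  bool end_of_input = false;
};

// Returns the result to hand to the caller, or nullopt when the caller
// should keep waiting on the condition variable (ShouldWait() == true).
std::optional<Result> TryTake(std::deque<std::shared_ptr<Result>>& results,
                              bool deterministic) {
  if (deterministic) {
    // Consume strictly in submission order; the real iterator then blocks
    // on the front element's notification if it has not completed yet.
    if (results.empty()) return std::nullopt;
    Result r = *results.front();
    results.pop_front();
    return r;
  }
  for (auto it = results.begin(); it != results.end(); ++it) {
    // Take any completed result, but accept an end-of-input marker only
    // once it has reached the front of the queue.
    if ((*it)->ready && (it == results.begin() || !(*it)->end_of_input)) {
      Result r = **it;
      results.erase(it);
      return r;
    }
  }
  return std::nullopt;
}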
#include "tensorflow/core/kernels/data/parallel_map_dataset_op.h" #include <gtest/gtest.h> #include "xla/tsl/lib/core/status_test_util.h" #include "tensorflow/core/data/dataset_test_base.h" #include "tensorflow/core/data/name_utils.h" #include "tensorflow/core/framework/tensor_shape.h" namespace tensorflow { namespace data { namespace { constexpr char kNodeName[] = "parallel_map_dataset"; constexpr int kOpVersion = 2; class ParallelMapDatasetParams : public DatasetParams { public: template <typename T> ParallelMapDatasetParams( T input_dataset_params, std::vector<Tensor> other_arguments, int num_parallel_calls, FunctionDefHelper::AttrValueWrapper func, std::vector<FunctionDef> func_lib, DataTypeVector type_arguments, const DataTypeVector& output_dtypes, const std::vector<PartialTensorShape>& output_shapes, bool use_inter_op_parallelism, const std::string& deterministic, bool preserve_cardinality, string node_name) : DatasetParams(std::move(output_dtypes), std::move(output_shapes), std::move(node_name)), other_arguments_(std::move(other_arguments)), num_parallel_calls_(num_parallel_calls), func_(std::move(func)), func_lib_(std::move(func_lib)), type_arguments_(std::move(type_arguments)), use_inter_op_parallelism_(use_inter_op_parallelism), deterministic_(deterministic), preserve_cardinality_(preserve_cardinality) { input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params)); op_version_ = kOpVersion; name_utils::IteratorPrefixParams params; params.op_version = op_version_; iterator_prefix_ = name_utils::IteratorPrefix( input_dataset_params.dataset_type(), input_dataset_params.iterator_prefix(), params); } std::vector<Tensor> GetInputTensors() const override { auto input_tensors = other_arguments_; input_tensors.emplace_back( CreateTensor<int64_t>(TensorShape({}), {num_parallel_calls_})); return input_tensors; } Status GetInputNames(std::vector<string>* input_names) const override { input_names->emplace_back(ParallelMapDatasetOp::kInputDataset); for (int i = 0; i < other_arguments_.size(); ++i) { input_names->emplace_back( absl::StrCat(ParallelMapDatasetOp::kOtherArguments, "_", i)); } input_names->emplace_back(ParallelMapDatasetOp::kNumParallelCalls); return absl::OkStatus(); } Status GetAttributes(AttributeVector* attr_vector) const override { *attr_vector = {{"f", func_}, {"Targuments", type_arguments_}, {"output_shapes", output_shapes_}, {"output_types", output_dtypes_}, {"use_inter_op_parallelism", use_inter_op_parallelism_}, {"deterministic", deterministic_}, {"preserve_cardinality", preserve_cardinality_}, {"metadata", ""}}; return absl::OkStatus(); } string dataset_type() const override { return ParallelMapDatasetOp::kDatasetType; } std::vector<FunctionDef> func_lib() const override { return func_lib_; } private: std::vector<Tensor> other_arguments_; int num_parallel_calls_; FunctionDefHelper::AttrValueWrapper func_; std::vector<FunctionDef> func_lib_; DataTypeVector type_arguments_; bool use_inter_op_parallelism_; std::string deterministic_; bool preserve_cardinality_; }; class ParallelMapDatasetOpTest : public DatasetOpsTestBase {}; FunctionDefHelper::AttrValueWrapper MapFunc(const string& func_name, const DataType& dtype) { return FunctionDefHelper::FunctionRef(func_name, {{"T", dtype}}); } ParallelMapDatasetParams ParallelMapDatasetParams1() { return ParallelMapDatasetParams( RangeDatasetParams(0, 10, 3), {}, 1, MapFunc("XTimesTwo", DT_INT64), {test::function::XTimesTwo()}, {}, {DT_INT64}, {PartialTensorShape({})}, false, DeterminismPolicy::kDeterministic, false, 
kNodeName); } ParallelMapDatasetParams ParallelMapDatasetParams2() { return ParallelMapDatasetParams( RangeDatasetParams(0, 10, 3), {}, 2, MapFunc("XTimesTwo", DT_INT64), {test::function::XTimesTwo()}, {}, {DT_INT64}, {PartialTensorShape({})}, true, DeterminismPolicy::kNondeterministic, true, kNodeName); } ParallelMapDatasetParams ParallelMapDatasetParams3() { return ParallelMapDatasetParams( RangeDatasetParams(0, 10, 3), {}, 3, MapFunc("XTimesFour", DT_INT64), {test::function::XTimesTwo(), test::function::XTimesFour()}, {}, {DT_INT64}, {PartialTensorShape({})}, true, DeterminismPolicy::kDeterministic, false, kNodeName); } ParallelMapDatasetParams ParallelMapDatasetParams4() { return ParallelMapDatasetParams( RangeDatasetParams(0, 10, 3), {}, 4, MapFunc("XTimesTwo", DT_INT64), {test::function::XTimesTwo()}, {}, {DT_INT64}, {PartialTensorShape({})}, false, DeterminismPolicy::kDeterministic, false, kNodeName); } ParallelMapDatasetParams ParallelMapDatasetParams5() { return ParallelMapDatasetParams( RangeDatasetParams(0, 10, 3), {}, model::kAutotune, MapFunc("XTimesFour", DT_INT64), {test::function::XTimesTwo(), test::function::XTimesFour()}, {}, {DT_INT64}, {PartialTensorShape({})}, true, DeterminismPolicy::kNondeterministic, true, kNodeName); } ParallelMapDatasetParams ParallelMapDatasetParams6() { return ParallelMapDatasetParams( RangeDatasetParams(0, 10, 3), {}, 4, MapFunc("XTimesFour", DT_INT64), {test::function::XTimesTwo(), test::function::XTimesFour()}, {}, {DT_INT64}, {PartialTensorShape({})}, true, DeterminismPolicy::kDeterministic, false, kNodeName); } ParallelMapDatasetParams ParallelMapDatasetParams7() { return ParallelMapDatasetParams( RangeDatasetParams(0, 10, 3), {}, 2, MapFunc("XTimesFour", DT_INT64), {test::function::XTimesTwo(), test::function::XTimesFour()}, {}, {DT_INT64}, {PartialTensorShape({})}, false, DeterminismPolicy::kDeterministic, false, kNodeName); } ParallelMapDatasetParams ParallelMapDatasetParams8() { return ParallelMapDatasetParams( RangeDatasetParams(0, 10, 3), {}, model::kAutotune, MapFunc("XTimesFour", DT_INT64), {test::function::XTimesTwo(), test::function::XTimesFour()}, {}, {DT_INT64}, {PartialTensorShape({})}, false, DeterminismPolicy::kNondeterministic, true, kNodeName); } ParallelMapDatasetParams ParallelMapDatasetParams9() { return ParallelMapDatasetParams( BatchDatasetParams(RangeDatasetParams(0, 4, 1), 3, false, false, {DT_INT64}, {PartialTensorShape({-1})}, "batch_dataset"), {}, 1, MapFunc("XTimesTwo", DT_INT64), {test::function::XTimesTwo()}, {}, {DT_INT64}, {PartialTensorShape({-1})}, false, DeterminismPolicy::kDeterministic, false, kNodeName); } ParallelMapDatasetParams ParallelMapDatasetParamsWithInvalidNumParallelCalls() { return ParallelMapDatasetParams( RangeDatasetParams(0, 10, 3), {}, -4, MapFunc("XTimesTwo", DT_INT64), {test::function::XTimesTwo()}, {}, {DT_INT64}, {PartialTensorShape({})}, true, DeterminismPolicy::kNondeterministic, true, kNodeName); } std::vector<GetNextTestCase<ParallelMapDatasetParams>> GetNextTestCases() { return {{ParallelMapDatasetParams1(), CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}), true}, {ParallelMapDatasetParams2(), CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}), false}, {ParallelMapDatasetParams3(), CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}), true}, {ParallelMapDatasetParams4(), CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}), true}, {ParallelMapDatasetParams5(), CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}), false}, { 
ParallelMapDatasetParams6(), CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}), true}, { ParallelMapDatasetParams9(), {CreateTensor<int64_t>(TensorShape{3}, {0, 2, 4}), CreateTensor<int64_t>(TensorShape{1}, {6})}, true}}; } ITERATOR_GET_NEXT_TEST_P(ParallelMapDatasetOpTest, ParallelMapDatasetParams, GetNextTestCases()) TEST_F(ParallelMapDatasetOpTest, DatasetNodeName) { auto dataset_params = ParallelMapDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name())); } TEST_F(ParallelMapDatasetOpTest, DatasetTypeString) { auto dataset_params = ParallelMapDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); name_utils::OpNameParams params; params.op_version = dataset_params.op_version(); TF_ASSERT_OK(CheckDatasetTypeString( name_utils::OpName(ParallelMapDatasetOp::kDatasetType, params))); } TEST_F(ParallelMapDatasetOpTest, DatasetOutputDtypes) { auto dataset_params = ParallelMapDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64})); } TEST_F(ParallelMapDatasetOpTest, DatasetOutputShapes) { auto dataset_params = ParallelMapDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})})); } TEST_F(ParallelMapDatasetOpTest, DatasetElementSizeHasValue) { auto dataset_params = ParallelMapDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); auto element_size = dataset_->GetEstimatedElementSize(); ASSERT_TRUE(element_size.has_value()); EXPECT_GT(element_size.value(), 0); } TEST_F(ParallelMapDatasetOpTest, DatasetElementSizeNoValue) { auto dataset_params = ParallelMapDatasetParams9(); TF_ASSERT_OK(Initialize(dataset_params)); EXPECT_FALSE(dataset_->GetEstimatedElementSize().has_value()); } std::vector<CardinalityTestCase<ParallelMapDatasetParams>> CardinalityTestCases() { return {{ParallelMapDatasetParams1(), kUnknownCardinality}, {ParallelMapDatasetParams2(), 4}, {ParallelMapDatasetParams3(), kUnknownCardinality}, {ParallelMapDatasetParams4(), kUnknownCardinality}, {ParallelMapDatasetParams5(), 4}, {ParallelMapDatasetParams6(), kUnknownCardinality}}; } DATASET_CARDINALITY_TEST_P(ParallelMapDatasetOpTest, ParallelMapDatasetParams, CardinalityTestCases()) TEST_F(ParallelMapDatasetOpTest, IteratorOutputDtypes) { auto dataset_params = ParallelMapDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64})); } TEST_F(ParallelMapDatasetOpTest, IteratorOutputShapes) { auto dataset_params = ParallelMapDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})})); } TEST_F(ParallelMapDatasetOpTest, IteratorPrefix) { auto dataset_params = ParallelMapDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); name_utils::IteratorPrefixParams params; params.op_version = dataset_params.op_version(); TF_ASSERT_OK(CheckIteratorPrefix( name_utils::IteratorPrefix(ParallelMapDatasetOp::kDatasetType, dataset_params.iterator_prefix(), params))); } std::vector<IteratorSaveAndRestoreTestCase<ParallelMapDatasetParams>> IteratorSaveAndRestoreTestCases() { return {{ParallelMapDatasetParams1(), {0, 1, 5}, CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}), true}, {ParallelMapDatasetParams2(), {0, 1, 5}, CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}), false}, {ParallelMapDatasetParams3(), {0, 1, 5}, CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}), true}, 
{ParallelMapDatasetParams4(), {0, 1, 5}, CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}), true}, {ParallelMapDatasetParams5(), {0, 1, 5}, CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}), false}, { ParallelMapDatasetParams6(), {0, 1, 5}, CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}), true}}; } ITERATOR_SAVE_AND_RESTORE_TEST_P(ParallelMapDatasetOpTest, ParallelMapDatasetParams, IteratorSaveAndRestoreTestCases()) TEST_F(ParallelMapDatasetOpTest, InvalidNumParallelCalls) { auto dataset_params = ParallelMapDatasetParamsWithInvalidNumParallelCalls(); EXPECT_EQ(Initialize(dataset_params).code(), absl::StatusCode::kInvalidArgument); } } } }
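The expected tensors in GetNextTestCases follow directly from the input pipeline: RangeDatasetParams(0, 10, 3) yields {0, 3, 6, 9}, which XTimesTwo maps to {0, 6, 12, 18} and XTimesFour (XTimesTwo applied twice) to {0, 12, 24, 36}; ParallelMapDatasetParams9 first batches {0, 1, 2, 3} into {0, 1, 2} and {3} before doubling. A quick plain-C++ sanity check of the unbatched arithmetic, independent of TensorFlow:

#include <cassert>
#include <vector>

int main() {
  // RangeDatasetParams(0, 10, 3): start 0, stop 10, step 3.
  std::vector<long long> range;
  for (long long i = 0; i < 10; i += 3) range.push_back(i);
  std::vector<long long> x2, x4;
  for (long long v : range) {
    x2.push_back(v * 2);  // XTimesTwo
    x4.push_back(v * 4);  // XTimesFour
  }
  assert((x2 == std::vector<long long>{0, 6, 12, 18}));
  assert((x4 == std::vector<long long>{0, 12, 24, 36}));
  return 0;
}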
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/parallel_map_dataset_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/parallel_map_dataset_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
cpp
tensorflow/tensorflow
wav_to_spectrogram
tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram.cc
tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram_test.cc
#include "tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram.h" #include <vector> #include "tensorflow/cc/ops/audio_ops.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/graph/default_device.h" #include "tensorflow/core/graph/graph_def_builder.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/stringpiece.h" #include "tensorflow/core/lib/core/threadpool.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/strings/stringprintf.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/public/session.h" using tensorflow::DT_FLOAT; using tensorflow::DT_UINT8; using tensorflow::Output; using tensorflow::TensorShape; tensorflow::Status WavToSpectrogram(const tensorflow::string& input_wav, int32_t window_size, int32_t stride, float brightness, const tensorflow::string& output_image) { auto root = tensorflow::Scope::NewRootScope(); using namespace tensorflow::ops; Output file_reader = tensorflow::ops::ReadFile(root.WithOpName("input_wav"), input_wav); DecodeWav wav_decoder = DecodeWav(root.WithOpName("wav_decoder"), file_reader); Output spectrogram = AudioSpectrogram(root.WithOpName("spectrogram"), wav_decoder.audio, window_size, stride); Output brightness_placeholder = Placeholder(root.WithOpName("brightness_placeholder"), DT_FLOAT, Placeholder::Attrs().Shape(TensorShape({}))); Output mul = Mul(root.WithOpName("mul"), spectrogram, brightness_placeholder); Output min_const = Const(root.WithOpName("min_const"), 255.0f); Output min = Minimum(root.WithOpName("min"), mul, min_const); Output cast = Cast(root.WithOpName("cast"), min, DT_UINT8); Output expand_dims_const = Const(root.WithOpName("expand_dims_const"), -1); Output expand_dims = ExpandDims(root.WithOpName("expand_dims"), cast, expand_dims_const); Output squeeze = Squeeze(root.WithOpName("squeeze"), expand_dims, Squeeze::Attrs().Axis({0})); Output png_encoder = EncodePng(root.WithOpName("png_encoder"), squeeze); tensorflow::ops::WriteFile file_writer = tensorflow::ops::WriteFile( root.WithOpName("output_image"), output_image, png_encoder); tensorflow::GraphDef graph; TF_RETURN_IF_ERROR(root.ToGraphDef(&graph)); std::unique_ptr<tensorflow::Session> session( tensorflow::NewSession(tensorflow::SessionOptions())); TF_RETURN_IF_ERROR(session->Create(graph)); tensorflow::Tensor brightness_tensor(DT_FLOAT, TensorShape({})); brightness_tensor.scalar<float>()() = brightness; TF_RETURN_IF_ERROR( session->Run({{"brightness_placeholder", brightness_tensor}}, {}, {"output_image"}, nullptr)); return absl::OkStatus(); }
#include "tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/wav/wav_io.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/test.h" TEST(WavToSpectrogramTest, WavToSpectrogramTest) { const tensorflow::string input_wav = tensorflow::io::JoinPath(tensorflow::testing::TmpDir(), "input_wav.wav"); const tensorflow::string output_image = tensorflow::io::JoinPath( tensorflow::testing::TmpDir(), "output_image.png"); float audio[8] = {-1.0f, 0.0f, 1.0f, 0.0f, -1.0f, 0.0f, 1.0f, 0.0f}; tensorflow::string wav_string; TF_ASSERT_OK( tensorflow::wav::EncodeAudioAsS16LEWav(audio, 44100, 1, 8, &wav_string)); TF_ASSERT_OK(tensorflow::WriteStringToFile(tensorflow::Env::Default(), input_wav, wav_string)); TF_ASSERT_OK(WavToSpectrogram(input_wav, 4, 4, 64.0f, output_image)); TF_EXPECT_OK(tensorflow::Env::Default()->FileExists(output_image)); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
cpp
tensorflow/tensorflow
math
third_party/xla/xla/hlo/builder/lib/math.cc
third_party/xla/xla/hlo/builder/lib/math_test.cc
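A reading aid for the math.cc listing that follows (standard identities, not text from the source). EvaluatePolynomial applies Horner's rule with coefficients ordered from highest degree down; Erfc switches between a rational erfc approximation for |x| > 1 and 1 - erf(x) otherwise; and Lgamma combines the Lanczos approximation with g = 7 and the reflection formula for arguments below one half:

\[ p(x) = \bigl(\cdots\bigl((c_0 x + c_1)x + c_2\bigr)x + \cdots\bigr)x + c_n \]

\[ \operatorname{erfc}(x) = \frac{2}{\sqrt{\pi}} \int_x^{\infty} e^{-t^2}\,dt = 1 - \operatorname{erf}(x) \]

\[ \ln\Gamma(z+1) = \tfrac{1}{2}\ln(2\pi) + \Bigl(z+\tfrac{1}{2}\Bigr)\ln t - t + \ln A_g(z), \qquad t = z + g + \tfrac{1}{2} \]

\[ A_g(z) = c_0 + \sum_{k=1}^{8} \frac{c_k}{z+k}, \qquad \Gamma(x)\,\Gamma(1-x) = \frac{\pi}{\sin(\pi x)} \]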
#include "xla/hlo/builder/lib/math.h" #include <algorithm> #include <array> #include <cmath> #include <functional> #include <limits> #include <vector> #include "absl/algorithm/container.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/builder/lib/arithmetic.h" #include "xla/hlo/builder/lib/constants.h" #include "xla/hlo/builder/lib/loops.h" #include "xla/hlo/builder/lib/math_impl.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/primitive_util.h" #include "xla/shape.h" #include "xla/status_macros.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace { template <typename FP> XlaOp EvaluatePolynomial(XlaOp x, absl::Span<const FP> coefficients) { static_assert(std::is_floating_point<FP>::value, "Template-argument 'FP' must be a floating-point type"); if (coefficients.empty()) { return ScalarLike(x, FP(0.0)); } XlaOp poly = ScalarLike(x, coefficients[0]); for (int i = 1; i < coefficients.size(); ++i) { FP c = coefficients[i]; poly = poly * x + ScalarLike(x, c); } return poly; } template <typename FP> XlaOp EvaluateChebyshevPolynomial(XlaOp x, absl::Span<const FP> coefficients) { static_assert(std::is_floating_point<FP>::value, "Template-argument 'FP' must be a floating-point type"); XlaOp b0 = ScalarLike(x, 0.0); XlaOp b1 = ScalarLike(x, 0.0); XlaOp b2 = ScalarLike(x, 0.0); for (FP c : coefficients) { b2 = b1; b1 = b0; b0 = x * b1 - b2 + ScalarLike(x, c); } return ScalarLike(x, 0.5) * (b0 - b2); } } static XlaOp DoWithUpcastToF32(XlaOp operand, absl::Span<const PrimitiveType> upcast_types, const std::function<XlaOp(XlaOp)>& operation) { auto& b = *operand.builder(); return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand)); PrimitiveType elem_ty = shape.element_type(); bool needs_upcast = upcast_types.empty() ? 
primitive_util::BitWidth(shape.element_type()) <= 16 : absl::c_linear_search(upcast_types, elem_ty); if (needs_upcast) { operand = ConvertElementType(operand, F32); } XlaOp result = operation(operand); if (needs_upcast) { result = ConvertElementType(result, elem_ty); } return result; }); } static absl::Status EnsureOperandIsRealFp(absl::string_view op_name, XlaOp operand) { auto& b = *operand.builder(); TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand)); auto elem_ty = shape.element_type(); if (!primitive_util::IsFloatingPointType(elem_ty)) { return InvalidArgument( "Operands to %s must be real-valued floating-point, but got %s", op_name, PrimitiveType_Name(elem_ty)); } return absl::OkStatus(); } XlaOp IsPosInf(XlaOp operand) { auto& b = *operand.builder(); return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IsPosInf", operand)); TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand)); return Eq(operand, MaxValue(&b, shape.element_type())); }); } XlaOp IsNegInf(XlaOp operand) { auto& b = *operand.builder(); return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IsNegInf", operand)); TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand)); return Eq(operand, MinValue(&b, shape.element_type())); }); } XlaOp IsInf(XlaOp operand) { auto& b = *operand.builder(); return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IsInf", operand)); return IsPosInf(Abs(operand)); }); } XlaOp IsNan(XlaOp operand) { auto& b = *operand.builder(); return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IsNan", operand)); return Ne(operand, operand); }); } XlaOp IsNegZero(XlaOp operand) { auto& b = *operand.builder(); return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IsNegZero", operand)); TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand)); switch (shape.element_type()) { case F64: return Eq(BitcastConvertType(operand, U64), ConstantR0WithType(&b, U64, uint64_t{1} << 63)); case F32: return Eq(BitcastConvertType(operand, U32), ConstantR0WithType(&b, U32, uint32_t{1} << 31)); case F8E3M4: case F8E4M3: case F8E5M2: case F8E4M3FN: case F8E4M3B11FNUZ: case F8E5M2FNUZ: case F8E4M3FNUZ: case F16: case BF16: return Eq(BitcastConvertType(ConvertElementType(operand, F32), U32), ConstantR0WithType(&b, U32, uint32_t{1} << 31)); default: LOG(FATAL) << "Expected real fp type."; } }); } XlaOp Square(XlaOp operand) { return operand * operand; } XlaOp Reciprocal(XlaOp operand) { return ScalarLike(operand, 1.0) / operand; } static XlaOp ErfcImpl32(XlaOp x) { const double kMaxlog = 88.72283905206835; static const std::array<float, 9> kErfcPCoefficient{ +2.326819970068386E-2, -1.387039388740657E-1, +3.687424674597105E-1, -5.824733027278666E-1, +6.210004621745983E-1, -4.944515323274145E-1, +3.404879937665872E-1, -2.741127028184656E-1, +5.638259427386472E-1, }; static const std::array<float, 8> kErfcRCoefficient{ -1.047766399936249E+1, +1.297719955372516E+1, -7.495518717768503E+0, +2.921019019210786E+0, -1.015265279202700E+0, +4.218463358204948E-1, -2.820767439740514E-1, +5.641895067754075E-1, }; XlaOp abs_x = Abs(x); XlaOp z = Exp(-x * x); XlaOp q = ScalarLike(x, 1) / abs_x; XlaOp y = q * q; XlaOp p = Select(Lt(abs_x, ScalarLike(x, 2.0)), EvaluatePolynomial<float>(y, kErfcPCoefficient), EvaluatePolynomial<float>(y, kErfcRCoefficient)); y = z * q * p; XlaOp y_clamp = Select(Lt(z, 
ScalarLike(x, -kMaxlog)), ScalarLike(x, 0), y); return Select(Lt(x, ScalarLike(x, 0)), ScalarLike(x, 2.0) - y_clamp, y_clamp); } static XlaOp ErfImpl32Cephes(XlaOp x) { static const std::array<float, 7> kErfTCoefficient{ +7.853861353153693E-5, -8.010193625184903E-4, +5.188327685732524E-3, -2.685381193529856E-2, +1.128358514861418E-1, -3.761262582423300E-1, +1.128379165726710E+0, }; return x * EvaluatePolynomial<float>(x * x, kErfTCoefficient); } static XlaOp ErfcImpl64(XlaOp x) { const double kMaxlog = 7.09782712893383996843E2; static const std::array<double, 9> kErfcPCoefficient{ 2.46196981473530512524E-10, 5.64189564831068821977E-1, 7.46321056442269912687E0, 4.86371970985681366614E1, 1.96520832956077098242E2, 5.26445194995477358631E2, 9.34528527171957607540E2, 1.02755188689515710272E3, 5.57535335369399327526E2}; static const std::array<double, 9> kErfcQCoefficient{ 1.00000000000000000000E0, 1.32281951154744992508E1, 8.67072140885989742329E1, 3.54937778887819891062E2, 9.75708501743205489753E2, 1.82390916687909736289E3, 2.24633760818710981792E3, 1.65666309194161350182E3, 5.57535340817727675546E2}; static const std::array<double, 6> kErfcRCoefficient{ 5.64189583547755073984E-1, 1.27536670759978104416E0, 5.01905042251180477414E0, 6.16021097993053585195E0, 7.40974269950448939160E0, 2.97886665372100240670E0}; static const std::array<double, 7> kErfcSCoefficient{ 1.00000000000000000000E0, 2.26052863220117276590E0, 9.39603524938001434673E0, 1.20489539808096656605E1, 1.70814450747565897222E1, 9.60896809063285878198E0, 3.36907645100081516050E0}; XlaOp z = -x * x; XlaOp abs_x = Abs(x); XlaOp y = Select(Lt(abs_x, ScalarLike(x, 8.0)), Exp(z) * EvaluatePolynomial<double>(abs_x, kErfcPCoefficient) / EvaluatePolynomial<double>(abs_x, kErfcQCoefficient), Exp(z) * EvaluatePolynomial<double>(abs_x, kErfcRCoefficient) / EvaluatePolynomial<double>(abs_x, kErfcSCoefficient)); XlaOp y_clamp = Select(Lt(z, ScalarLike(x, -kMaxlog)), ScalarLike(x, 0), y); return Select(Lt(x, ScalarLike(x, 0)), ScalarLike(x, 2.0) - y_clamp, y_clamp); } static XlaOp ErfImpl64(XlaOp x) { static std::array<double, 5> kErfTCoefficient{ 9.60497373987051638749E0, 9.00260197203842689217E1, 2.23200534594684319226E3, 7.00332514112805075473E3, 5.55923013010394962768E4}; static std::array<double, 6> kErfUCoefficient{ 1.00000000000000000000E0, 3.35617141647503099647E1, 5.21357949780152679795E2, 4.59432382970980127987E3, 2.26290000613890934246E4, 4.92673942608635921086E4}; XlaOp z = x * x; return x * EvaluatePolynomial<double>(z, kErfTCoefficient) / EvaluatePolynomial<double>(z, kErfUCoefficient); } XlaOp Erfc(XlaOp x) { auto& b = *x.builder(); return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Erfc", x)); TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(x)); if (shape.element_type() == F64) { return Select(Gt(Abs(x), ScalarLike(x, 1)), ErfcImpl64(x), ScalarLike(x, 1) - ErfImpl64(x)); } return DoWithUpcastToF32(x, {}, [](XlaOp x) { return Select(Gt(Abs(x), ScalarLike(x, 1)), ErfcImpl32(x), ScalarLike(x, 1) - ErfImpl32Cephes(x)); }); }); } static XlaOp ErfImpl32(XlaOp x) { static const std::array<float, 5> kAlpha{ 0.00022905065861350646f, 0.0034082910107109506f, 0.050955695062380861f, 0.18520832239976145f, 1.128379143519084f}; static const std::array<float, 7> kBeta{-1.1791602954361697e-7, 0.000023547966471313185f, 0.0010179625278914885f, 0.014070470171167667f, 0.11098505178285362f, 0.49746925110067538f, 1.0f}; constexpr float kErfInvOneMinusHalfULP = 3.7439211627767994f; x = Clamp(ScalarLike(x, 
-kErfInvOneMinusHalfULP), x, ScalarLike(x, kErfInvOneMinusHalfULP)); auto x2 = x * x; return (x * EvaluatePolynomial<float>(x2, kAlpha)) / EvaluatePolynomial<float>(x2, kBeta); } namespace { XlaOp ErfInv32(XlaOp x) { constexpr int kDegree = 9; constexpr std::array<float, 9> w_less_than_5_constants = { 2.81022636e-08f, 3.43273939e-07f, -3.5233877e-06f, -4.39150654e-06f, 0.00021858087f, -0.00125372503f, -0.00417768164f, 0.246640727f, 1.50140941f}; constexpr std::array<float, 9> w_greater_than_5_constants = { -0.000200214257f, 0.000100950558f, 0.00134934322f, -0.00367342844f, 0.00573950773f, -0.0076224613f, 0.00943887047f, 1.00167406f, 2.83297682f}; auto w = -Log1p(-x * x); auto lt = Lt(w, ScalarLike(x, 5.0)); auto coefficient = [&](int i) { return Select(lt, FullLike(x, w_less_than_5_constants[i]), FullLike(x, w_greater_than_5_constants[i])); }; w = Select(lt, w - ScalarLike(x, 2.5), Sqrt(w) - ScalarLike(x, 3.0)); auto p = coefficient(0); for (int i = 1; i < kDegree; ++i) { p = coefficient(i) + p * w; } XlaOp result = p * x; auto& b = *x.builder(); return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(Shape shape, b.GetShape(x)); return Select(Eq(Abs(x), ScalarLike(x, 1)), x * MaxValue(&b, shape.element_type()), result); }); } XlaOp ErfInv64(XlaOp x) { constexpr std::array<double, 23> w_less_than_6_25_constants = { -3.6444120640178196996e-21, -1.685059138182016589e-19, 1.2858480715256400167e-18, 1.115787767802518096e-17, -1.333171662854620906e-16, 2.0972767875968561637e-17, 6.6376381343583238325e-15, -4.0545662729752068639e-14, -8.1519341976054721522e-14, 2.6335093153082322977e-12, -1.2975133253453532498e-11, -5.4154120542946279317e-11, 1.051212273321532285e-09, -4.1126339803469836976e-09, -2.9070369957882005086e-08, 4.2347877827932403518e-07, -1.3654692000834678645e-06, -1.3882523362786468719e-05, 0.0001867342080340571352, -0.00074070253416626697512, -0.0060336708714301490533, 0.24015818242558961693, 1.6536545626831027356}; constexpr std::array<double, 19> w_less_than_16_constants = { 2.2137376921775787049e-09, 9.0756561938885390979e-08, -2.7517406297064545428e-07, 1.8239629214389227755e-08, 1.5027403968909827627e-06, -4.013867526981545969e-06, 2.9234449089955446044e-06, 1.2475304481671778723e-05, -4.7318229009055733981e-05, 6.8284851459573175448e-05, 2.4031110387097893999e-05, -0.0003550375203628474796, 0.00095328937973738049703, -0.0016882755560235047313, 0.0024914420961078508066, -0.0037512085075692412107, 0.005370914553590063617, 1.0052589676941592334, 3.0838856104922207635, }; constexpr std::array<double, 17> w_greater_than_16_constants = { -2.7109920616438573243e-11, -2.5556418169965252055e-10, 1.5076572693500548083e-09, -3.7894654401267369937e-09, 7.6157012080783393804e-09, -1.4960026627149240478e-08, 2.9147953450901080826e-08, -6.7711997758452339498e-08, 2.2900482228026654717e-07, -9.9298272942317002539e-07, 4.5260625972231537039e-06, -1.9681778105531670567e-05, 7.5995277030017761139e-05, -0.00021503011930044477347, -0.00013871931833623122026, 1.0103004648645343977, 4.8499064014085844221, }; auto w = -Log1p(-x * x); auto lt_6_25 = Lt(w, ScalarLike(x, 6.25)); auto lt_16 = Lt(w, ScalarLike(x, 16)); auto coefficient = [&](int i) { auto c = FullLike(x, w_less_than_6_25_constants[i]); if (i < 19) { c = Select(lt_6_25, c, FullLike(x, w_less_than_16_constants[i])); } if (i < 17) { c = Select(lt_16, c, FullLike(x, w_greater_than_16_constants[i])); } return c; }; auto sqrt_w = Sqrt(w); w = Select(lt_6_25, w - ScalarLike(x, 3.125), sqrt_w - Select(lt_16, 
ScalarLike(x, 3.25), ScalarLike(x, 5.0))); auto p = coefficient(0); for (int i = 1; i < 17; ++i) { p = coefficient(i) + p * w; } for (int i = 17; i < 19; ++i) { p = Select(lt_16, coefficient(i) + p * w, p); } for (int i = 19; i < 23; ++i) { p = Select(lt_6_25, coefficient(i) + p * w, p); } XlaOp result = p * x; auto& b = *x.builder(); return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(Shape shape, b.GetShape(x)); return Select(Eq(Abs(x), ScalarLike(x, 1)), x * MaxValue(&b, shape.element_type()), result); }); } } XlaOp ErfInv(XlaOp x) { auto& b = *x.builder(); return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("ErfInv", x)); TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(x)); if (shape.element_type() == F64) { return ErfInv64(x); } return DoWithUpcastToF32(x, {}, [](XlaOp x) { return ErfInv32(x); }); }); } namespace { static constexpr double kLanczosGamma = 7; static constexpr double kBaseLanczosCoeff = 0.99999999999980993227684700473478; static constexpr std::array<double, 8> kLanczosCoefficients = { 676.520368121885098567009190444019, -1259.13921672240287047156078755283, 771.3234287776530788486528258894, -176.61502916214059906584551354, 12.507343278686904814458936853, -0.13857109526572011689554707, 9.984369578019570859563e-6, 1.50563273514931155834e-7}; } XlaOp Lgamma(XlaOp input) { auto do_it = [](XlaOp input) { XlaOp one_half = ScalarLike(input, 0.5); XlaOp one = ScalarLike(input, 1); XlaOp pi = ScalarLike(input, M_PI); XlaOp log_pi = ScalarLike(input, std::log(M_PI)); XlaOp log_sqrt_two_pi = ScalarLike(input, (std::log(2) + std::log(M_PI)) / 2); XlaOp lanczos_gamma_plus_one_half = ScalarLike(input, kLanczosGamma + 0.5); XlaOp log_lanczos_gamma_plus_one_half = ScalarLike(input, std::log(kLanczosGamma + 0.5)); XlaOp base_lanczos_coeff = ScalarLike(input, kBaseLanczosCoeff); XlaOp need_to_reflect = Lt(input, one_half); XlaOp z = Select(need_to_reflect, -input, input - one); XlaOp x = base_lanczos_coeff; for (int i = 0, end = kLanczosCoefficients.size(); i < end; ++i) { XlaOp lanczos_coefficient = ScalarLike(input, kLanczosCoefficients[i]); XlaOp index = ScalarLike(input, i); x = x + lanczos_coefficient / (z + index + one); } XlaOp t = lanczos_gamma_plus_one_half + z; XlaOp log_t = log_lanczos_gamma_plus_one_half + Log1p(z / lanczos_gamma_plus_one_half); XlaOp log_y = log_sqrt_two_pi + (z + one_half - t / log_t) * log_t + Log(x); XlaOp abs_input = Abs(input); XlaOp abs_frac_input = abs_input - Floor(abs_input); XlaOp reduced_frac_input = Select(Gt(abs_frac_input, ScalarLike(abs_frac_input, 0.5)), ScalarLike(abs_frac_input, 1) - abs_frac_input, abs_frac_input); XlaOp reflection_denom = Log(Sin(pi * reduced_frac_input)); XlaOp reflection = Select(IsFinite(reflection_denom), log_pi - reflection_denom - log_y, -reflection_denom); XlaOp result = Select(need_to_reflect, reflection, log_y); XlaOp inf_bcast = FullLike(input, std::numeric_limits<float>::infinity()); return Select(IsInf(input), inf_bcast, result); }; auto& b = *input.builder(); return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Lgamma", input)); return DoWithUpcastToF32(input, {}, do_it); }); } static XlaOp Lbeta(XlaOp a, XlaOp b) { return Lgamma(a) + Lgamma(b) - Lgamma(a + b); } XlaOp Digamma(XlaOp input) { auto do_it = [](XlaOp input) { XlaOp zero = ScalarLike(input, 0); XlaOp one_half = ScalarLike(input, 0.5); XlaOp one = ScalarLike(input, 1); XlaOp pi = ScalarLike(input, M_PI); XlaOp lanczos_gamma = 
ScalarLike(input, kLanczosGamma); XlaOp lanczos_gamma_plus_one_half = ScalarLike(input, kLanczosGamma + 0.5); XlaOp log_lanczos_gamma_plus_one_half = ScalarLike(input, std::log(kLanczosGamma + 0.5)); XlaOp base_lanczos_coeff = ScalarLike(input, kBaseLanczosCoeff); XlaOp need_to_reflect = Lt(input, one_half); XlaOp z = Select(need_to_reflect, -input, input - one); XlaOp num = zero; XlaOp denom = base_lanczos_coeff; for (int i = 0, end = kLanczosCoefficients.size(); i < end; ++i) { XlaOp lanczos_coefficient = ScalarLike(input, kLanczosCoefficients[i]); XlaOp index = ScalarLike(input, i); num = num - lanczos_coefficient / ((z + index + one) * (z + index + one)); denom = denom + lanczos_coefficient / (z + index + one); } XlaOp t = lanczos_gamma_plus_one_half + z; XlaOp log_t = log_lanczos_gamma_plus_one_half + Log1p(z / lanczos_gamma_plus_one_half); XlaOp y = log_t + num / denom - lanczos_gamma / t; XlaOp reduced_input = input + Abs(Floor(input + ScalarLike(input, 0.5))); XlaOp reflection = y - pi * Cos(pi * reduced_input) / Sin(pi * reduced_input); XlaOp real_result = Select(need_to_reflect, reflection, y); return Select(And(Le(input, zero), Eq(input, Floor(input))), FullLike(input, std::numeric_limits<float>::quiet_NaN()), real_result); }; auto& b = *input.builder(); return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Digamma", input)); return DoWithUpcastToF32(input, {}, do_it); }); } namespace { enum kIgammaMode { VALUE, DERIVATIVE, SAMPLE_DERIVATIVE }; template <kIgammaMode mode> XlaOp IgammaSeries(XlaOp ax, XlaOp x, XlaOp a, XlaOp enabled, xla::PrimitiveType type) { auto cond = [&](absl::Span<const XlaOp> vals, XlaBuilder* builder) -> absl::StatusOr<XlaOp> { XlaOp enabled = vals[0]; return Any(enabled); }; auto body = [&](absl::Span<const XlaOp> vals, XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> { XlaOp enabled = vals[0]; XlaOp r = vals[1]; XlaOp c = vals[2]; XlaOp ans = vals[3]; XlaOp x = vals[4]; XlaOp dc_da = vals[5]; XlaOp dans_da = vals[6]; r = r + ScalarLike(r, 1); dc_da = dc_da * (x / r) + (ScalarLike(r, -1) * c * x) / (r * r); dans_da = dans_da + dc_da; c = c * (x / r); ans = ans + c; XlaOp conditional; if (mode == VALUE) { conditional = And(enabled, Gt(c / ans, Epsilon(builder, type))); } else { conditional = And(enabled, Gt(Abs(dc_da / dans_da), Epsilon(builder, type))); } return std::vector<XlaOp>{ conditional, Select(enabled, r, vals[1]), Select(enabled, c, vals[2]), Select(enabled, ans, vals[3]), Select(enabled, x, vals[4]), Select(enabled, dc_da, vals[5]), Select(enabled, dans_da, vals[6]), }; }; auto& b = *ax.builder(); return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { std::vector<XlaOp> vals = { enabled, a, FullLike(a, 1), FullLike(a, 1), x, FullLike(a, 0), FullLike(a, 0), }; TF_ASSIGN_OR_RETURN(vals, WhileLoopHelper(cond, body, vals, "igamma", &b)); XlaOp ans = vals[3]; XlaOp dans_da = vals[6]; if (mode == VALUE) { return (ans * ax) / a; } XlaOp dlogax_da = Log(x) - Digamma(a + ScalarLike(a, 1)); switch (mode) { case DERIVATIVE: return ax * (ans * dlogax_da + dans_da) / a; case SAMPLE_DERIVATIVE: default: return -(dans_da + ans * dlogax_da) * x / a; } }); } template <kIgammaMode mode> XlaOp IgammacContinuedFraction(XlaOp ax, XlaOp x, XlaOp a, XlaOp enabled, xla::PrimitiveType type) { auto cond = [&](absl::Span<const XlaOp> vals, XlaBuilder* builder) -> absl::StatusOr<XlaOp> { XlaOp enabled = vals[0]; XlaOp c = vals[5]; return And(Lt(c, ScalarLike(c, 2000)), Any(enabled)); }; auto body = 
[&](absl::Span<const XlaOp> vals, XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> { XlaOp enabled = vals[0]; XlaOp ans = vals[1]; XlaOp t = vals[2]; XlaOp y = vals[3]; XlaOp z = vals[4]; XlaOp c = vals[5]; XlaOp pkm1 = vals[6]; XlaOp qkm1 = vals[7]; XlaOp pkm2 = vals[8]; XlaOp qkm2 = vals[9]; XlaOp dpkm2_da = vals[10]; XlaOp dqkm2_da = vals[11]; XlaOp dpkm1_da = vals[12]; XlaOp dqkm1_da = vals[13]; XlaOp dans_da = vals[14]; c = c + ScalarLike(c, 1); y = y + ScalarLike(y, 1); z = z + ScalarLike(z, 2); XlaOp yc = y * c; XlaOp pk = pkm1 * z - pkm2 * yc; XlaOp qk = qkm1 * z - qkm2 * yc; XlaOp qk_is_nonzero = Ne(qk, ScalarLike(qk, 0)); XlaOp r = pk / qk; t = Select(qk_is_nonzero, Abs((ans - r) / r), FullLike(t, 1)); ans = Select(qk_is_nonzero, r, ans); XlaOp dpk_da = dpkm1_da * z - pkm1 - dpkm2_da * yc + pkm2 * c; XlaOp dqk_da = dqkm1_da * z - qkm1 - dqkm2_da * yc + qkm2 * c; XlaOp dans_da_new = Select(qk_is_nonzero, (dpk_da - ans * dqk_da) / qk, dans_da); XlaOp grad_conditional = Select(qk_is_nonzero, Abs(dans_da_new - dans_da), FullLike(dans_da, 1)); pkm2 = pkm1; pkm1 = pk; qkm2 = qkm1; qkm1 = qk; dpkm2_da = dpkm1_da; dqkm2_da = dqkm1_da; dpkm1_da = dpk_da; dqkm1_da = dqk_da; XlaOp rescale = Gt(Abs(pk), Reciprocal(Epsilon(builder, type))); pkm2 = Select(rescale, pkm2 * Epsilon(builder, type), pkm2); pkm1 = Select(rescale, pkm1 * Epsilon(builder, type), pkm1); qkm2 = Select(rescale, qkm2 * Epsilon(builder, type), qkm2); qkm1 = Select(rescale, qkm1 * Epsilon(builder, type), qkm1); dpkm2_da = Select(rescale, dpkm2_da * Epsilon(builder, type), dpkm2_da); dqkm2_da = Select(rescale, dqkm2_da * Epsilon(builder, type), dqkm2_da); dpkm1_da = Select(rescale, dpkm1_da * Epsilon(builder, type), dpkm1_da); dqkm1_da = Select(rescale, dqkm1_da * Epsilon(builder, type), dqkm1_da); XlaOp conditional; if (mode == VALUE) { conditional = And(enabled, Gt(t, Epsilon(builder, type))); } else { conditional = And(enabled, Gt(grad_conditional, Epsilon(builder, type))); } return std::vector<XlaOp>{conditional, Select(enabled, ans, vals[1]), Select(enabled, t, vals[2]), Select(enabled, y, vals[3]), Select(enabled, z, vals[4]), c, Select(enabled, pkm1, vals[6]), Select(enabled, qkm1, vals[7]), Select(enabled, pkm2, vals[8]), Select(enabled, qkm2, vals[9]), Select(enabled, dpkm2_da, vals[10]), Select(enabled, dqkm2_da, vals[11]), Select(enabled, dpkm1_da, vals[12]), Select(enabled, dqkm1_da, vals[13]), Select(enabled, dans_da_new, vals[14])}; }; auto& b = *ax.builder(); return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { XlaOp y = ScalarLike(a, 1) - a; XlaOp z = x + y + ScalarLike(x, 1); XlaOp c = ScalarLike(x, 0); XlaOp pkm2 = FullLike(x, 1); XlaOp qkm2 = x; XlaOp pkm1 = x + ScalarLike(x, 1); XlaOp qkm1 = z * x; XlaOp ans = pkm1 / qkm1; XlaOp t = FullLike(x, 1); XlaOp dpkm2_da = FullLike(x, 0); XlaOp dqkm2_da = FullLike(x, 0); XlaOp dpkm1_da = FullLike(x, 0); XlaOp dqkm1_da = -x; XlaOp dans_da = (dpkm1_da - ans * dqkm1_da) / qkm1; std::vector<XlaOp> vals = {enabled, ans, t, y, z, c, pkm1, qkm1, pkm2, qkm2, dpkm2_da, dqkm2_da, dpkm1_da, dqkm1_da, dans_da}; TF_ASSIGN_OR_RETURN(vals, WhileLoopHelper(cond, body, vals, "igammac", &b)); ans = vals[1]; if (mode == VALUE) { return ans * ax; } dans_da = vals[14]; XlaOp dlogax_da = Log(x) - Digamma(a); switch (mode) { case DERIVATIVE: return ax * (ans * dlogax_da + dans_da); case SAMPLE_DERIVATIVE: default: return -(dans_da + ans * dlogax_da) * x; } }); } } XlaOp Igamma(XlaOp a, XlaOp x) { auto& b = *a.builder(); auto doit = [&b](XlaOp a, XlaOp x, 
PrimitiveType type) -> XlaOp { XlaOp is_nan = Or(IsNan(a), IsNan(x)); XlaOp x_is_zero = Eq(x, ScalarLike(x, 0)); XlaOp x_is_infinity = Eq(x, ScalarLike(x, std::numeric_limits<float>::infinity())); XlaOp domain_error = Or(Lt(x, ScalarLike(x, 0)), Le(a, ScalarLike(a, 0))); XlaOp use_igammac = And(Gt(x, ScalarLike(x, 1)), Gt(x, a)); XlaOp ax = a * Log(x) - x - Lgamma(a); XlaOp underflow = Lt(ax, -Log(MaxFiniteValue(&b, type))); ax = Exp(ax); XlaOp enabled = Not(Or(Or(Or(x_is_zero, domain_error), underflow), is_nan)); const double nan = std::numeric_limits<double>::quiet_NaN(); XlaOp output = Select( use_igammac, ScalarLike(a, 1) - IgammacContinuedFraction<VALUE>( ax, x, a, And(enabled, use_igammac), type), IgammaSeries<VALUE>(ax, x, a, And(enabled, Not(use_igammac)), type)); output = Select(x_is_zero, ZerosLike(output), output); output = Select(x_is_infinity, FullLike(output, 1), output); output = Select(Or(domain_error, is_nan), FullLike(a, nan), output); return output; }; return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(auto a_shape, b.GetShape(a)); TF_ASSIGN_OR_RETURN(auto x_shape, b.GetShape(x)); if (a_shape != x_shape) { return InvalidArgument( "Arguments to Igamma must have equal shapes and types; got %s and %s", a_shape.ToString(), x_shape.ToString()); } TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Igamma", a)); PrimitiveType a_x_type = a_shape.element_type(); bool needs_upcast = false; for (PrimitiveType type : {BF16, F16, F8E3M4, F8E4M3, F8E5M2, F8E4M3FN, F8E4M3B11FNUZ, F8E5M2FNUZ, F8E4M3FNUZ}) { if (a_shape.element_type() == type) { needs_upcast = true; break; } } if (needs_upcast) { a = ConvertElementType(a, F32); x = ConvertElementType(x, F32); a_x_type = F32; } XlaOp result = doit(a, x, a_x_type); if (needs_upcast) { result = ConvertElementType(result, a_shape.element_type()); } return result; }); } XlaOp IgammaGradA(XlaOp a, XlaOp x) { auto& b = *a.builder(); auto doit = [&b](XlaOp a, XlaOp x, PrimitiveType type) -> XlaOp { XlaOp is_nan = Or(IsNan(a), IsNan(x)); XlaOp x_is_zero = Eq(x, ScalarLike(x, 0)); XlaOp domain_error = Or(Lt(x, ScalarLike(x, 0)), Le(a, ScalarLike(a, 0))); XlaOp use_igammac = And(Gt(x, ScalarLike(x, 1)), Gt(x, a)); XlaOp ax = a * Log(x) - x - Lgamma(a); XlaOp underflow = Lt(ax, -Log(MaxFiniteValue(&b, type))); ax = Exp(ax); XlaOp enabled = Not(Or(Or(Or(x_is_zero, domain_error), underflow), is_nan)); const double nan = std::numeric_limits<double>::quiet_NaN(); XlaOp output = Select(use_igammac, -IgammacContinuedFraction<DERIVATIVE>( ax, x, a, And(enabled, use_igammac), type), IgammaSeries<DERIVATIVE>( ax, x, a, And(enabled, Not(use_igammac)), type)); output = Select(x_is_zero, ZerosLike(output), output); output = Select(Or(domain_error, is_nan), FullLike(a, nan), output); return output; }; return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(auto a_shape, b.GetShape(a)); TF_ASSIGN_OR_RETURN(auto x_shape, b.GetShape(x)); if (a_shape != x_shape) { return InvalidArgument( "Arguments to IgammaGradA must have equal shapes and types; got %s " "and %s", a_shape.ToString(), x_shape.ToString()); } TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IgammaGradA", a)); bool needs_upcast = false; for (PrimitiveType type : {BF16, F16, F8E3M4, F8E4M3, F8E5M2, F8E4M3FN, F8E4M3B11FNUZ, F8E5M2FNUZ, F8E4M3FNUZ}) { if (a_shape.element_type() == type) { needs_upcast = true; break; } } if (needs_upcast) { a = ConvertElementType(a, F32); x = ConvertElementType(x, F32); } XlaOp result = doit(a, x, a_shape.element_type()); if 
(needs_upcast) { result = ConvertElementType(result, a_shape.element_type()); } return result; }); } XlaOp RandomGammaGrad(XlaOp a, XlaOp x) { auto& b = *a.builder(); auto doit = [&b](XlaOp a, XlaOp x, PrimitiveType type) -> XlaOp { XlaOp is_nan = Or(IsNan(a), IsNan(x)); XlaOp x_is_zero = Eq(x, ScalarLike(x, 0)); XlaOp domain_error = Or(Lt(x, ScalarLike(x, 0)), Le(a, ScalarLike(a, 0))); XlaOp use_igammac = And(Gt(x, ScalarLike(x, 1)), Gt(x, a)); XlaOp ax = a * Log(x) - x - Lgamma(a); XlaOp underflow = Lt(ax, -Log(MaxFiniteValue(&b, type))); ax = Exp(ax); XlaOp enabled = Not(Or(Or(Or(x_is_zero, domain_error), underflow), is_nan)); const double nan = std::numeric_limits<double>::quiet_NaN(); XlaOp output = Select(use_igammac, -IgammacContinuedFraction<SAMPLE_DERIVATIVE>( ax, x, a, And(enabled, use_igammac), type), IgammaSeries<SAMPLE_DERIVATIVE>( ax, x, a, And(enabled, Not(use_igammac)), type)); output = Select(x_is_zero, ZerosLike(output), output); output = Select(Or(domain_error, is_nan), FullLike(a, nan), output); return output; }; return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(auto a_shape, b.GetShape(a)); TF_ASSIGN_OR_RETURN(auto x_shape, b.GetShape(x)); if (a_shape != x_shape) { return InvalidArgument( "Arguments to RandomGammaGrad must have equal shapes and types; got " "%s and %s", a_shape.ToString(), x_shape.ToString()); } TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("RandomGammaGrad", a)); bool needs_upcast = a_shape.element_type() == F16 || a_shape.element_type() == BF16; if (needs_upcast) { a = ConvertElementType(a, F32); x = ConvertElementType(x, F32); } XlaOp result = doit(a, x, a_shape.element_type()); if (needs_upcast) { result = ConvertElementType(result, a_shape.element_type()); } return result; }); } XlaOp Igammac(XlaOp a, XlaOp x) { auto& b = *a.builder(); auto doit = [&b](XlaOp a, XlaOp x, PrimitiveType type) -> XlaOp { XlaOp out_of_range = Or(Le(x, ScalarLike(x, 0)), Le(a, ScalarLike(a, 0))); XlaOp use_igamma = Or(Lt(x, ScalarLike(x, 1)), Lt(x, a)); XlaOp ax = a * Log(x) - x - Lgamma(a); XlaOp underflow = Lt(ax, -Log(MaxFiniteValue(&b, type))); XlaOp enabled = Not(Or(out_of_range, underflow)); ax = Exp(ax); XlaOp result = Select(use_igamma, ScalarLike(a, 1) - IgammaSeries<VALUE>( ax, x, a, And(enabled, use_igamma), type), IgammacContinuedFraction<VALUE>( ax, x, a, And(enabled, Not(use_igamma)), type)); XlaOp x_is_infinity = Eq(x, ScalarLike(x, std::numeric_limits<float>::infinity())); result = Select(x_is_infinity, ZerosLike(result), result); return Select(out_of_range, FullLike(a, 1), result); }; return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(auto a_shape, b.GetShape(a)); TF_ASSIGN_OR_RETURN(auto x_shape, b.GetShape(x)); if (a_shape != x_shape) { return InvalidArgument( "Arguments to Igammac must have equal shapes and types; " "got %s and %s", a_shape.ToString(), x_shape.ToString()); } TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Igammac", a)); PrimitiveType a_x_type = a_shape.element_type(); bool needs_upcast = a_shape.element_type() == F16 || a_shape.element_type() == BF16; if (needs_upcast) { a = ConvertElementType(a, F32); x = ConvertElementType(x, F32); a_x_type = F32; } XlaOp result = doit(a, x, a_x_type); if (needs_upcast) { result = ConvertElementType(result, a_shape.element_type()); } return result; }); } XlaOp RoundToEven(XlaOp x) { auto& b = *x.builder(); return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("RoundToEven", x)); return 
RoundNearestEven(x); }); } XlaOp Acos(XlaOp x) { XlaBuilder* b = x.builder(); return b->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x)); if (primitive_util::IsComplexType(shape.element_type())) { auto one = ScalarLike(x, 1); auto imag_one = Complex( Zero(b, primitive_util::ComplexComponentType(shape.element_type())), One(b, primitive_util::ComplexComponentType(shape.element_type()))); auto result = Neg(imag_one * Log(x + imag_one * Sqrt((one + x) * (one - x)))); return result; } return Select(Ne(x, FullLike(x, -1)), ScalarLike(x, 2.0) * Atan2(Sqrt(ScalarLike(x, 1.0) - x * x), ScalarLike(x, 1.0) + x), FullLike(x, M_PI)); }); } XlaOp Asin(XlaOp x) { XlaBuilder* b = x.builder(); auto do_it = [&](XlaOp z) -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(z)); auto elem_ty = shape.element_type(); switch (elem_ty) { case C128: return math_impl::AsinComplex<double>(z); case C64: return math_impl::AsinComplex<float>(z); case F64: return math_impl::AsinReal<double>(z); case F32: return math_impl::AsinReal<float>(z); default: return InvalidArgument("Asin got unsupported element type %s", PrimitiveType_Name(elem_ty)); } }; return DoWithUpcastToF32( x, {}, [&](XlaOp x) { return b->ReportErrorOrReturn(do_it(x)); }); } XlaOp Atan(XlaOp x) { return Atan2(x, ScalarLike(x, 1.0)); } XlaOp Acosh(XlaOp x) { XlaBuilder* b = x.builder(); return b->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x)); auto one = ScalarLike(x, 1); auto neg_one = ScalarLike(x, -1); auto nan = FullLike(x, std::numeric_limits<float>::quiet_NaN()); auto naive_result = Log(x + Sqrt((x + one) * (x - one))); if (primitive_util::IsComplexType(shape.element_type())) { return naive_result; } auto overflow_result = Log(x) + Log(ScalarLike(x, 2)); auto sqrt_max_value = Sqrt(MaxFiniteValue(b, shape.element_type())); return Select(Lt(x, neg_one), nan, Select(Ge(x, sqrt_max_value), overflow_result, naive_result)); }); } XlaOp Asinh(XlaOp x) { XlaBuilder* b = x.builder(); auto do_it = [&](XlaOp x) -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x)); auto one = ScalarLike(x, 1); if (primitive_util::IsComplexType(shape.element_type())) { auto x_re = Real(x); auto x_im = Imag(x); auto z = Asin(Complex(x_im, -x_re)); auto z_im = Imag(z); auto on_branch_cut = And(Eq(x_re, ScalarLike(x_re, 0)), Gt(Abs(x_im), ScalarLike(x_im, 1))); return Complex(Select(on_branch_cut, z_im, -z_im), Real(z)); } auto a = Abs(x); auto small_result = Log1p(a + a * a / (one + Sqrt(a * a + one))); auto naive_result = Log(a + Sqrt(a * a + one)); auto overflow_result = Log(Abs(a)) + Log(ScalarLike(a, 2)); auto sqrt_max_value = Sqrt(MaxFiniteValue(b, shape.element_type())); return Sign(x) * Select(Ge(a, sqrt_max_value), overflow_result, Select(Le(a, one), small_result, naive_result)); }; return DoWithUpcastToF32(x, {BF16, F16}, [&](XlaOp x) { return b->ReportErrorOrReturn(do_it(x)); }); } XlaOp Atanh(XlaOp x) { XlaBuilder* b = x.builder(); auto do_it = [&](XlaOp x) -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x)); auto naive_result = (Log1p(x) - Log1p(-x)) * ScalarLike(x, 0.5); if (primitive_util::IsComplexType(shape.element_type())) { return naive_result; } auto nan = FullLike(x, std::numeric_limits<float>::quiet_NaN()); return Select(Gt(Abs(x), ScalarLike(x, 1)), nan, naive_result); }; return DoWithUpcastToF32(x, {BF16}, [&](XlaOp x) { return b->ReportErrorOrReturn(do_it(x)); }); } XlaOp Cosh(XlaOp x) { 
XlaBuilder* b = x.builder(); auto do_it = [&](XlaOp x) -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x)); auto log_one_half = Log(ScalarLike(x, 0.5)); auto result = Exp(x + log_one_half) + Exp(-x + log_one_half); if (primitive_util::IsComplexType(shape.element_type())) { return result; } return Max(result, ScalarLike(result, 1.0)); }; return DoWithUpcastToF32(x, {BF16, F16}, [&](XlaOp x) { return b->ReportErrorOrReturn(do_it(x)); }); } XlaOp Sinh(XlaOp x) { XlaBuilder* b = x.builder(); auto do_it = [&](XlaOp x) -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x)); auto one_half = ScalarLike(x, 0.5); auto log_one_half = Log(ScalarLike(x, 0.5)); auto large_sinh_result = Exp(x + log_one_half) - Exp(-x + log_one_half); if (primitive_util::IsComplexType(shape.element_type())) { return large_sinh_result; } auto expm1 = Expm1(x); auto one = ScalarLike(x, 1.); auto small_sinh_result = one_half * (expm1 + expm1 / (expm1 + one)); return Select(Lt(Abs(x), one), small_sinh_result, large_sinh_result); }; return DoWithUpcastToF32(x, {BF16, F16}, [&](XlaOp x) { return b->ReportErrorOrReturn(do_it(x)); }); } XlaOp MaybeConjugate(XlaOp x, bool conjugate) { XlaBuilder* builder = x.builder(); return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x)); auto perform_conj = primitive_util::IsComplexType(shape.element_type()) && conjugate; return perform_conj ? Conj(x) : x; }); } XlaOp NextAfter(XlaOp from, XlaOp to) { auto builder = from.builder(); return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(auto shape, builder->GetShape(from)); int bitwidth = primitive_util::BitWidth(shape.element_type()); auto int_type = primitive_util::UnsignedIntegralTypeForBitWidth(bitwidth); auto from_as_int = BitcastConvertType(from, int_type); auto to_as_int = BitcastConvertType(to, int_type); auto from_is_nan = Ne(from, from); auto to_is_nan = Ne(to, to); auto nan_input = Or(from_is_nan, to_is_nan); auto result_for_nan = Broadcast(ScalarLike(from, std::numeric_limits<double>::quiet_NaN()), shape.dimensions()); result_for_nan = BitcastConvertType(result_for_nan, int_type); const int64_t sign_mask = int64_t{1} << (bitwidth - 1); auto from_abs = And(from_as_int, ScalarLike(from_as_int, ~sign_mask)); auto to_abs = And(to_as_int, ScalarLike(to_as_int, ~sign_mask)); auto from_and_to_are_equal = Eq(from_as_int, to_as_int); auto result_for_equal = to_as_int; auto from_is_zero = Eq(from_abs, ZerosLike(from_abs)); auto to_is_zero = Eq(to_abs, ZerosLike(to_abs)); auto result_for_both_zero = to_as_int; auto from_sign = And(from_as_int, ScalarLike(from_as_int, sign_mask)); auto to_sign = And(to_as_int, ScalarLike(to_as_int, sign_mask)); auto result_for_from_zero_to_non_zero = Or(to_sign, ScalarLike(from_as_int, 1)); auto signs_disagree = Ne(from_sign, to_sign); auto from_magnitude_larger_than_to = Gt(from_abs, to_abs); auto result_has_smaller_magnitude = Or(from_magnitude_larger_than_to, signs_disagree); auto magnitude_adjustment = Select(result_has_smaller_magnitude, Broadcast(ScalarLike(from_as_int, -1), shape.dimensions()), Broadcast(ScalarLike(from_as_int, 1), shape.dimensions())); auto result = Add(from_as_int, magnitude_adjustment); result = Select(from_is_zero, Select(to_is_zero, result_for_both_zero, result_for_from_zero_to_non_zero), result); result = Select(from_and_to_are_equal, result_for_equal, result); result = Select(nan_input, result_for_nan, result); return 
BitcastConvertType(result, shape.element_type()); }); } static XlaOp I0eImpl32(XlaOp x) { static const std::array<float, 18> kI0eCoeffsA{ -1.30002500998624804212E-8f, 6.04699502254191894932E-8f, -2.67079385394061173391E-7f, 1.11738753912010371815E-6f, -4.41673835845875056359E-6f, 1.64484480707288970893E-5f, -5.75419501008210370398E-5f, 1.88502885095841655729E-4f, -5.76375574538582365885E-4f, 1.63947561694133579842E-3f, -4.32430999505057594430E-3f, 1.05464603945949983183E-2f, -2.37374148058994688156E-2f, 4.93052842396707084878E-2f, -9.49010970480476444210E-2f, 1.71620901522208775349E-1f, -3.04682672343198398683E-1f, 6.76795274409476084995E-1f}; static const std::array<float, 7> kI0eCoeffsB{ 3.39623202570838634515E-9f, 2.26666899049817806459E-8f, 2.04891858946906374183E-7f, 2.89137052083475648297E-6f, 6.88975834691682398426E-5f, 3.36911647825569408990E-3f, 8.04490411014108831608E-1f}; x = Abs(x); auto half = xla::ScalarLike(x, 0.5); auto two = xla::ScalarLike(x, 2.0); auto thirty_two = xla::ScalarLike(x, 32.0); auto result_le_8 = EvaluateChebyshevPolynomial<float>(half * x - two, kI0eCoeffsA); auto result_gt_8 = EvaluateChebyshevPolynomial<float>(thirty_two / x - two, kI0eCoeffsB) / Sqrt(x); return Select(Le(x, xla::ScalarLike(x, 8.0)), result_le_8, result_gt_8); } static XlaOp I0eImpl64(XlaOp x) { static const std::array<double, 30> kI0eCoeffsA{ -4.41534164647933937950E-18, 3.33079451882223809783E-17, -2.43127984654795469359E-16, 1.71539128555513303061E-15, -1.16853328779934516808E-14, 7.67618549860493561688E-14, -4.85644678311192946090E-13, 2.95505266312963983461E-12, -1.72682629144155570723E-11, 9.67580903537323691224E-11, -5.18979560163526290666E-10, 2.65982372468238665035E-9, -1.30002500998624804212E-8, 6.04699502254191894932E-8, -2.67079385394061173391E-7, 1.11738753912010371815E-6, -4.41673835845875056359E-6, 1.64484480707288970893E-5, -5.75419501008210370398E-5, 1.88502885095841655729E-4, -5.76375574538582365885E-4, 1.63947561694133579842E-3, -4.32430999505057594430E-3, 1.05464603945949983183E-2, -2.37374148058994688156E-2, 4.93052842396707084878E-2, -9.49010970480476444210E-2, 1.71620901522208775349E-1, -3.04682672343198398683E-1, 6.76795274409476084995E-1}; static const std::array<double, 25> kI0eCoeffsB{ -7.23318048787475395456E-18, -4.83050448594418207126E-18, 4.46562142029675999901E-17, 3.46122286769746109310E-17, -2.82762398051658348494E-16, -3.42548561967721913462E-16, 1.77256013305652638360E-15, 3.81168066935262242075E-15, -9.55484669882830764870E-15, -4.15056934728722208663E-14, 1.54008621752140982691E-14, 3.85277838274214270114E-13, 7.18012445138366623367E-13, -1.79417853150680611778E-12, -1.32158118404477131188E-11, -3.14991652796324136454E-11, 1.18891471078464383424E-11, 4.94060238822496958910E-10, 3.39623202570838634515E-9, 2.26666899049817806459E-8, 2.04891858946906374183E-7, 2.89137052083475648297E-6, 6.88975834691682398426E-5, 3.36911647825569408990E-3, 8.04490411014108831608E-1}; x = Abs(x); auto half = xla::ScalarLike(x, 0.5); auto two = xla::ScalarLike(x, 2.0); auto thirty_two = xla::ScalarLike(x, 32.0); auto result_le_8 = EvaluateChebyshevPolynomial<double>(half * x - two, kI0eCoeffsA); auto result_gt_8 = EvaluateChebyshevPolynomial<double>(thirty_two / x - two, kI0eCoeffsB) / Sqrt(x); return Select(Le(x, xla::ScalarLike(x, 8.0)), result_le_8, result_gt_8); } XlaOp BesselI0e(XlaOp x) { auto& b = *x.builder(); return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("BesselI0e", x)); TF_ASSIGN_OR_RETURN(auto shape, 
b.GetShape(x)); if (shape.element_type() == F64) { return I0eImpl64(x); } return DoWithUpcastToF32(x, {BF16, F16}, [](XlaOp x) { return I0eImpl32(x); }); }); } static XlaOp I1eImpl32(XlaOp x) { static const std::array<float, 17> kI1eCoeffsA{ 9.38153738649577178388E-9f, -4.44505912879632808065E-8f, 2.00329475355213526229E-7f, -8.56872026469545474066E-7f, 3.47025130813767847674E-6f, -1.32731636560394358279E-5f, 4.78156510755005422638E-5f, -1.61760815825896745588E-4f, 5.12285956168575772895E-4f, -1.51357245063125314899E-3f, 4.15642294431288815669E-3f, -1.05640848946261981558E-2f, 2.47264490306265168283E-2f, -5.29459812080949914269E-2f, 1.02643658689847095384E-1f, -1.76416518357834055153E-1f, 2.52587186443633654823E-1f}; static const std::array<float, 7> kI1eCoeffsB{ -3.83538038596423702205E-9f, -2.63146884688951950684E-8f, -2.51223623787020892529E-7f, -3.88256480887769039346E-6f, -1.10588938762623716291E-4f, -9.76109749136146840777E-3f, 7.78576235018280120474E-1f}; XlaOp z = Abs(x); auto half = xla::ScalarLike(x, 0.5); auto two = xla::ScalarLike(x, 2.0); auto thirty_two = xla::ScalarLike(x, 32.0); auto result_le_8 = z * EvaluateChebyshevPolynomial<float>(half * z - two, kI1eCoeffsA); auto result_gt_8 = EvaluateChebyshevPolynomial<float>(thirty_two / z - two, kI1eCoeffsB) / Sqrt(z); return Sign(x) * Select(Le(z, xla::ScalarLike(x, 8.0)), result_le_8, result_gt_8); } static XlaOp I1eImpl64(XlaOp x) { static const std::array<double, 29> kI1eCoeffsA{ 2.77791411276104639959E-18, -2.11142121435816608115E-17, 1.55363195773620046921E-16, -1.10559694773538630805E-15, 7.60068429473540693410E-15, -5.04218550472791168711E-14, 3.22379336594557470981E-13, -1.98397439776494371520E-12, 1.17361862988909016308E-11, -6.66348972350202774223E-11, 3.62559028155211703701E-10, -1.88724975172282928790E-9, 9.38153738649577178388E-9, -4.44505912879632808065E-8, 2.00329475355213526229E-7, -8.56872026469545474066E-7, 3.47025130813767847674E-6, -1.32731636560394358279E-5, 4.78156510755005422638E-5, -1.61760815825896745588E-4, 5.12285956168575772895E-4, -1.51357245063125314899E-3, 4.15642294431288815669E-3, -1.05640848946261981558E-2, 2.47264490306265168283E-2, -5.29459812080949914269E-2, 1.02643658689847095384E-1, -1.76416518357834055153E-1, 2.52587186443633654823E-1}; static const std::array<double, 25> kI1eCoeffsB{ 7.51729631084210481353E-18, 4.41434832307170791151E-18, -4.65030536848935832153E-17, -3.20952592199342395980E-17, 2.96262899764595013876E-16, 3.30820231092092828324E-16, -1.88035477551078244854E-15, -3.81440307243700780478E-15, 1.04202769841288027642E-14, 4.27244001671195135429E-14, -2.10154184277266431302E-14, -4.08355111109219731823E-13, -7.19855177624590851209E-13, 2.03562854414708950722E-12, 1.41258074366137813316E-11, 3.25260358301548823856E-11, -1.89749581235054123450E-11, -5.58974346219658380687E-10, -3.83538038596423702205E-9, -2.63146884688951950684E-8, -2.51223623787020892529E-7, -3.88256480887769039346E-6, -1.10588938762623716291E-4, -9.76109749136146840777E-3, 7.78576235018280120474E-1}; XlaOp z = Abs(x); auto half = xla::ScalarLike(x, 0.5); auto two = xla::ScalarLike(x, 2.0); auto thirty_two = xla::ScalarLike(x, 32.0); auto result_le_8 = z * EvaluateChebyshevPolynomial<double>(half * z - two, kI1eCoeffsA); auto result_gt_8 = EvaluateChebyshevPolynomial<double>(thirty_two / z - two, kI1eCoeffsB) / Sqrt(z); return Sign(x) * Select(Le(z, xla::ScalarLike(x, 8.0)), result_le_8, result_gt_8); } XlaOp BesselI1e(XlaOp x) { auto& b = *x.builder(); return b.ReportErrorOrReturn([&]() -> 
absl::StatusOr<XlaOp> { TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("BesselI1e", x)); TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(x)); if (shape.element_type() == F64) { return I1eImpl64(x); } return DoWithUpcastToF32(x, {BF16, F16}, [](XlaOp x) { return I1eImpl32(x); }); }); } static XlaOp LentzThompsonBarnettAlgorithm( int64_t num_iterations, double small, double threshold, const ForEachIndexBodyFunction& nth_partial_numerator, const ForEachIndexBodyFunction& nth_partial_denominator, absl::Span<const XlaOp> inputs, absl::string_view name) { auto& b = *inputs.front().builder(); return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_RET_CHECK(num_iterations < INT32_MAX); enum { kIterationIdx, kValuesUnconvergedIdx, kCIdx, kDIdx, kHIdx, kFirstInputIdx, }; auto while_cond_fn = [num_iterations](absl::Span<const XlaOp> values, XlaBuilder* cond_builder) -> absl::StatusOr<XlaOp> { auto iteration = values[kIterationIdx]; auto iterations_remain_cond = Lt(iteration, ScalarLike(iteration, num_iterations)); auto values_unconverged_cond = values[kValuesUnconvergedIdx]; return And(iterations_remain_cond, values_unconverged_cond); }; auto while_body_fn = [small, threshold, &nth_partial_numerator, &nth_partial_denominator]( absl::Span<const XlaOp> values, XlaBuilder* body_builder) -> absl::StatusOr<std::vector<XlaOp>> { XlaOp iteration = values[kIterationIdx]; TF_ASSIGN_OR_RETURN( std::vector<XlaOp> partial_numerator, nth_partial_numerator(iteration, values.subspan(kFirstInputIdx), body_builder)); TF_RET_CHECK(partial_numerator.size() == 1); TF_ASSIGN_OR_RETURN( std::vector<XlaOp> partial_denominator, nth_partial_denominator(iteration, values.subspan(kFirstInputIdx), body_builder)); TF_RET_CHECK(partial_denominator.size() == 1); auto c = partial_denominator[0] + partial_numerator[0] / values[kCIdx]; auto small_constant = FullLike(c, small); c = Select(Lt(Abs(c), small_constant), small_constant, c); auto d = partial_denominator[0] + partial_numerator[0] * values[kDIdx]; d = Select(Lt(Abs(d), small_constant), small_constant, d); d = Reciprocal(d); auto delta = c * d; auto h = values[kHIdx] * delta; std::vector<XlaOp> updated_values(values.size()); updated_values[kIterationIdx] = Add(iteration, ScalarLike(iteration, 1)); updated_values[kCIdx] = c; updated_values[kDIdx] = d; updated_values[kHIdx] = h; std::copy(values.begin() + kFirstInputIdx, values.end(), updated_values.begin() + kFirstInputIdx); auto tolerance_comparison = Ge(Abs(Sub(delta, FullLike(delta, 1.0))), FullLike(delta, threshold)); updated_values[kValuesUnconvergedIdx] = ReduceAll(tolerance_comparison, ConstantR0<bool>(body_builder, false), CreateScalarOrComputation(PRED, body_builder)); return updated_values; }; TF_ASSIGN_OR_RETURN(std::vector<XlaOp> partial_denominator, nth_partial_denominator(Zero(&b, U32), inputs, &b)); TF_RET_CHECK(partial_denominator.size() == 1); auto h = partial_denominator[0]; auto small_constant = FullLike(h, small); h = Select(Lt(Abs(h), small_constant), small_constant, h); std::vector<XlaOp> values(kFirstInputIdx + inputs.size()); values[kIterationIdx] = One(&b, U32); values[kValuesUnconvergedIdx] = ConstantR0<bool>(&b, true); values[kCIdx] = h; values[kDIdx] = FullLike(h, 0.0); values[kHIdx] = h; std::copy(inputs.begin(), inputs.end(), values.begin() + kFirstInputIdx); TF_ASSIGN_OR_RETURN(values, WhileLoopHelper(while_cond_fn, while_body_fn, values, name, &b)); return values[kHIdx]; }); } XlaOp RegularizedIncompleteBeta(XlaOp a, XlaOp b, XlaOp x) { auto& builder = *x.builder(); return 
builder.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(Shape shape, builder.GetShape(a)); TF_ASSIGN_OR_RETURN(Shape b_shape, builder.GetShape(b)); TF_ASSIGN_OR_RETURN(Shape x_shape, builder.GetShape(x)); if (b_shape.element_type() != shape.element_type() || x_shape.element_type() != shape.element_type()) { return InvalidArgument( "Operands to RegularizedIncompleteBeta must have identical types, " "got shapes %s, %s, and %s", shape.ToString(), b_shape.ToString(), x_shape.ToString()); } if (!primitive_util::IsFloatingPointType(shape.element_type())) { return InvalidArgument( "Operands to RegularizedIncompleteBeta must be real-valued " "floating-point, but got %s", PrimitiveType_Name(shape.element_type())); } PrimitiveType element_type = shape.element_type(); if (element_type == F16 || element_type == BF16) { element_type = F32; a = ConvertElementType(a, F32); b = ConvertElementType(b, F32); x = ConvertElementType(x, F32); } auto NthPartialBetaincNumerator = [&](XlaOp iteration, absl::Span<const XlaOp> inputs, XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> { auto a = inputs[0]; auto b = inputs[1]; auto x = inputs[2]; auto iteration_bcast = Broadcast(iteration, shape.dimensions()); auto iteration_is_even = Eq(iteration_bcast % FullLike(iteration_bcast, 2), FullLike(iteration_bcast, 0)); auto iteration_is_one = Eq(iteration_bcast, FullLike(iteration_bcast, 1)); auto iteration_minus_one = iteration_bcast - FullLike(iteration_bcast, 1); auto m = iteration_minus_one / FullLike(iteration_minus_one, 2); m = ConvertElementType(m, element_type); auto one = FullLike(a, 1.0); auto two = FullLike(a, 2.0); auto even_numerator = -(a + m) * (a + b + m) * x / ((a + two * m) * (a + two * m + one)); auto odd_numerator = m * (b - m) * x / ((a + two * m - one) * (a + two * m)); auto one_numerator = ScalarLike(x, 1.0); auto numerator = Select(iteration_is_even, even_numerator, odd_numerator); return std::vector<XlaOp>{ Select(iteration_is_one, one_numerator, numerator)}; }; auto NthPartialBetaincDenominator = [&shape](XlaOp iteration, absl::Span<const XlaOp> inputs, XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> { auto x = inputs[2]; auto iteration_bcast = Broadcast(iteration, shape.dimensions()); return std::vector<XlaOp>{ Select(Eq(iteration_bcast, ScalarLike(iteration_bcast, 0)), ScalarLike(x, 0.0), ScalarLike(x, 1.0))}; }; auto result_is_nan = Or(Or(Or(Le(a, ScalarLike(a, 0.0)), Le(b, ScalarLike(b, 0.0))), Lt(x, ScalarLike(x, 0.0))), Gt(x, ScalarLike(x, 1.0))); auto converges_rapidly = Lt(x, (a + FullLike(a, 1.0)) / (a + b + FullLike(b, 2.0))); auto a_orig = a; a = Select(converges_rapidly, a, b); b = Select(converges_rapidly, b, a_orig); x = Select(converges_rapidly, x, Sub(FullLike(x, 1.0), x)); XlaOp continued_fraction; if (element_type == F32) { continued_fraction = LentzThompsonBarnettAlgorithm( 200, std::numeric_limits<float>::epsilon() / 2.0f, std::numeric_limits<float>::epsilon() / 2.0f, NthPartialBetaincNumerator, NthPartialBetaincDenominator, {a, b, x}, "Betainc"); } else { TF_RET_CHECK(element_type == F64); continued_fraction = LentzThompsonBarnettAlgorithm( 600, std::numeric_limits<double>::epsilon() / 2.0f, std::numeric_limits<double>::epsilon() / 2.0f, NthPartialBetaincNumerator, NthPartialBetaincDenominator, {a, b, x}, "Betainc"); } auto lbeta = Lbeta(a, b); auto result = continued_fraction * Exp(Log(x) * a + Log1p(-x) * b - lbeta) / a; result = Select(result_is_nan, NanValue(&builder, element_type), result); auto out = Select(converges_rapidly, 
result, Sub(FullLike(result, 1.0), result)); return shape.element_type() == element_type ? out : ConvertElementType(out, shape.element_type()); }); } XlaOp Polygamma(XlaOp n, XlaOp x) { auto& builder = *x.builder(); auto doit = [](XlaOp n, XlaOp x, PrimitiveType type) -> XlaOp { XlaOp n_plus_one = n + ScalarLike(n, 1.); XlaOp sign = (ScalarLike(n, 2.) * Rem(n, ScalarLike(n, 2.)) - ScalarLike(n, 1.)); const double nan = std::numeric_limits<double>::quiet_NaN(); XlaOp output = Select(Eq(n, ScalarLike(n, 0.)), Digamma(x), sign * Exp(Lgamma(n_plus_one)) * Zeta(n_plus_one, x)); output = Select(Or(Ne(n, Floor(n)), Lt(n, ScalarLike(n, 0.))), ScalarLike(n, nan), output); return output; }; return builder.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(auto n_shape, builder.GetShape(n)); TF_ASSIGN_OR_RETURN(auto x_shape, builder.GetShape(x)); if (n_shape != x_shape) { return InvalidArgument( "Arguments to Polygamma must have equal shapes and types; " "got %s and %s", n_shape.ToString(), x_shape.ToString()); } TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Polygamma", x)); bool needs_upcast = n_shape.element_type() == F16 || n_shape.element_type() == BF16; if (needs_upcast) { n = ConvertElementType(n, F32); x = ConvertElementType(x, F32); } XlaOp result = doit(n, x, n_shape.element_type()); if (needs_upcast) { result = ConvertElementType(result, n_shape.element_type()); } return result; }); } XlaOp Zeta(XlaOp x, XlaOp q) { auto& builder = *x.builder(); auto doit = [&builder](XlaOp x, XlaOp q, PrimitiveType type) -> XlaOp { static constexpr int M = 12, N = 9; static const std::array<double, M> kZetaCoeffs{ -7.1661652561756670113e18, 1.8152105401943546773e17, -4.5979787224074726105e15, 1.1646782814350067249e14, -2.950130727918164224e12, 7.47242496e10, -1.8924375803183791606e9, 47900160.0, -1209600.0, 30240.0, -720.0, 12.0, }; XlaOp acc = q, neg_power = ScalarLike(q, 0.); XlaOp S = Pow(q, Neg(x)); for (int i = 0; i < N; ++i) { acc = acc + ScalarLike(acc, 1.); neg_power = Pow(acc, Neg(x)); S = S + neg_power; } acc = acc + ScalarLike(acc, 1.); neg_power = Pow(acc, Neg(x)); XlaOp I = neg_power * acc / (x - ScalarLike(acc, 1.)); XlaOp a_inverse_square = Reciprocal(Square(acc)); XlaOp horner_sum = ScalarLike(acc, 0.); XlaOp factor = ScalarLike(acc, 1.); static constexpr int kTwoKMinusOne = 2 * M - 1; for (int i = 0; i < M - 1; ++i) { factor = (x + ScalarLike(x, kTwoKMinusOne - 1 - 2 * i)) * (x + ScalarLike(x, kTwoKMinusOne - 2 - 2 * i)); horner_sum = factor * a_inverse_square * (horner_sum + ScalarLike(acc, 1. / kZetaCoeffs[i])); } XlaOp T = neg_power * (ScalarLike(neg_power, 0.5) + x / acc * (ScalarLike(acc, 1.
/ kZetaCoeffs[M - 1]) + horner_sum)); XlaOp accurate_result = S + I + T; const double nan = std::numeric_limits<double>::quiet_NaN(); const double inf = std::numeric_limits<double>::infinity(); XlaOp output = Select(Lt(Abs(neg_power), Abs(S) * Epsilon(&builder, type)), S, accurate_result); output = Select(Eq(x, ScalarLike(x, 1.)), ScalarLike(x, inf), output); output = Select(Lt(x, ScalarLike(x, 1.)), ScalarLike(x, nan), output); XlaOp x_domain_error = And(Le(q, ScalarLike(x, 0.)), Ne(x, Floor(x))); output = Select(x_domain_error, ScalarLike(x, nan), output); XlaOp at_pole = And(Le(q, ScalarLike(x, 0.)), Eq(q, Floor(q))); XlaOp x_is_even_int = And(Eq(Rem(x, ScalarLike(x, 2.)), ScalarLike(x, 0.)), Eq(x, Floor(x))); output = Select( at_pole, Select(x_is_even_int, ScalarLike(x, inf), ScalarLike(x, nan)), output); return output; }; return builder.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> { TF_ASSIGN_OR_RETURN(auto x_shape, builder.GetShape(x)); TF_ASSIGN_OR_RETURN(auto q_shape, builder.GetShape(q)); if (x_shape != q_shape) { return InvalidArgument( "Arguments to Zeta must have equal shapes and types; got %s and %s", x_shape.ToString(), q_shape.ToString()); } TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Zeta", x)); bool needs_upcast = x_shape.element_type() == F16 || x_shape.element_type() == BF16; if (needs_upcast) { x = ConvertElementType(x, F32); q = ConvertElementType(q, F32); } XlaOp result = doit(x, q, x_shape.element_type()); if (needs_upcast) { result = ConvertElementType(result, x_shape.element_type()); } return result; }); } }
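The Igamma/Igammac implementations above pair a power-series expansion with a modified Lentz continued-fraction evaluation (LentzThompsonBarnettAlgorithm wraps the same recurrence in an XLA While loop). Below is a minimal standalone double-precision sketch of that continued fraction for the regularized upper incomplete gamma Q(a, x), valid for x > a + 1. It is illustrative plain C++, not the XLA API; the guard value kTiny, the tolerance kEps, and the iteration cap are arbitrary choices for the sketch.

// Standalone sketch of the modified Lentz continued-fraction evaluation that
// Igammac relies on above; the XLA version vectorizes this same recurrence.
#include <cmath>
#include <cstdio>

double IgammacViaLentz(double a, double x) {
  const double kTiny = 1e-30;  // guards against division by ~0
  const double kEps = 1e-15;   // convergence threshold on |delta - 1|
  double b = x + 1.0 - a;      // b0: first partial denominator
  double c = 1.0 / kTiny;
  double d = 1.0 / b;
  double h = d;
  for (int i = 1; i < 200; ++i) {
    const double an = -i * (i - a);  // i-th partial numerator
    b += 2.0;
    d = an * d + b;
    if (std::fabs(d) < kTiny) d = kTiny;
    c = b + an / c;
    if (std::fabs(c) < kTiny) c = kTiny;
    d = 1.0 / d;
    const double delta = d * c;
    h *= delta;
    if (std::fabs(delta - 1.0) < kEps) break;  // converged
  }
  // Prefactor exp(a*log(x) - x - lgamma(a)), mirroring `ax` in the code above.
  return std::exp(a * std::log(x) - x - std::lgamma(a)) * h;
}

int main() {
  // Sanity check: Q(1, x) = exp(-x).
  std::printf("%.12f vs %.12f\n", IgammacViaLentz(1.0, 3.0), std::exp(-3.0));
}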
#include "xla/hlo/builder/lib/math.h" #include <cmath> #include <complex> #include <functional> #include <limits> #include <memory> #include <string> #include <utility> #include <vector> #include <gtest/gtest.h> #include "xla/array3d.h" #include "xla/error_spec.h" #include "xla/hlo/builder/lib/constants.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/literal.h" #include "xla/literal_util.h" #include "xla/primitive_util.h" #include "xla/service/service.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/test.h" #include "xla/tests/client_library_test_base.h" #include "xla/tests/test_macros.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/types.h" #include "xla/xla_data.pb.h" namespace xla { namespace { class MathTest : public ClientLibraryTestBase { public: ErrorSpec error_spec_{0.0001}; }; template <typename T> class MathTypedTest : public MathTest { public: void TestLogEdgeCases() { SetFastMathDisabled(true); XlaBuilder b(TestName()); Log(AddParam(LiteralUtil::CreateR1<T>({T{0.0}, T{-0.0}}), &b)); ComputeAndCompareR1<T>(&b, {-std::numeric_limits<T>::infinity(), -std::numeric_limits<T>::infinity()}, {}, error_spec_); } void TestLog1pEdgeCases() { SetFastMathDisabled(true); XlaBuilder b(TestName()); Log1p(AddParam(LiteralUtil::CreateR1<T>({T{0.0}, T{-0.0}, T{-1.0}}), &b)); ComputeAndCompareR1<T>( &b, {T{0.0}, T{-0.0}, -std::numeric_limits<T>::infinity()}, {}, error_spec_); } void TestIsInfOrNan() { SetFastMathDisabled(true); XlaBuilder b(TestName()); auto x = ConstantR1<T>(&b, { T{0}, T{100}, T{-1000}, T{std::numeric_limits<T>::max()}, T{std::numeric_limits<T>::lowest()}, T{std::numeric_limits<float>::infinity()}, T{-std::numeric_limits<float>::infinity()}, T{std::numeric_limits<float>::quiet_NaN()}, T{std::numeric_limits<float>::signaling_NaN()}, }); Tuple(&b, {IsFinite(x), IsInf(x), IsPosInf(x), IsNegInf(x), IsNan(x)}); auto expected = LiteralUtil::MakeTupleOwned( LiteralUtil::CreateR1<bool>( {true, true, true, true, true, false, false, false, false}), LiteralUtil::CreateR1<bool>( {false, false, false, false, false, true, true, false, false}), LiteralUtil::CreateR1<bool>( {false, false, false, false, false, true, false, false, false}), LiteralUtil::CreateR1<bool>( {false, false, false, false, false, false, true, false, false}), LiteralUtil::CreateR1<bool>( {false, false, false, false, false, false, false, true, true})); ComputeAndCompareLiteral(&b, expected, {}); } void TestIsNegZero() { SetFastMathDisabled(true); XlaBuilder b(TestName()); T inf(std::numeric_limits<float>::infinity()); T nan(std::numeric_limits<float>::quiet_NaN()); IsNegZero(AddParam( LiteralUtil::CreateR1<T>({T{-0.0}, T{0}, T{1}, T{-1}, inf, -inf, nan}), &b)); ComputeAndCompareLiteral( &b, LiteralUtil::CreateR1<bool>( {true, false, false, false, false, false, false}), {}, error_spec_); } void TestSqrtPowInequivalence() { SetFastMathDisabled(true); mutable_debug_options()->clear_xla_disable_hlo_passes(); const T inf(std::numeric_limits<float>::infinity()); const T nan(std::numeric_limits<float>::quiet_NaN()); XlaBuilder b(TestName()); auto x = AddParam(LiteralUtil::CreateR1<T>({-inf}), &b); ConcatInDim( &b, {Sqrt(x), Pow(x, ScalarLike(x, 0.5)), Pow(x, ScalarLike(x, 0.3))}, 0); std::vector<T> expected = {nan, inf, inf}; ComputeAndCompareR1<T>(&b, expected, {}, error_spec_); } void TestErfInvEdgeCases() { SetFastMathDisabled(true); XlaBuilder b(TestName()); auto x = AddParam(LiteralUtil::CreateR1<T>({T{-1}, T{1}, T{0}}), &b); ErfInv(x); const T 
inf(std::numeric_limits<float>::infinity()); std::vector<T> expected = {-inf, inf, T{0}}; ComputeAndCompareR1<T>(&b, expected, {}, error_spec_); } void TestErfEdgeCases() { SetFastMathDisabled(true); const T kErfInvOneMinusHalfULP = T(3.832506856900711); const T inf(std::numeric_limits<float>::infinity()); XlaBuilder b(TestName()); auto x = AddParam(LiteralUtil::CreateR1<T>({T{-inf}, T{inf}, T{-0}, T{0}, T{-kErfInvOneMinusHalfULP}, T{kErfInvOneMinusHalfULP}}), &b); Erf(x); std::vector<T> expected = {T(-1), T(1), T(-0), T(0), T(-1), T(1)}; ComputeAndCompareR1<T>(&b, expected, {}, error_spec_); } }; using TestTypes = ::testing::Types<float #ifndef XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT16 , Eigen::half #endif #ifndef XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT64 , double #endif >; TYPED_TEST_CASE(MathTypedTest, TestTypes); XLA_TYPED_TEST(MathTypedTest, LogEdgeCases) { this->TestLogEdgeCases(); } XLA_TYPED_TEST(MathTypedTest, Log1pEdgeCases) { this->TestLog1pEdgeCases(); } XLA_TYPED_TEST(MathTypedTest, IsInfOrNan) { this->TestIsInfOrNan(); } XLA_TYPED_TEST(MathTypedTest, IsNegZero) { this->TestIsNegZero(); } XLA_TYPED_TEST(MathTypedTest, DISABLED_ON_TPU(SqrtPowInequivalence)) { this->TestSqrtPowInequivalence(); } XLA_TYPED_TEST(MathTypedTest, ErfInvEdgeCases) { this->TestErfInvEdgeCases(); } XLA_TYPED_TEST(MathTypedTest, ErfEdgeCases) { this->TestErfEdgeCases(); } XLA_TEST_F(MathTest, RealFpOnlyOps) { for (int64_t i = PrimitiveType_MIN; i <= PrimitiveType_MAX; ++i) { auto ty = static_cast<PrimitiveType>(i); SCOPED_TRACE(PrimitiveType_Name(ty)); Shape shape; if (ty == U4 || ty == S4) { continue; } if (primitive_util::IsArrayType(ty)) { shape = ShapeUtil::MakeShape(ty, {42}); } else if (ty == PrimitiveType::TUPLE) { shape = ShapeUtil::MakeTupleShape({}); } else if (ty == PrimitiveType::OPAQUE_TYPE) { shape = ShapeUtil::MakeOpaqueShape(); } else if (ty == PrimitiveType::TOKEN) { shape = ShapeUtil::MakeTokenShape(); } else { continue; } for (const auto& test : std::vector<std::pair<std::function<XlaOp(XlaOp)>, std::string>>({ {IsFinite, "is_finite"}, {IsInf, "is_inf"}, {IsPosInf, "is_pos_inf"}, {IsNegInf, "is_neg_inf"}, {IsNan, "is_nan"}, {Erf, "erf"}, {Erfc, "erfc"}, {Lgamma, "lgamma"}, {Digamma, "digamma"}, {RoundToEven, "round_to_even"}, })) { SCOPED_TRACE(test.second); XlaBuilder b(TestName()); XlaOp p = Parameter(&b, 0, shape, "p0"); test.first(p); if (primitive_util::IsFloatingPointType(ty)) { TF_EXPECT_OK(b.first_error()); } else { EXPECT_FALSE(b.first_error().ok()); } } } } XLA_TEST_F(MathTest, SqrtF32) { XlaBuilder builder(TestName()); Literal zero_literal = LiteralUtil::Zero(PrimitiveType::F32); std::unique_ptr<GlobalData> zero_data = client_->TransferToServer(zero_literal).value(); XlaOp zero = Parameter(&builder, 0, zero_literal.shape(), "zero"); Sqrt(zero); ComputeAndCompareR0<float>(&builder, 0.0f, {zero_data.get()}, error_spec_); } XLA_TEST_F(MathTest, SqrtF64) { XlaBuilder builder(TestName()); Literal zero_literal = LiteralUtil::Zero(PrimitiveType::F64); std::unique_ptr<GlobalData> zero_data = client_->TransferToServer(zero_literal).value(); XlaOp zero = Parameter(&builder, 0, zero_literal.shape(), "zero"); Sqrt(zero); ComputeAndCompareR0<double>(&builder, 0.0f, {zero_data.get()}, error_spec_); } #ifndef XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT64 XLA_TEST_F(MathTest, ErfInvF64) { XlaBuilder builder(TestName()); auto x = ConstantR1<double>( &builder, {-0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9}); ErfInv(x); std::vector<double> expected = 
{-1.163087153676674, -0.9061938024368231, -0.732869077959217, -0.5951160814499948, -0.4769362762044698, -0.37080715859355795, -0.27246271472675443, -0.1791434546212916, -0.08885599049425767, 0., 0.08885599049425777, 0.1791434546212916, 0.27246271472675443, 0.37080715859355784, 0.4769362762044698, 0.5951160814499948, 0.732869077959217, 0.9061938024368231, 1.1630871536766736}; ComputeAndCompareR1<double>(&builder, expected, {}, ErrorSpec{1e-15}); } #endif XLA_TEST_F(MathTest, SquareTenValues) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>( &builder, {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6}); Square(x); std::vector<float> expected = {4.41, 6.76, 6.76, 16., 4.41, 5.29, 25., 0.81, 5.76, 2.56}; ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_); } XLA_TEST_F(MathTest, ReciprocalTenValues) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>( &builder, {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6}); Reciprocal(x); std::vector<float> expected = { 0.47619048, -0.38461538, 0.38461538, -0.25, 0.47619048, 0.43478261, -0.2, -1.11111111, -0.41666667, 0.625}; ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_); } XLA_TEST_F(MathTest, SqrtZeroes) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>(&builder, {0.0, -0.0}); Sqrt(x); ComputeAndCompareR1<float>(&builder, {0, 0}, {}, error_spec_); } XLA_TEST_F(MathTest, SqrtSixValues) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>(&builder, {16.0, 1.0, 1024.0, 0.16, 0.2, 12345}); Sqrt(x); std::vector<float> expected = {4, 1, 32, 0.4, 0.4472, 111.1080}; ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_); } XLA_TEST_F(MathTest, CbrtSixF32Values) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>(&builder, {8.0, 1.0, 4096.0, -64.0, 1.728, 1331}); Cbrt(x); std::vector<float> expected = {2, 1, 16, -4, 1.2, 11}; ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.001)); } XLA_TEST_F(MathTest, CbrtSixF64Values) { XlaBuilder builder(TestName()); auto x = ConstantR1<double>(&builder, {8.0, 1.0, 4096.0, -64.0, 1.728, 1331}); Cbrt(x); std::vector<double> expected = {2, 1, 16, -4, 1.2, 11}; ComputeAndCompareR1<double>(&builder, expected, {}, ErrorSpec(0.001)); } XLA_TEST_F(MathTest, SinhSmallValues) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>(&builder, {1e-3, 1e-5, 1e-7, 1e-9, 1e-11}); Sinh(x); std::vector<float> expected = {1e-3, 1e-5, 1e-7, 1e-9, 1e-11}; ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_); } XLA_TEST_F(MathTest, AsinhSmallValues) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>(&builder, {1e-3, 1e-5, 1e-7, 1e-9, 1e-11}); Asinh(x); std::vector<float> expected = {1e-3, 1e-5, 1e-7, 1e-9, 1e-11}; ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_); } XLA_TEST_F(MathTest, AtanhSmallValues) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>(&builder, {1e-8, 1e-9, 1e-10, 1e-11}); Atanh(x); std::vector<float> expected = {1e-8, 1e-9, 1e-10, 1e-11}; ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_); } XLA_TEST_F(MathTest, Lgamma) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>(&builder, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.5, 1.5, 2.5, -1.5, -3.5, -5.5}); Lgamma(x); std::vector<float> expected = { 0, 0, static_cast<float>(std::log(2)), static_cast<float>(std::log(6)), static_cast<float>(std::log(24)), static_cast<float>(std::log(120)), static_cast<float>(std::log(M_PI) / 2), static_cast<float>(std::log(M_PI) / 2 - std::log(2)), 
static_cast<float>(std::log(M_PI) / 2 - std::log(4) + std::log(3)), static_cast<float>(std::log(M_PI) / 2 - std::log(3) + std::log(4)), static_cast<float>(std::log(M_PI) / 2 - std::log(105) + std::log(16)), static_cast<float>(std::log(M_PI) / 2 - std::log(10395) + std::log(64))}; error_spec_ = ErrorSpec{0.001}; ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_); } #if !defined(XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT16) XLA_TEST_F(MathTest, LgammaF16) { SetFastMathDisabled(true); XlaBuilder b(TestName()); auto x = ConstantR1<half>(&b, { half(-7360.0), half(-4066.0), half(-5.9605e-08), }); Lgamma(x); std::vector<half> expected = { std::numeric_limits<half>::infinity(), std::numeric_limits<half>::infinity(), half(16.64), }; ComputeAndCompareR1<half>(&b, expected, {}, ErrorSpec{0.1}); } #endif XLA_TEST_F(MathTest, Digamma) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>(&builder, {1.0, 0.5, 1 / 3.0, 0.25, 1 / 6.0, 0.125, 2.0, 3.0, 4.0, 6.0, 8.0, 9.0}); Digamma(x); constexpr double euler_mascheroni = 0.57721566490153286060651209008240243104215933593992; std::vector<float> expected = { static_cast<float>(-euler_mascheroni), static_cast<float>(-2 * std::log(2) - euler_mascheroni), static_cast<float>(-M_PI / 2 / std::sqrt(3) - 3 * std::log(3) / 2 - euler_mascheroni), static_cast<float>(-M_PI / 2 - 3 * std::log(2) - euler_mascheroni), static_cast<float>(-M_PI * std::sqrt(3) / 2 - 2 * std::log(2) - 3 * std::log(3) / 2 - euler_mascheroni), static_cast<float>( -M_PI / 2 - 4 * std::log(2) - (M_PI + std::log(2 + std::sqrt(2)) - std::log(2 - std::sqrt(2))) / std::sqrt(2) - euler_mascheroni), static_cast<float>(1 - euler_mascheroni), static_cast<float>(1.5 - euler_mascheroni), static_cast<float>(11 / 6.0 - euler_mascheroni), static_cast<float>(137 / 60.0 - euler_mascheroni), static_cast<float>(363 / 140.0 - euler_mascheroni), static_cast<float>(761 / 280.0 - euler_mascheroni)}; ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_); } XLA_TEST_F(MathTest, Igamma) { XlaBuilder builder(TestName()); auto a = ConstantR3FromArray3D<float>( &builder, {{{0.3760359, 1.62685306, 0.53327996, 1.5111382, 0.3521143}, {1.79378175, 1.05317882, 0.85049253, 1.399534, 0.22073882}, {1.17725309, 0.90727209, 1.32418503, 1.53238533, 0.51984756}}}); auto x = ConstantR3FromArray3D<float>( &builder, {{{0.56420934, 8.97671773, 2.81068609, 4.50655124, 2.88178617}, {1.01795164, 8.86298411, 0.29232942, 8.17661015, 5.67652269}, {1.59959565, 0.54463897, 0.6585252, 9.83192283, 3.93372669}}}); Igamma(a, x); Array3D<float> expected = { {{0.78746926, 0.99940502, 0.98028261, 0.97033807, 0.99054696}, {0.33265522, 0.99983558, 0.32599159, 0.99923275, 0.99980893}, {0.74343963, 0.46703197, 0.33923541, 0.99978511, 0.99460685}}}; ComputeAndCompareR3<float>(&builder, expected, {}, error_spec_); } XLA_TEST_F(MathTest, IgammaSpecialValues) { SetFastMathDisabled(true); XlaBuilder builder(TestName()); const float nan = std::numeric_limits<float>::quiet_NaN(); auto a = ConstantR1<float>(&builder, {nan, nan, 0.53327996, -6.00773744602e+37, -1.3937809742e+31, -23.351348877}); auto x = ConstantR1<float>( &builder, {nan, 8.97671773, nan, nan, 0.0, 6.02455484352e-39}); Igamma(a, x); std::vector<float> expected = {nan, nan, nan, nan, nan, nan}; ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_); } #if !defined(XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT16) XLA_TEST_F(MathTest, IgammaF16) { SetFastMathDisabled(true); XlaBuilder builder(TestName()); auto a = ConstantR3FromArray3D<half>( &builder, {{{half(0.37603), half(1.6268), 
half(0.53327), half(1.5111)}, {half(1.79378), half(1.05317), half(0.85049), half(1.3995)}, {half(1.17725), half(0.90727), half(1.32418), half(1.5323)}}}); Igamma(a, a); Array3D<half> expected = { {{half(0.7068214), half(0.6041154), half(0.67748886), half(0.60799426)}, {half(0.599202), half(0.6288743), half(0.64280254), half(0.6121421)}, {half(0.6220287), half(0.6384635), half(0.6152258), half(0.6072449)}}}; ComputeAndCompareR3<half>(&builder, expected, {}, ErrorSpec{1e-3}); } #endif XLA_TEST_F(MathTest, Igammac) { XlaBuilder builder(TestName()); auto a = ConstantR3FromArray3D<float>( &builder, {{{0.3760359, 1.62685306, 0.53327996, 1.5111382, 0.3521143}, {1.79378175, 1.05317882, 0.85049253, 1.399534, 0.22073882}, {1.17725309, 0.90727209, 1.32418503, 1.53238533, 0.51984756}}}); auto x = ConstantR3FromArray3D<float>( &builder, {{{0.56420934, 8.97671773, 2.81068609, 4.50655124, 2.88178617}, {1.01795164, 8.86298411, 0.29232942, 8.17661015, 5.67652269}, {1.59959565, 0.54463897, 0.6585252, 9.83192283, 3.93372669}}}); Igammac(a, x); Array3D<float> expected = {{{2.12530741e-01, 5.94977775e-04, 1.97173867e-02, 2.96619296e-02, 9.45303689e-03}, {6.67344782e-01, 1.64421996e-04, 6.74008406e-01, 7.67252602e-04, 1.91071108e-04}, {2.56560373e-01, 5.32968026e-01, 6.60764593e-01, 2.14889688e-04, 5.39314824e-03}}}; ComputeAndCompareR3<float>(&builder, expected, {}, error_spec_); } #if !defined(XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT16) XLA_TEST_F(MathTest, IgammacF16) { SetFastMathDisabled(true); XlaBuilder builder(TestName()); auto a = ConstantR3FromArray3D<half>( &builder, {{{half(0.37603), half(1.6268), half(0.53327), half(1.5111)}, {half(1.79378), half(1.05317), half(0.85049), half(1.3995)}, {half(1.17725), half(0.90727), half(1.32418), half(1.5323)}}}); Igammac(a, a); Array3D<half> expected = { {{half(0.29317862), half(0.39588454), half(0.32251117), half(0.39200574)}, {half(0.40079802), half(0.37112573), half(0.35719746), half(0.3878579)}, {half(0.3779713), half(0.36153653), half(0.38477424), half(0.39275512)}}}; ComputeAndCompareR3<half>(&builder, expected, {}, ErrorSpec{1e-4}); } #endif XLA_TEST_F(MathTest, RoundToEven) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>( &builder, {-1.4, -1.5, -2.5, -0.5, 0, 0.5, 1.5, 2.5, 3.5, 4.5}); RoundToEven(x); std::vector<float> expected = {-1.0, -2.0, -2.0, -0.0, 0, 0.0, 2.0, 2.0, 4.0, 4.0}; ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_); } XLA_TEST_F(MathTest, ErfRejectsComplexInputs) { XlaBuilder b(TestName()); auto x = ConstantR1<std::complex<float>>(&b, {{0, 0}}); Erf(x); EXPECT_FALSE(b.Build().status().ok()); } XLA_TEST_F(MathTest, ErfcRejectsComplexInputs) { XlaBuilder b(TestName()); auto x = ConstantR1<std::complex<float>>(&b, {{0, 0}}); Erfc(x); EXPECT_FALSE(b.Build().status().ok()); } XLA_TEST_F(MathTest, LgammaRejectsComplexInputs) { XlaBuilder b(TestName()); auto x = ConstantR1<std::complex<float>>(&b, {{0, 0}}); Lgamma(x); EXPECT_FALSE(b.Build().status().ok()); } XLA_TEST_F(MathTest, DigammaRejectsComplexInputs) { XlaBuilder b(TestName()); auto x = ConstantR1<std::complex<float>>(&b, {{0, 0}}); Digamma(x); EXPECT_FALSE(b.Build().status().ok()); } XLA_TEST_F(MathTest, RoundToEvenRejectsComplexInputs) { XlaBuilder b(TestName()); auto x = ConstantR1<std::complex<float>>(&b, {{0, 0}}); RoundToEven(x); EXPECT_FALSE(b.Build().status().ok()); } XLA_TEST_F(MathTest, BesselI0eFloat) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>( &builder, {-20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0, 2.0, 4.0, 
6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0}); BesselI0e(x); std::vector<float> expected = {0.0897803118848, 0.0947062952128, 0.100544127361, 0.107615251671, 0.116426221213, 0.127833337163, 0.143431781857, 0.16665743264, 0.207001921224, 0.308508322554, 1.0, 0.308508322554, 0.207001921224, 0.16665743264, 0.143431781857, 0.127833337163, 0.116426221213, 0.107615251671, 0.100544127361, 0.0947062952128, 0.0897803118848}; ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_); } XLA_TEST_F(MathTest, DISABLED_ON_TPU(BesselI0eDouble)) { XlaBuilder builder(TestName()); auto x = ConstantR1<double>( &builder, {-20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0}); BesselI0e(x); std::vector<double> expected = {0.0897803118848, 0.0947062952128, 0.100544127361, 0.107615251671, 0.116426221213, 0.127833337163, 0.143431781857, 0.16665743264, 0.207001921224, 0.308508322554, 1.0, 0.308508322554, 0.207001921224, 0.16665743264, 0.143431781857, 0.127833337163, 0.116426221213, 0.107615251671, 0.100544127361, 0.0947062952128, 0.0897803118848}; ComputeAndCompareR1<double>(&builder, expected, {}, error_spec_); } XLA_TEST_F(MathTest, BesselI1eFloat) { XlaBuilder builder(TestName()); auto x = ConstantR1<float>( &builder, {-20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0}); BesselI1e(x); std::vector<float> expected = {-0.0875062221833, -0.092036796872, -0.0973496147565, -0.103697667463, -0.11146429929, -0.121262681384, -0.134142493293, -0.152051459309, -0.178750839502, -0.215269289249, 0.0, 0.215269289249, 0.178750839502, 0.152051459309, 0.134142493293, 0.121262681384, 0.11146429929, 0.103697667463, 0.0973496147565, 0.092036796872, 0.0875062221833}; ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_); } XLA_TEST_F(MathTest, DISABLED_ON_TPU(BesselI1eDouble)) { XlaBuilder builder(TestName()); auto x = ConstantR1<double>( &builder, {-20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0}); BesselI1e(x); std::vector<double> expected = {-0.0875062221833, -0.092036796872, -0.0973496147565, -0.103697667463, -0.11146429929, -0.121262681384, -0.134142493293, -0.152051459309, -0.178750839502, -0.215269289249, 0.0, 0.215269289249, 0.178750839502, 0.152051459309, 0.134142493293, 0.121262681384, 0.11146429929, 0.103697667463, 0.0973496147565, 0.092036796872, 0.0875062221833}; ComputeAndCompareR1<double>(&builder, expected, {}, error_spec_); } XLA_TEST_F(MathTest, AcosComplexValues) { XlaBuilder builder(TestName()); auto x = ConstantR1<std::complex<float>>( &builder, {{0, 0}, {0, 1}, {1, 1}, {0.8, 0.2}}); Acos(x); std::vector<std::complex<float>> expected = { {1.5707963267948966, 0}, {1.5707963267948966, -0.881373587019543}, {0.9045568943023814, -1.0612750619050357}, {0.7011246914497526, -0.30527648462436596}}; ComputeAndCompareR1<std::complex<float>>(&builder, expected, {}, error_spec_); } XLA_TEST_F(MathTest, ZetaF64) { XlaBuilder builder(TestName()); auto x = ConstantR1<double>(&builder, {2.0}); auto q = ConstantR1<double>(&builder, {1.0}); Zeta(x, q); std::vector<double> expected = {1.64493406684823}; ComputeAndCompareR1<double>(&builder, expected, {}, ErrorSpec{0.00000000000001}); } } }
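The Lgamma test above encodes its expectations as closed forms: factorial logs for positive integers and reflection-formula values for negative half-integers. A quick standalone cross-check of those constants against std::lgamma (plain C++, not an XLA test):

// Cross-checks the closed-form expectations used in MathTest.Lgamma.
#include <cmath>
#include <cstdio>

int main() {
  const double kInputs[] = {1.0, 2.0, 3.0, 4.0,  5.0,  6.0,
                            0.5, 1.5, 2.5, -1.5, -3.5, -5.5};
  const double kClosedForm[] = {
      0.0, 0.0, std::log(2.0), std::log(6.0), std::log(24.0), std::log(120.0),
      std::log(M_PI) / 2,                  // lgamma(0.5) = log(sqrt(pi))
      std::log(M_PI) / 2 - std::log(2.0),  // lgamma(1.5)
      std::log(M_PI) / 2 - std::log(4.0) + std::log(3.0),
      std::log(M_PI) / 2 - std::log(3.0) + std::log(4.0),  // |Gamma(-1.5)|
      std::log(M_PI) / 2 - std::log(105.0) + std::log(16.0),
      std::log(M_PI) / 2 - std::log(10395.0) + std::log(64.0)};
  for (int i = 0; i < 12; ++i) {
    std::printf("x=% .1f  closed-form=% .6f  std::lgamma=% .6f\n",
                kInputs[i], kClosedForm[i], std::lgamma(kInputs[i]));
  }
}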
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/math.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/math_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
5ac1b344-4708-4ddf-93cf-5d8ed5df8d87
cpp
tensorflow/tensorflow
reader
tensorflow/cc/saved_model/reader.cc
tensorflow/cc/saved_model/reader_test.cc
#include "tensorflow/cc/saved_model/reader.h" #include <memory> #include <string> #include <unordered_set> #include <utility> #include "absl/memory/memory.h" #include "absl/status/statusor.h" #include "tensorflow/cc/saved_model/constants.h" #include "tensorflow/cc/saved_model/metrics.h" #include "tensorflow/cc/saved_model/util.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/file_system_helper.h" #include "tensorflow/core/platform/path.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/protobuf/meta_graph.pb.h" #include "tensorflow/core/protobuf/saved_model.pb.h" #include "tensorflow/core/util/tensor_bundle/byte_swap_tensor.h" #define IS_OSS true namespace tensorflow { absl::StatusOr<MetaGraphDef*> FindMetaGraphDef( const std::unordered_set<string>& tags, SavedModel* saved_model_proto) { LOG(INFO) << "Reading meta graph with tags { " << absl::StrJoin(tags, " ") << " }"; for (MetaGraphDef& graph_def : *saved_model_proto->mutable_meta_graphs()) { std::unordered_set<string> graph_tags; for (const string& tag : graph_def.meta_info_def().tags()) { graph_tags.insert(tag); } if (graph_tags == tags) { MetaGraphDef* meta_graph_def = &graph_def; if (!port::kLittleEndian) { TF_RETURN_IF_ERROR(ByteSwapTensorContentInMetaGraphDef(meta_graph_def)); } return meta_graph_def; } } return Status( absl::StatusCode::kNotFound, strings::StrCat( "Could not find meta graph def matching supplied tags: { ", absl::StrJoin(tags, " "), " }. To inspect available tag-sets in the SavedModel, please " "use the SavedModel CLI: `saved_model_cli`")); } Status ReadSavedModel(absl::string_view export_dir, SavedModel* saved_model_proto) { LOG(INFO) << "Reading SavedModel from: " << export_dir; if (IS_OSS) { const std::string saved_model_pb_path = io::JoinPath(export_dir, kSavedModelFilenamePb); TF_ASSIGN_OR_RETURN( bool saved_model_pb_exists, internal::FileExists(Env::Default(), saved_model_pb_path)); if (saved_model_pb_exists) { Status result = ReadBinaryProto(Env::Default(), saved_model_pb_path, saved_model_proto); if (result.ok()) { metrics::SavedModelReadCount( saved_model::GetWriteVersion(*saved_model_proto)) .IncrementBy(1); } return result; } } const std::string saved_model_pbtxt_path = io::JoinPath(export_dir, kSavedModelFilenamePbTxt); auto saved_model_pbtxt_exists = internal::FileExists(Env::Default(), saved_model_pbtxt_path); if (saved_model_pbtxt_exists.value_or(false)) { Status result = ReadTextProto(Env::Default(), saved_model_pbtxt_path, saved_model_proto); if (result.ok()) { metrics::SavedModelReadCount( saved_model::GetWriteVersion(*saved_model_proto)) .IncrementBy(1); } return result; } if (!IS_OSS) { } return Status( absl::StatusCode::kNotFound, strings::StrCat("Could not find SavedModel .pb or .pbtxt at supplied " "export directory path: ", export_dir, ". 
Check that " "the directory exists and that you have the right " "permissions for accessing it.")); } Status ReadMetaGraphDefFromSavedModel(absl::string_view export_dir, const std::unordered_set<string>& tags, MetaGraphDef* const meta_graph_def) { SavedModel saved_model_proto; TF_RETURN_IF_ERROR(ReadSavedModel(export_dir, &saved_model_proto)); TF_ASSIGN_OR_RETURN(MetaGraphDef * m, FindMetaGraphDef(tags, &saved_model_proto)); *meta_graph_def = std::move(*m); return absl::OkStatus(); } Status ReadSavedModelDebugInfoIfPresent( absl::string_view export_dir, std::unique_ptr<GraphDebugInfo>* debug_info_proto) { LOG(INFO) << "Reading SavedModel debug info (if present) from: " << export_dir; const string debug_info_pb_path = io::JoinPath(export_dir, "debug", "saved_model_debug_info.pb"); TF_ASSIGN_OR_RETURN(bool debug_info_pb_exists, internal::FileExists(Env::Default(), debug_info_pb_path)); if (debug_info_pb_exists) { GraphDebugInfo debug_info; TF_RETURN_IF_ERROR( ReadBinaryProto(Env::Default(), debug_info_pb_path, &debug_info)); *debug_info_proto = std::make_unique<GraphDebugInfo>(std::move(debug_info)); } return absl::OkStatus(); } }
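A minimal usage sketch for the reader API defined above: load the meta graph tagged "serve" from a SavedModel export directory. The function and tag constant come straight from reader.h and tag_constants.h; error handling beyond status propagation is elided, and the export path is supplied by the caller.

// Loads the MetaGraphDef whose tag set is exactly {"serve"}.
#include <string>
#include <unordered_set>

#include "tensorflow/cc/saved_model/reader.h"
#include "tensorflow/cc/saved_model/tag_constants.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"

tensorflow::Status LoadServeGraph(const std::string& export_dir,
                                  tensorflow::MetaGraphDef* meta_graph_def) {
  const std::unordered_set<std::string> tags = {
      tensorflow::kSavedModelTagServe};
  // Reads saved_model.pb (or .pbtxt) and selects the meta graph whose tag set
  // matches exactly; returns NotFound otherwise.
  return tensorflow::ReadMetaGraphDefFromSavedModel(export_dir, tags,
                                                    meta_graph_def);
}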
#include "tensorflow/cc/saved_model/reader.h" #include <gmock/gmock.h> #include "tensorflow/cc/saved_model/constants.h" #include "tensorflow/cc/saved_model/metrics.h" #include "tensorflow/cc/saved_model/tag_constants.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/path.h" #include "tensorflow/core/platform/resource_loader.h" namespace tensorflow { namespace { string TestDataPbTxt() { return io::JoinPath("tensorflow", "cc", "saved_model", "testdata", "half_plus_two_pbtxt", "00000123"); } string TestDataSharded() { return io::JoinPath("tensorflow", "cc", "saved_model", "testdata", "half_plus_two", "00000123"); } string ChunkedSavedModel() { return io::JoinPath("tensorflow", "cc", "saved_model", "testdata", "chunked_saved_model", "chunked_model"); } string NonChunkedSavedModel() { return io::JoinPath("tensorflow", "cc", "saved_model", "testdata", "chunked_saved_model", "non_chunked_model"); } class ReaderTest : public ::testing::Test { protected: ReaderTest() {} void CheckMetaGraphDef(const MetaGraphDef& meta_graph_def) { const auto& tags = meta_graph_def.meta_info_def().tags(); EXPECT_TRUE(std::find(tags.begin(), tags.end(), kSavedModelTagServe) != tags.end()); EXPECT_NE(meta_graph_def.meta_info_def().tensorflow_version(), ""); EXPECT_EQ( meta_graph_def.signature_def().at("serving_default").method_name(), "tensorflow/serving/predict"); } }; TEST_F(ReaderTest, TagMatch) { MetaGraphDef meta_graph_def; const string export_dir = GetDataDependencyFilepath(TestDataSharded()); TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(export_dir, {kSavedModelTagServe}, &meta_graph_def)); CheckMetaGraphDef(meta_graph_def); } TEST_F(ReaderTest, NoTagMatch) { MetaGraphDef meta_graph_def; const string export_dir = GetDataDependencyFilepath(TestDataSharded()); Status st = ReadMetaGraphDefFromSavedModel(export_dir, {"missing-tag"}, &meta_graph_def); EXPECT_FALSE(st.ok()); EXPECT_TRUE(absl::StrContains( st.message(), "Could not find meta graph def matching supplied tags: { missing-tag }")) << st.message(); } TEST_F(ReaderTest, NoTagMatchMultiple) { MetaGraphDef meta_graph_def; const string export_dir = GetDataDependencyFilepath(TestDataSharded()); Status st = ReadMetaGraphDefFromSavedModel( export_dir, {kSavedModelTagServe, "missing-tag"}, &meta_graph_def); EXPECT_FALSE(st.ok()); EXPECT_TRUE(absl::StrContains( st.message(), "Could not find meta graph def matching supplied tags: ")) << st.message(); } TEST_F(ReaderTest, InvalidExportPath) { MetaGraphDef meta_graph_def; const string export_dir = GetDataDependencyFilepath("missing-path"); Status st = ReadMetaGraphDefFromSavedModel(export_dir, {kSavedModelTagServe}, &meta_graph_def); EXPECT_FALSE(st.ok()); } TEST_F(ReaderTest, ReadSavedModelDebugInfoIfPresent) { const string export_dir = GetDataDependencyFilepath(TestDataSharded()); std::unique_ptr<GraphDebugInfo> debug_info_proto; TF_ASSERT_OK(ReadSavedModelDebugInfoIfPresent(export_dir, &debug_info_proto)); } TEST_F(ReaderTest, MetricsNotUpdatedFailedRead) { MetaGraphDef meta_graph_def; const int read_count_v1 = metrics::SavedModelReadCount("1").value(); const int read_count_v2 = metrics::SavedModelReadCount("2").value(); const string export_dir = GetDataDependencyFilepath("missing-path"); Status st = ReadMetaGraphDefFromSavedModel(export_dir, {"serve"}, &meta_graph_def); EXPECT_FALSE(st.ok()); 
EXPECT_EQ(metrics::SavedModelReadCount("1").value(), read_count_v1); EXPECT_EQ(metrics::SavedModelReadCount("2").value(), read_count_v2); } TEST_F(ReaderTest, MetricsUpdatedSuccessfulRead) { MetaGraphDef meta_graph_def; const int read_count_v1 = metrics::SavedModelReadCount("1").value(); const string export_dir = GetDataDependencyFilepath(TestDataSharded()); Status st = ReadMetaGraphDefFromSavedModel(export_dir, {"serve"}, &meta_graph_def); EXPECT_EQ(metrics::SavedModelReadCount("1").value(), read_count_v1 + 1); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/reader.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/reader_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
60b3f1ce-5ea1-464a-9c6d-070957e277da
cpp
tensorflow/tensorflow
hlo_bisect_state
third_party/xla/xla/tools/hlo_bisect/hlo_bisect_state.cc
third_party/xla/xla/tools/hlo_bisect/hlo_bisect_state_test.cc
#include "xla/tools/hlo_bisect/hlo_bisect_state.h" #include <iterator> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/hlo_dce.h" #include "xla/tests/test_utils.h" #include "xla/util.h" namespace xla { namespace bisect { namespace { std::vector<HloInstruction*> GetModifiedInstructionPostOrder( HloComputation* computation) { std::vector<HloInstruction*> instructions( computation->parameter_instructions().begin(), computation->parameter_instructions().end()); absl::c_copy_if(computation->MakeInstructionPostOrder(), std::back_inserter(instructions), [&](const HloInstruction* instr) { return instr->opcode() != HloOpcode::kParameter; }); return instructions; } absl::Status MorphModuleWithOutputs(HloModule* module, absl::Span<HloInstruction* const> outputs) { HloComputation* entry_computation = module->entry_computation(); HloInstruction* new_root = outputs.size() == 1 ? outputs[0] : entry_computation->AddInstruction( HloInstruction::CreateTuple(outputs)); entry_computation->set_root_instruction(new_root, true); *module->mutable_entry_computation_layout() = module->compute_computation_layout(); HloDCE dce; absl::StatusOr<bool> dce_result = dce.Run(module); return dce_result.status(); } absl::Status MorphModuleWithInstructions( HloModule* module, absl::Span<HloInstruction* const> instructions) { ConstHloInstructionSet in_range_instructions(instructions.begin(), instructions.end()); auto keep_result = [&](const HloInstruction* instruction) { return instruction->opcode() != HloOpcode::kParameter && !absl::c_any_of(instruction->users(), [&](const HloInstruction* user) { return in_range_instructions.count(user) != 0; }); }; std::vector<HloInstruction*> outputs; absl::c_copy_if(instructions, std::back_inserter(outputs), keep_result); return MorphModuleWithOutputs(module, outputs); } absl::Status MorphModuleWithInstructions(HloModule* module, size_t num_instructions) { std::vector<HloInstruction*> ordered_instructions = GetModifiedInstructionPostOrder(module->entry_computation()); HloInstruction* const* instructions_begin = &ordered_instructions.front(); return MorphModuleWithInstructions( module, absl::MakeSpan(instructions_begin, num_instructions)); } absl::Status MorphModuleWithLiterals( HloModule* module, absl::flat_hash_map<std::string, Literal> literal_map) { HloComputation* entry_computation = module->entry_computation(); absl::flat_hash_map<HloInstruction*, Literal> replace_map; for (HloInstruction* instruction : entry_computation->instructions()) { auto it = literal_map.find(instruction->name()); if (it != literal_map.end()) { replace_map.emplace(instruction, std::move(it->second)); } } for (auto& [instruction, literal] : replace_map) { if (!instruction->IsDead()) { HloInstruction* new_instruction = entry_computation->AddInstruction( HloInstruction::CreateConstant(std::move(literal))); absl::Status replace_status = entry_computation->ReplaceInstruction(instruction, new_instruction); TF_RETURN_IF_ERROR(replace_status); } } xla::HloDCE dce; absl::StatusOr<bool> dce_status = dce.Run(module); return dce_status.status(); } bool InstructionNotReplaceableWithConstant(HloInstruction* instruction) { return instruction->shape().is_dynamic() || instruction->opcode() == HloOpcode::kConstant || 
instruction->opcode() == HloOpcode::kTuple || instruction->opcode() == HloOpcode::kParameter; } } absl::StatusOr<bool> HloBisectState::ShouldProcess() { return RunModule(*module_); } absl::StatusOr<bool> HloBisectState::TrimEntryComputation() { bool changed_in_loop = false; bool changed = false; for (int iter = 0; changed || iter < 2; iter++) { if (iter % 2 == 0) { VLOG(2) << "Trimming by outputs, iteration " << iter; TF_ASSIGN_OR_RETURN(changed, TrimByOutputs()); } else { VLOG(2) << "Trimming by instructions, iteration " << iter; TF_ASSIGN_OR_RETURN(changed, TrimByInstructions()); } changed_in_loop |= changed; } VLOG(2) << "Trimming by replacing instructions with literals"; TF_ASSIGN_OR_RETURN(changed, TrimByUsingConstants()); VLOG(2) << "Final module: " << module_->ToString(); return changed || changed_in_loop; } std::unique_ptr<xla::HloModule>&& HloBisectState::GetResult() { return std::move(module_); } absl::StatusOr<bool> HloBisectState::RunModule(const HloModule& module) { VLOG(3) << "Modified module: " << module.ToString(); absl::StatusOr<bool> bug_result = bug_checker_->Run(module); TF_RETURN_IF_ERROR(bug_result.status()); VLOG(3) << "Bug checker result: " << bug_result.value(); if (!bug_result.value()) { for (HloInstruction* instr : module.entry_computation()->instructions()) { foldable_instructions_.emplace(instr->name()); } for (auto& [key, value] : bug_checker_->GetResults()) { foldable_instructions_values_[key] = std::move(value); } } return bug_result; } absl::StatusOr<bool> HloBisectState::TrimByOutputs() { HloInstruction* root_instruction = module_->entry_computation()->root_instruction(); if (root_instruction->opcode() != HloOpcode::kTuple || root_instruction->operand_count() < 2) { return false; } auto run_modified = [&](int64_t start, int64_t end) -> absl::StatusOr<bool> { std::unique_ptr<HloModule> new_module = module_->Clone(""); HloInstruction* const* new_operands = new_module->entry_computation()->root_instruction()->operands().begin(); TF_RETURN_IF_ERROR(MorphModuleWithOutputs( new_module.get(), absl::MakeSpan(new_operands + start, end - start + 1))); return RunModule(*new_module); }; int64_t bisect_low = 0; int64_t bisect_high = root_instruction->operand_count() - 1; while (bisect_low < bisect_high) { int64_t cur = bisect_low + (bisect_high - bisect_low) / 2; VLOG(2) << "Number of outputs: " << (cur - bisect_low + 1) << " [" << bisect_low << ".." 
<< cur << "]"; TF_ASSIGN_OR_RETURN(bool has_bug, run_modified(bisect_low, cur)); if (has_bug) { bisect_high = cur; } else { TF_ASSIGN_OR_RETURN(has_bug, run_modified(cur + 1, bisect_high)); if (has_bug) { bisect_low = cur + 1; } else { break; } } } bool changed = (bisect_high - bisect_low) < (root_instruction->operand_count() - 1); if (changed) { TF_RETURN_IF_ERROR(MorphModuleWithOutputs( module_.get(), absl::MakeSpan(root_instruction->operands().begin() + bisect_low, bisect_high - bisect_low + 1))); TF_RETURN_IF_ERROR(ExpectModuleIsBuggy()); } return changed; } absl::StatusOr<bool> HloBisectState::TrimByInstructions() { HloComputation* computation = module_->entry_computation(); int64_t upper_bound = computation->instruction_count() - computation->root_instruction()->shape().IsTuple(); int64_t bisect_low = computation->num_parameters() - 1; int64_t bisect_high = upper_bound; while (bisect_low + 1 < bisect_high) { int64_t cur = bisect_low + (bisect_high - bisect_low) / 2; VLOG(2) << "Number of instructions: " << cur << " (of " << computation->instruction_count() << ")"; std::unique_ptr<HloModule> new_module = module_->Clone(""); TF_RETURN_IF_ERROR(MorphModuleWithInstructions(new_module.get(), cur)); TF_ASSIGN_OR_RETURN(bool has_bug, RunModule(*new_module)); if (has_bug) { bisect_high = cur; } else { bisect_low = cur; } } if (bisect_high == computation->num_parameters()) { return Internal( "The checker fails on an empty computation! Something is not right. " "Can't bisect."); } bool changed = bisect_high < upper_bound; if (changed) { TF_RETURN_IF_ERROR(MorphModuleWithInstructions(module_.get(), bisect_high)); TF_RETURN_IF_ERROR(ExpectModuleIsBuggy()); } return changed; } absl::StatusOr<bool> HloBisectState::TrimByUsingConstants() { absl::flat_hash_map<std::string, Literal> literal_map; int64_t random_literals_count = 0; for (HloInstruction* instr : module_->entry_computation()->instructions()) { if (InstructionNotReplaceableWithConstant(instr)) { continue; } if (foldable_instructions_values_.contains(instr->name())) { auto it = foldable_instructions_values_.extract(instr->name()); literal_map.insert(std::move(it)); } else if (foldable_instructions_.contains(instr->name())) { absl::StatusOr<Literal> literal_status = MakeFakeLiteral(instr->shape()); TF_RETURN_IF_ERROR(literal_status.status()); literal_map[instr->name()] = std::move(literal_status).value(); ++random_literals_count; } } VLOG(2) << "Number of literals: " << literal_map.size() << " (random: " << random_literals_count << ")"; std::unique_ptr<HloModule> new_module = module_->Clone(""); TF_RETURN_IF_ERROR( MorphModuleWithLiterals(new_module.get(), std::move(literal_map))); TF_ASSIGN_OR_RETURN(bool has_bug, RunModule(*new_module)); if (has_bug) { std::swap(module_, new_module); } return has_bug; } absl::Status HloBisectState::ExpectModuleIsBuggy() { TF_ASSIGN_OR_RETURN(bool has_bug, RunModule(*module_)); if (has_bug) { return absl::OkStatus(); } const int retry_count = 5; int bug_count = 0; for (int i = 0; i < retry_count; i++) { TF_ASSIGN_OR_RETURN(has_bug, bug_checker_->Run(*module_)); if (has_bug) { bug_count++; } } if (bug_count != 0) { return InternalStrCat("The checker is non deterministic! (only ", bug_count, " failures seen in ", (retry_count + 1), " runs)"); } return Internal("We \"lost\" the bug while bisecting!"); } } }
#include "xla/tools/hlo_bisect/hlo_bisect_state.h" #include <initializer_list> #include <string> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/status/statusor.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/literal.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace bisect { namespace { namespace m = match; using HloBisectStateTest = HloTestBase; class TestBugSearch : public BugCheckerInterface { public: TestBugSearch(std::initializer_list<HloOpcode> opcodes) : opcodes_(opcodes) {} absl::StatusOr<bool> Run(const HloModule& module) override { auto has_opcode = [&](HloOpcode opcode) { return absl::c_any_of(module.entry_computation()->instructions(), [opcode](const HloInstruction* instr) { return instr->opcode() == opcode; }); }; return absl::c_all_of(opcodes_, has_opcode); } absl::flat_hash_map<std::string, Literal> GetResults() override { return {}; } private: std::vector<HloOpcode> opcodes_; }; Literal CreateLiteral(float value) { Literal result = Literal::CreateFromShape(ShapeUtil::MakeShape(F32, {})); result.PopulateWithValue(value); return result; } TEST_F(HloBisectStateTest, TrimByOutputs) { const char* kModuleStr = R"( HloModule test_module ENTRY test_computation { p1 = s32[8] parameter(0) p2 = s32[8] parameter(1) a = s32[8] add(p1, p2) b = s32[8] multiply(p1, p2) c = s32[8] subtract(p1, p2) ROOT sum = tuple(a, b, c) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); TestBugSearch bug_checker({HloOpcode::kMultiply}); HloBisectState bisect(std::move(module), &bug_checker); TF_ASSERT_OK_AND_ASSIGN(bool changed, bisect.TrimEntryComputation()); EXPECT_TRUE(changed); auto reduced_module = std::move(bisect).GetResult(); EXPECT_THAT(reduced_module->entry_computation()->root_instruction(), GmockMatch(m::Multiply(m::Parameter(0), m::Parameter(1)))); } TEST_F(HloBisectStateTest, TrimByInstructions) { const char* kModuleStr = R"( HloModule axpy_module ENTRY axpy_computation { alpha = f32[] parameter(0) broadcast = f32[10] broadcast(alpha), dimensions={} x = f32[10] parameter(1) ax = f32[10] multiply(broadcast, x) y = f32[10] parameter(2) ROOT add = f32[10] add(ax, y) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); TestBugSearch bug_checker({HloOpcode::kMultiply, HloOpcode::kBroadcast}); HloBisectState bisect(std::move(module), &bug_checker); TF_ASSERT_OK_AND_ASSIGN(bool changed, bisect.TrimEntryComputation()); EXPECT_TRUE(changed); auto reduced_module = std::move(bisect).GetResult(); EXPECT_THAT( reduced_module->entry_computation()->root_instruction(), GmockMatch(m::Multiply(m::Broadcast(m::Parameter(0)), m::Parameter(1)))); } TEST_F(HloBisectStateTest, TrimByUsingRandomConstants) { const char* kModuleStr = R"( HloModule test_module ENTRY test_computation { p1 = f32[4] parameter(0) p2 = f32[4] parameter(1) a = f32[4] multiply(p1, p2) b = f32[4] add(p1, p2) ROOT result = f32[4] power(a, b) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); TestBugSearch bug_checker({HloOpcode::kPower}); HloBisectState bisect(std::move(module), &bug_checker); TF_ASSERT_OK_AND_ASSIGN(bool changed, bisect.TrimEntryComputation()); EXPECT_TRUE(changed); auto reduced_module = std::move(bisect).GetResult(); EXPECT_THAT(reduced_module->entry_computation()->root_instruction(), GmockMatch(m::Power(m::Constant(), m::Constant()))); } 
TEST_F(HloBisectStateTest, TrimByUsingReferenceConstants) { class TestBugSearchWithReferenceConstants : public TestBugSearch { public: TestBugSearchWithReferenceConstants() : TestBugSearch({HloOpcode::kPower}) {} absl::flat_hash_map<std::string, Literal> GetResults() override { absl::flat_hash_map<std::string, Literal> results; results["a"] = CreateLiteral(2.0f); results["b"] = CreateLiteral(3.0f); return results; } }; const char* kModuleStr = R"( HloModule test_module ENTRY test_computation { p1 = f32[] parameter(0) p2 = f32[] parameter(1) a = f32[] multiply(p1, p2) b = f32[] add(p1, p2) ROOT result = f32[] power(a, b) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); TestBugSearchWithReferenceConstants bug_checker; HloBisectState bisect(std::move(module), &bug_checker); TF_ASSERT_OK_AND_ASSIGN(bool changed, bisect.TrimEntryComputation()); EXPECT_TRUE(changed); auto reduced_module = std::move(bisect).GetResult(); EXPECT_THAT(reduced_module->entry_computation()->root_instruction(), GmockMatch(m::Power(m::Constant(), m::Constant()))); } TEST_F(HloBisectStateTest, TrimByOutputsLostBug) { class CustomBugSearch : public TestBugSearch { public: CustomBugSearch() : TestBugSearch({HloOpcode::kConstant}) {} absl::StatusOr<bool> Run(const HloModule& module) override { TF_ASSIGN_OR_RETURN(bool has_constants, TestBugSearch::Run(module)); int program_size = module.entry_computation()->instruction_count(); return program_size == 5 && !has_constants; } }; const char* kModuleStr = R"( HloModule test_module ENTRY test_computation { p1 = s32[8] parameter(0) p2 = s32[8] parameter(1) a = s32[8] add(p1, p2) b = s32[8] multiply(p1, p2) ROOT sum = tuple(a, b) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); CustomBugSearch bug_checker; HloBisectState bisect(std::move(module), &bug_checker); TF_ASSERT_OK_AND_ASSIGN(bool changed, bisect.TrimEntryComputation()); EXPECT_FALSE(changed); } } } }
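For intuition, here is a worked trace of the TrimByOutputs binary search on the first test above, where the checker looks for kMultiply and the root tuple is (a, b, c); the comments follow the loop in hlo_bisect_state.cc exactly:

// Worked trace of TrimByOutputs on ROOT sum = tuple(a, b, c), bug in b:
//   bisect_low = 0, bisect_high = 2
//   cur = 1: outputs [0..1] = (a, b) -> has_bug -> bisect_high = 1
//   cur = 0: outputs [0..0] = (a)    -> no bug
//            outputs [1..1] = (b)    -> has_bug -> bisect_low = 1
//   loop exits with [1..1]; the module is morphed to keep only b, and DCE
//   removes the rest, which is why the test expects a multiply root.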
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_bisect/hlo_bisect_state.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_bisect/hlo_bisect_state_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
01655d2e-ef4c-487f-b26b-7a6ff14cd974
cpp
tensorflow/tensorflow
hlo_expand
third_party/xla/xla/tools/hlo_expand.cc
third_party/xla/xla/tools/tests/hlo_expand_test.cc
#include "xla/tools/hlo_expand.h" #include <vector> #include "xla/hlo/pass/hlo_pass_pipeline.h" #include "xla/service/batchnorm_expander.h" #include "xla/service/cholesky_expander.h" #include "xla/service/hlo.pb.h" #include "xla/service/hlo_verifier.h" #include "xla/service/rng_bit_generator_expander.h" #include "xla/service/rng_expander.h" #include "xla/service/sharding_propagation.h" #include "xla/service/spmd/stateful_rng_spmd_partitioner.h" #include "xla/service/triangular_solve_expander.h" #include "xla/tsl/util/command_line_flags.h" #include "xla/xla_data.pb.h" namespace xla { void AddPassesToPipeline(HloExpandConfig& config, HloPassPipeline& pipeline, const HloModuleConfig& hlo_module_config) { if (config.batch_norm_grad_expander || config.batch_norm_inference_expander || config.batch_norm_training_expander) { pipeline.AddPass<xla::BatchNormExpander>( config.batch_norm_training_expander, config.batch_norm_inference_expander, config.batch_norm_grad_expander); } if (config.cholesky_expander) { pipeline.AddPass<xla::CholeskyExpander>(); } if (config.rng_expander) { pipeline.AddPass<xla::RngExpander>(); } if (config.rng_bit_generator_philox_expander) { pipeline.AddPass<xla::RngBitGeneratorExpander>( xla::RandomAlgorithm::RNG_PHILOX); } if (config.rng_bit_generator_three_fry_expander) { pipeline.AddPass<xla::RngBitGeneratorExpander>( xla::RandomAlgorithm::RNG_THREE_FRY); } if (config.triangular_solve_expander) { pipeline.AddPass<xla::TriangularSolveExpander>(); } if (config.spmd_expander) { pipeline.AddPass<ShardingPropagation>( true, false, hlo_module_config.allow_spmd_sharding_propagation_to_output(), hlo_module_config.allow_spmd_sharding_propagation_to_parameters()); pipeline.AddPass<spmd::StatefulRngSpmdPartitioner>( hlo_module_config.num_partitions(), hlo_module_config.replica_count(), hlo_module_config.debug_options() .xla_gpu_threshold_for_windowed_einsum_mib()); } if (config.verify_hlo) { pipeline.AddPass<xla::HloVerifier>(false, false); } } std::vector<tsl::Flag> GetFlags(HloExpandConfig& config) { return { tsl::Flag("h", &config.help, "Alias of --help"), tsl::Flag("help", &config.help, "Display available options"), tsl::Flag( "input_format", &config.input_format, "The format of the input file. If this flag is not specified, it's" "inferred from the file extension instead. Valid values:\n " "* hlo|txt : HLO textual format\n" "* pb : xla::HloProto in binary proto format\n" "* pbtxt : xla::HloProto in text proto format"), tsl::Flag("o", &config.output_file, "Alias of --output_file="), tsl::Flag("output_file", &config.output_file, "Full output file path"), tsl::Flag("output_format", &config.output_format, "The format of the output file. Defaults to input_format. 
" "Valid values:\n" "* hlo|txt : HLO textual format\n" "* pb : xla::HloProto in binary proto format\n" "* pbtxt : xla::HloProto in text proto format"), tsl::Flag("batch_norm_expander", &config.batch_norm_expander, "Overrides and expands batch_norm_grad, batch_norm_inference, " "and batch_norm_training ops"), tsl::Flag("batch_norm_grad_expander", &config.batch_norm_grad_expander, "Expands batch_norm_grad op"), tsl::Flag("batch_norm_inference_expander", &config.batch_norm_inference_expander, "Expands batch_norm_inference_grad op"), tsl::Flag("batch_norm_training_expander", &config.batch_norm_training_expander, "Expands batch_norm_training_grad op"), tsl::Flag("cholesky_expander", &config.cholesky_expander, "Expands cholesky op"), tsl::Flag("spmd_expander", &config.spmd_expander, "Expands SPMD sharding"), tsl::Flag("expand_all", &config.expand_all, "Overrides and expands all supported passes below"), tsl::Flag("rng_expander", &config.rng_expander, "Expands rng op"), tsl::Flag( "rng_bit_generator_expander", &config.rng_bit_generator_expander, "Overrides and expands rng_bit_generator op on all prng algorithms"), tsl::Flag("rng_bit_generator_philox_expander", &config.rng_bit_generator_philox_expander, "Expands rng_bit_generator op using philox prng algorithm"), tsl::Flag("rng_bit_generator_three_fry_expander", &config.rng_bit_generator_three_fry_expander, "Expands rng_bit_generator op using three_fry prng algorithm"), tsl::Flag("triangular_solve_expander", &config.triangular_solve_expander, "Expands triangular_solve op"), tsl::Flag("verify_hlo", &config.verify_hlo, "Run HLO verifier after passes"), }; } void ParseCompoundFlags(HloExpandConfig& config) { config.batch_norm_grad_expander |= config.expand_all || config.batch_norm_expander; config.batch_norm_inference_expander |= config.expand_all || config.batch_norm_expander; config.batch_norm_training_expander |= config.expand_all || config.batch_norm_expander; config.cholesky_expander |= config.expand_all; config.rng_bit_generator_philox_expander |= config.expand_all || config.rng_bit_generator_expander; config.rng_bit_generator_three_fry_expander |= config.expand_all || config.rng_bit_generator_expander; config.rng_expander |= config.expand_all; config.triangular_solve_expander |= config.expand_all; } }
#include <string> #include <vector> #include <gmock/gmock.h> #include "tsl/platform/path.h" #include "tsl/platform/subprocess.h" #include "tsl/platform/test.h" namespace xla { namespace { class HloExpandTest : public ::testing::Test { protected: void HloOpt(std::vector<std::string>& additional_flags) { std::string hlo_opt_bin = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools", "hlo-expand"); tsl::SubProcess proc; std::vector<std::string> argv = {hlo_opt_bin}; argv.insert(argv.end(), additional_flags.begin(), additional_flags.end()); proc.SetProgram(hlo_opt_bin, argv); proc.SetChannelAction(tsl::CHAN_STDOUT, tsl::ACTION_PIPE); proc.SetChannelAction(tsl::CHAN_STDERR, tsl::ACTION_PIPE); EXPECT_TRUE(proc.Start()); stdout_output_ = stderr_output_ = ""; int status = proc.Communicate(nullptr, &stdout_output_, &stderr_output_); #if defined(_WIN32) || defined(_WIN64) exited_normally_ = (status == 0); exit_status_ = status; #else exited_normally_ = WIFEXITED(status); exit_status_ = exited_normally_ ? WEXITSTATUS(status) : -1; #endif } std::string stdout_output_; std::string stderr_output_; bool exited_normally_ = false; int exit_status_ = -1; }; TEST_F(HloExpandTest, CholeskyHlo) { std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools", "tests", "cholesky.hlo"); std::vector<std::string> additional_flags = {"--input_format=hlo", hlo_path}; HloOpt(additional_flags); const std::string& expected_hlo_string = R"(HloModule main, entry_computation_layout={()->f64[3,3]{1,0}} ENTRY %main.3 () -> f64[3,3] { %constant.1 = f64[3,3]{1,0} constant({ { 1, 2, 3 }, { 2, 20, 26 }, { 3, 26, 70 } }) ROOT %cholesky.2 = f64[3,3]{1,0} cholesky(f64[3,3]{1,0} %constant.1), lower=true })"; EXPECT_TRUE(exited_normally_); EXPECT_EQ(exit_status_, 0); EXPECT_THAT(stdout_output_, testing::HasSubstr(expected_hlo_string)); } TEST_F(HloExpandTest, SpmdHlo) { std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools", "tests", "spmd.hlo"); std::vector<std::string> additional_flags = {"--spmd_expander", hlo_path}; HloOpt(additional_flags); const std::string& expected_hlo_string = R"(HloModule module, entry_computation_layout={(f32[24,64]{1,0}, f32[39296,64]{1,0})->f32[24,19648]{1,0}}, num_partitions=2 ENTRY %entry_spmd (param: f32[24,64], param.1: f32[39296,64]) -> f32[24,19648] { %param = f32[24,64]{1,0} parameter(0), sharding={replicated} %lhs.copy.1 = f32[24,64]{1,0} copy(f32[24,64]{1,0} %param) %param.1 = f32[39296,64]{1,0} parameter(1), sharding={replicated} %constant = s32[2]{0} constant({0, 19648}) %partition-id = u32[] partition-id() %dynamic-slice = s32[1]{0} dynamic-slice(s32[2]{0} %constant, u32[] %partition-id), dynamic_slice_sizes={1} %reshape = s32[] reshape(s32[1]{0} %dynamic-slice) %constant.1 = s32[] constant(0) %dynamic-slice.1 = f32[19648,64]{1,0} dynamic-slice(f32[39296,64]{1,0} %param.1, s32[] %reshape, s32[] %constant.1), dynamic_slice_sizes={19648,64} %rhs.copy.1 = f32[19648,64]{1,0} copy(f32[19648,64]{1,0} %dynamic-slice.1) ROOT %dot.1 = f32[24,19648]{1,0} dot(f32[24,64]{1,0} %lhs.copy.1, f32[19648,64]{1,0} %rhs.copy.1), lhs_contracting_dims={1}, rhs_contracting_dims={1} })"; EXPECT_TRUE(exited_normally_); EXPECT_EQ(exit_status_, 0); EXPECT_THAT(stdout_output_, testing::HasSubstr(expected_hlo_string)); } TEST_F(HloExpandTest, CholeskyExpanderHlo) { std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools", "tests", "cholesky.hlo"); std::vector<std::string> additional_flags = {"--input_format=hlo", hlo_path, "--expand_all"}; 
HloOpt(additional_flags); const std::string& expected_hlo_string = "%xla.cholesky_f64"; EXPECT_TRUE(exited_normally_); EXPECT_EQ(exit_status_, 0); EXPECT_THAT(stdout_output_, testing::HasSubstr(expected_hlo_string)); } TEST_F(HloExpandTest, InvalidArgc) { std::vector<std::string> additional_flags = {"--input_format=hlo", "foo", "bar", "baz"}; HloOpt(additional_flags); const std::string& expected_string = "Cannot parse more than one argument. See usage below:"; EXPECT_TRUE(exited_normally_); EXPECT_EQ(exit_status_, 1); EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string)); } TEST_F(HloExpandTest, InvalidInputFileExtension) { std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools", "tests", "foo.bar"); std::vector<std::string> additional_flags = {hlo_path}; HloOpt(additional_flags); const std::string& expected_string = "input_format must be specified as [hlo|pb|pbtxt|txt]."; EXPECT_TRUE(exited_normally_); EXPECT_EQ(exit_status_, 1); EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string)); } TEST_F(HloExpandTest, InvalidInputFormat) { std::vector<std::string> additional_flags = {"--input_format=foo"}; HloOpt(additional_flags); const std::string& expected_string = "input_format must be specified as [hlo|pb|pbtxt|txt]."; EXPECT_TRUE(exited_normally_); EXPECT_EQ(exit_status_, 1); EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string)); } TEST_F(HloExpandTest, InvalidOutputFileExtension) { std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools", "tests", "cholesky.hlo"); std::string output_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools", "tests", "foo.bar"); std::vector<std::string> additional_flags = {"--input_format=", hlo_path, "--output_file=" + output_path}; HloOpt(additional_flags); const std::string& expected_string = "output_format must be specified as [hlo|pb|pbtxt]."; EXPECT_TRUE(exited_normally_); EXPECT_EQ(exit_status_, 1); EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string)); } TEST_F(HloExpandTest, InvalidOutputFormat) { std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools", "tests", "cholesky.hlo"); std::vector<std::string> additional_flags = {"--input_format=", hlo_path, "--output_format=foo"}; HloOpt(additional_flags); const std::string& expected_string = "output_format must be specified as [hlo|pb|pbtxt]."; EXPECT_TRUE(exited_normally_); EXPECT_EQ(exit_status_, 1); EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string)); } TEST_F(HloExpandTest, InvalidFile) { std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools", "tests", "foo.bar"); std::vector<std::string> additional_flags = {"--input_format=hlo", hlo_path}; HloOpt(additional_flags); const std::string& expected_string = "Try: hlo-expand --help"; EXPECT_TRUE(exited_normally_); EXPECT_EQ(exit_status_, 1); EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string)); } TEST_F(HloExpandTest, UnsupportedOutputFormat) { std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools", "tests", "cholesky.hlo"); std::vector<std::string> additional_flags = {"--input_format=hlo", "--output_format=pb", hlo_path}; HloOpt(additional_flags); const std::string& expected_string = "Printing to stdout must specify supported " "output_format=[hlo|pbtxt|txt]."; EXPECT_TRUE(exited_normally_); EXPECT_EQ(exit_status_, 1); EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string)); } TEST_F(HloExpandTest, VerificationFailure) { std::string hlo_path = 
tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools", "tests", "invalid_concat.hlo"); std::vector<std::string> additional_flags = {"--verify_hlo", hlo_path}; HloOpt(additional_flags); const std::string& expected_string = "Cannot concatenate arrays that differ in dimensions"; EXPECT_TRUE(exited_normally_); EXPECT_EQ(exit_status_, 1); EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_expand.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/tests/hlo_expand_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
079d69b0-65c5-4dbf-b05a-cfe05100b0de
cpp
tensorflow/tensorflow
stable_delegate_plugin
tensorflow/lite/acceleration/configuration/stable_delegate_plugin.cc
tensorflow/lite/acceleration/configuration/stable_delegate_plugin_test.cc
#include "tensorflow/lite/acceleration/configuration/stable_delegate_plugin.h" namespace tflite { namespace delegates { TFLITE_REGISTER_DELEGATE_FACTORY_FUNCTION(StableDelegatePlugin, StableDelegatePlugin::New); } }
#include <memory> #include <gtest/gtest.h> #include "pthreadpool.h" #include "tensorflow/lite/acceleration/configuration/configuration_generated.h" #include "tensorflow/lite/core/acceleration/configuration/delegate_registry.h" #include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h" namespace tflite { class StableDelegatePluginTest : public testing::Test { public: static constexpr int kNumThreadsForTest = 7; static constexpr tflite::XNNPackFlags kFlagsForTest = tflite::XNNPackFlags::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8_QU8; static constexpr char kDelegateBinaryPath[] = "tensorflow/lite/delegates/utils/experimental/" "stable_delegate/libtensorflowlite_stable_xnnpack_delegate.so"; void SetUp() override { flatbuffers::Offset<flatbuffers::String> stable_delegate_path_offset = flatbuffer_builder_.CreateString(kDelegateBinaryPath); StableDelegateLoaderSettingsBuilder stable_delegate_loader_settings_builder( flatbuffer_builder_); stable_delegate_loader_settings_builder.add_delegate_path( stable_delegate_path_offset); flatbuffers::Offset<StableDelegateLoaderSettings> stable_delegate_loader_settings = stable_delegate_loader_settings_builder.Finish(); XNNPackSettingsBuilder xnnpack_settings_builder(flatbuffer_builder_); xnnpack_settings_builder.add_num_threads(kNumThreadsForTest); xnnpack_settings_builder.add_flags(kFlagsForTest); flatbuffers::Offset<XNNPackSettings> xnnpack_settings = xnnpack_settings_builder.Finish(); TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_); tflite_settings_builder.add_stable_delegate_loader_settings( stable_delegate_loader_settings); tflite_settings_builder.add_xnnpack_settings(xnnpack_settings); tflite_settings_builder.add_delegate(Delegate_XNNPACK); flatbuffers::Offset<TFLiteSettings> tflite_settings = tflite_settings_builder.Finish(); flatbuffer_builder_.Finish(tflite_settings); tflite_settings_ = flatbuffers::GetRoot<TFLiteSettings>( flatbuffer_builder_.GetBufferPointer()); delegate_plugin_ = delegates::DelegatePluginRegistry::CreateByName( "StableDelegatePlugin", *tflite_settings_); ASSERT_NE(delegate_plugin_, nullptr); } void TearDown() override { delegate_plugin_.reset(); } protected: flatbuffers::FlatBufferBuilder flatbuffer_builder_; const TFLiteSettings *tflite_settings_; std::unique_ptr<delegates::DelegatePluginInterface> delegate_plugin_; }; TEST_F(StableDelegatePluginTest, CanCreateAndDestroyDelegate) { delegates::TfLiteDelegatePtr delegate = delegate_plugin_->Create(); EXPECT_NE(delegate, nullptr); } TEST_F(StableDelegatePluginTest, CanGetDelegateErrno) { delegates::TfLiteDelegatePtr delegate = delegate_plugin_->Create(); EXPECT_EQ(delegate_plugin_->GetDelegateErrno(delegate.get()), 0); } TEST_F(StableDelegatePluginTest, SetsCorrectThreadCount) { delegates::TfLiteDelegatePtr delegate = delegate_plugin_->Create(); pthreadpool_t threadpool = static_cast<pthreadpool_t>( TfLiteXNNPackDelegateGetThreadPool(delegate.get())); EXPECT_EQ(pthreadpool_get_threads_count(threadpool), kNumThreadsForTest); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/acceleration/configuration/stable_delegate_plugin.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/acceleration/configuration/stable_delegate_plugin_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
c396709c-ef47-4f4e-8cf4-224ba0257c00
cpp
tensorflow/tensorflow
composite_device
tensorflow/core/common_runtime/composite_device.cc
tensorflow/core/common_runtime/composite_device_test.cc
#include "tensorflow/core/common_runtime/composite_device.h" #include "absl/strings/str_join.h" #include "tensorflow/core/util/device_name_utils.h" namespace tensorflow { const char* const kCompositeDeviceType = "COMPOSITE"; std::unique_ptr<CompositeDevice> CompositeDevice::MakeDevice( const std::vector<string>& underlying_devices, const int unique_device_id, const DeviceNameUtils::ParsedName& host_name, Status* status) { DeviceNameUtils::ParsedName parsed_name = host_name; parsed_name.type = kCompositeDeviceType; parsed_name.id = unique_device_id; const string device_name = DeviceNameUtils::ParsedNameToString(parsed_name); return CompositeDevice::MakeDevice(underlying_devices, device_name, status); } std::unique_ptr<CompositeDevice> CompositeDevice::MakeDevice( const std::vector<string>& underlying_devices, const string& device_name, Status* status) { if (underlying_devices.empty()) { status->Update( errors::InvalidArgument("underlying_devices should not be empty.")); return nullptr; } DeviceNameUtils::ParsedName parsed_name; if (!DeviceNameUtils::ParseFullName(underlying_devices.at(0), &parsed_name)) { status->Update(tensorflow::errors::InvalidArgument( "Cannot parse device name ", underlying_devices.at(0), " when creating CompositeDevice.")); return nullptr; } const string& underlying_type = parsed_name.type; for (int i = 1; i < underlying_devices.size(); ++i) { DeviceNameUtils::ParsedName name; if (!DeviceNameUtils::ParseFullName(underlying_devices.at(i), &name)) { status->Update(tensorflow::errors::InvalidArgument( "Cannot parse device name ", underlying_devices.at(i), " when creating CompositeDevice.")); return nullptr; } if (name.type != underlying_type) { status->Update(tensorflow::errors::InvalidArgument( "Expect device type ", parsed_name.type, "; but got type ", name.type, " from device: ", underlying_devices.at(i), " when creating CompositeDevice.")); return nullptr; } } DeviceAttributes device_attributes; device_attributes.set_name(device_name); device_attributes.set_device_type(kCompositeDeviceType); return absl::WrapUnique( new CompositeDevice(device_attributes, underlying_devices)); } }
#include "tensorflow/core/common_runtime/composite_device.h" #include "tensorflow/core/lib/core/status_test_util.h" namespace tensorflow { TEST(CompositeDeviceTest, Basic) { const string host_name = "/job:localhost/replica:0/task:0/device:CPU:0"; DeviceNameUtils::ParsedName parsed_host_name; EXPECT_TRUE(DeviceNameUtils::ParseFullName(host_name, &parsed_host_name)); std::vector<string> underlying_devices; { Status status; std::unique_ptr<CompositeDevice> composite_device = CompositeDevice::MakeDevice(underlying_devices, 0, parsed_host_name, &status); EXPECT_EQ(composite_device, nullptr); EXPECT_EQ(error::INVALID_ARGUMENT, status.code()); EXPECT_TRUE(absl::StrContains(status.message(), "underlying_devices should not be empty")) << status.ToString(); } { Status status; underlying_devices.push_back( "/job:localhost/replica:0/task:0/device:CPU:0"); underlying_devices.push_back( "/job:localhost/replica:0/task:0/device:CPU:1"); std::unique_ptr<CompositeDevice> composite_device = CompositeDevice::MakeDevice(underlying_devices, 0, parsed_host_name, &status); TF_ASSERT_OK(status); EXPECT_EQ(composite_device->device_type(), kCompositeDeviceType); EXPECT_EQ(underlying_devices, *composite_device->underlying_devices()); } { Status status; underlying_devices.push_back( "/job:localhost/replica:0/task:0/device:GPU:0"); std::unique_ptr<CompositeDevice> composite_device = CompositeDevice::MakeDevice(underlying_devices, 1, parsed_host_name, &status); EXPECT_EQ(composite_device, nullptr); EXPECT_EQ(error::INVALID_ARGUMENT, status.code()); EXPECT_TRUE(absl::StrContains(status.message(), "Expect device type CPU; but got type GPU")) << status.ToString(); } } TEST(CompositeDeviceTest, DeviceName) { const string composite_device_name = "/job:localhost/replica:0/task:0/device:CPU:10"; std::vector<string> underlying_devices; underlying_devices.push_back("/job:worker/replica:0/task:0/device:CPU:0"); underlying_devices.push_back("/job:worker/replica:0/task:0/device:CPU:1"); Status status; std::unique_ptr<CompositeDevice> composite_device = CompositeDevice::MakeDevice(underlying_devices, composite_device_name, &status); TF_ASSERT_OK(status); EXPECT_EQ(composite_device->name(), composite_device_name); EXPECT_EQ(composite_device->device_type(), kCompositeDeviceType); EXPECT_EQ(underlying_devices, *composite_device->underlying_devices()); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/composite_device.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/composite_device_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
53602328-c7cf-4c6d-8ee9-b9779b3bed8b
cpp
tensorflow/tensorflow
ar_crs_combiner
third_party/xla/xla/service/ar_crs_combiner.cc
third_party/xla/xla/service/ar_crs_combiner_test.cc
#include "xla/service/ar_crs_combiner.h" #include <algorithm> #include <cstdint> #include <optional> #include <utility> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/literal.h" #include "xla/literal_util.h" #include "xla/service/call_graph.h" #include "xla/service/hlo_replication_analysis.h" #include "xla/service/pattern_matcher.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" namespace xla { namespace { absl::StatusOr<bool> ReplaceReplicatedAllReduce(HloModule* module, int64_t partition_count) { TF_ASSIGN_OR_RETURN( auto replication_analysis, HloReplicationAnalysis::Run(module, true)); bool changed = false; int64_t next_channel = hlo_query::NextChannelId(*module); for (auto computation : module->computations()) { for (auto instruction : computation->instructions()) { if (auto ar = DynCast<HloAllReduceInstruction>(instruction)) { const Shape& shape = ar->shape(); if (ar->channel_id()) { continue; } if (ar->replica_groups().size() > 1) { continue; } if (shape.IsTuple() || shape.element_type() != F32) { continue; } if (module->config().replica_count() < 8 * partition_count) { continue; } if (replication_analysis->HloInstructionIsReplicatedAt(ar, {})) { VLOG(2) << "Replaced replicated all-reduce:" << ar->ToString(); ar->set_channel_id(next_channel++); auto divisor = computation->AddInstruction(HloInstruction::CreateConstant( LiteralUtil::CreateR0<float>(partition_count))); auto bcast = computation->AddInstruction( HloInstruction::CreateBroadcast(shape, divisor, {})); auto div = computation->AddInstruction(HloInstruction::CreateBinary( ar->shape(), HloOpcode::kDivide, ar, bcast)); TF_RETURN_IF_ERROR(ar->ReplaceAllUsesWith(div)); changed = true; } } } } return changed; } bool HasCombinableReplicaGroup(HloInstruction* hlo, int64_t num_partitions) { auto all_reduce = Cast<HloAllReduceInstruction>(hlo); auto replica_groups = all_reduce->replica_groups(); const int64_t replica_count = hlo->GetModule()->config().replica_count(); CHECK(all_reduce->IsCrossModuleAllReduce()); if (all_reduce->use_global_device_ids()) { if (replica_groups.size() != replica_count) { return false; } for (const auto& group : replica_groups) { if (group.replica_ids_size() != num_partitions) { return false; } absl::flat_hash_set<int64_t> partition_ids; int64_t replica_id = group.replica_ids(0) / num_partitions; for (int64_t i = 0; i < num_partitions; ++i) { if (group.replica_ids(i) / num_partitions != replica_id) { return false; } partition_ids.insert(group.replica_ids(i) % num_partitions); } if (partition_ids.size() != num_partitions) { return false; } } return true; } return replica_groups.size() == replica_count; } } namespace m = match; std::optional<ArCrsCombiner::ArCrsPair> ArCrsCombiner::MatchesArCrsPattern( HloInstruction* instruction) { auto can_ar_move_past_instruction = [](HloInstruction* instruction) -> bool { if (instruction->user_count() != 1) { return false; } switch (instruction->opcode()) { 
case HloOpcode::kBitcast: case HloOpcode::kTranspose: case HloOpcode::kReshape: return true; case HloOpcode::kConvert: return ShapeUtil::ElementIsFloating(instruction->shape()) == ShapeUtil::ElementIsFloating(instruction->operand(0)->shape()); case HloOpcode::kAdd: case HloOpcode::kSubtract: case HloOpcode::kMultiply: return ShapeUtil::ElementIsFloating(instruction->shape()); default: return false; } }; auto computation_is_addition = [](HloComputation* c) { return c->instruction_count() == 3 && Match(c->root_instruction(), m::Add(m::Parameter(), m::Parameter())); }; if (instruction->IsCrossModuleAllReduce() && HasCombinableReplicaGroup(instruction, num_spatial_partitions_) && computation_is_addition(instruction->called_computations()[0]) && instruction->user_count() == 1) { auto next = instruction->users()[0]; int64_t distance = 1; while (!next->IsCrossReplicaAllReduce()) { if (can_ar_move_past_instruction(next)) { next = next->users()[0]; } else { return std::nullopt; } ++distance; } if (!Cast<HloAllReduceInstruction>(next)->IsNoop() && computation_is_addition(next->called_computations()[0])) { ArCrsPair pair(instruction, next, distance); VLOG(2) << "ArCrsPair matching pattern: " << pair.ToString(); return pair; } } return std::nullopt; } std::optional<HloInstruction*> ArCrsCombiner::WhileFromBodyParameter( HloInstruction* instruction) { CHECK_EQ(HloOpcode::kParameter, instruction->opcode()); HloComputation* computation = instruction->parent(); auto caller_instructions = call_graph_->GetComputationCallers(computation); if (caller_instructions.size() == 1) { auto caller_instruction = caller_instructions[0]; if (caller_instruction->opcode() == HloOpcode::kWhile) { return caller_instruction; } } return std::nullopt; } std::optional<HloInstruction*> ArCrsCombiner::ConditionalFromBodyParameter( HloInstruction* instruction) { CHECK_EQ(HloOpcode::kParameter, instruction->opcode()); HloComputation* computation = instruction->parent(); auto caller_instructions = call_graph_->GetComputationCallers(computation); if (caller_instructions.size() == 1) { auto caller_instruction = caller_instructions[0]; if (caller_instruction->opcode() == HloOpcode::kConditional) { return caller_instruction; } } return std::nullopt; } std::optional<std::vector<HloInstruction*>> ArCrsCombiner::GetAllTuples( HloInstruction* instruction, absl::flat_hash_set<HloInstruction*>* visited) { if (visited->find(instruction) != visited->end()) { return std::vector<HloInstruction*>(); } visited->insert(instruction); switch (instruction->opcode()) { case HloOpcode::kTuple: { return std::vector<HloInstruction*>({instruction}); } case HloOpcode::kDomain: { return GetAllTuples(instruction->operands()[0], visited); } case HloOpcode::kParameter: { auto maybe_while = WhileFromBodyParameter(instruction); if (maybe_while) { auto while_instr = *maybe_while; auto init_tuples = GetAllTuples(while_instr->while_init(), visited); auto body_tuples = GetAllTuples( while_instr->while_body()->root_instruction(), visited); if (!init_tuples || !body_tuples) { return std::nullopt; } auto result = *init_tuples; result.insert(result.end(), body_tuples->begin(), body_tuples->end()); return result; } auto maybe_conditional = ConditionalFromBodyParameter(instruction); if (maybe_conditional) { auto cond_instr = *maybe_conditional; std::vector<HloInstruction*> tuples; for (int64_t i = 0; i < cond_instr->branch_computations().size(); ++i) { if (cond_instr->branch_computation(i)->parameter_instruction(0) == instruction) { auto branch_tuples = 
GetAllTuples(cond_instr->mutable_operand(i + 1), visited); if (!branch_tuples) { return std::nullopt; } tuples.insert(tuples.end(), branch_tuples->begin(), branch_tuples->end()); } } return tuples; } return std::nullopt; } case HloOpcode::kGetTupleElement: { std::vector<HloInstruction*> result_tuples; auto tuples = GetAllTuples(instruction->operands()[0], visited); if (!tuples) { return std::nullopt; } for (auto tuple : *tuples) { auto tmp_tuples = GetAllTuples( tuple->mutable_operand(instruction->tuple_index()), visited); if (!tmp_tuples) { return std::nullopt; } result_tuples.insert(result_tuples.end(), tmp_tuples->begin(), tmp_tuples->end()); } return result_tuples; } case HloOpcode::kConditional: { std::vector<HloInstruction*> result_tuples; const auto& branch_computations = instruction->branch_computations(); result_tuples.reserve(branch_computations.size()); for (HloComputation* body : branch_computations) { if (body->root_instruction()->opcode() != HloOpcode::kTuple) { return std::nullopt; } result_tuples.push_back(body->root_instruction()); } return result_tuples; } case HloOpcode::kWhile: { auto init_tuples = GetAllTuples(instruction->while_init(), visited); auto body_tuples = GetAllTuples(instruction->while_body()->root_instruction(), visited); if (!init_tuples || !body_tuples) { return std::nullopt; } auto result = *init_tuples; result.insert(result.end(), body_tuples->begin(), body_tuples->end()); return result; } default: return std::nullopt; } } bool ArCrsCombiner::TupleElementsComputeSameValue( HloInstruction* tuple_shaped_instruction, int64_t i1, int64_t i2, absl::flat_hash_map<int64_t, int64_t>* visited_pairs) { absl::flat_hash_set<HloInstruction*> visited; auto tuples = GetAllTuples(tuple_shaped_instruction, &visited); if (!tuples) { return false; } for (auto tuple : *tuples) { CHECK_EQ(tuple->opcode(), HloOpcode::kTuple); if (!InstructionsComputeSameValue(tuple->mutable_operand(i1), tuple->mutable_operand(i2), visited_pairs)) { return false; } } return true; } bool ArCrsCombiner::TestInstructionsComputeSameValue(HloInstruction* i1, HloInstruction* i2) { ArCrsCombiner combiner(2, false); auto module = i1->GetModule(); CHECK_EQ(module, i2->GetModule()); combiner.call_graph_ = CallGraph::Build(module); absl::flat_hash_map<int64_t, int64_t> visited_pairs; return combiner.InstructionsComputeSameValue(i1, i2, &visited_pairs); } bool ArCrsCombiner::InstructionsComputeSameValue( HloInstruction* i1, HloInstruction* i2, absl::flat_hash_map<int64_t, int64_t>* visited_pairs) { if (i1 == i2) { return true; } auto uid1 = i1->unique_id(); auto uid2 = i2->unique_id(); auto min_uid = std::min(uid1, uid2); auto max_uid = std::max(uid1, uid2); auto it = visited_pairs->find(min_uid); if (it != visited_pairs->end() && max_uid == it->second) { return true; } auto opcode1 = i1->opcode(); auto operands1 = i1->operands(); if (opcode1 != i2->opcode() || operands1.size() != i2->operands().size()) { return false; } auto eq_computations = [](const HloComputation* a, const HloComputation* b) { return *a == *b; }; auto eq_operands = [](const HloInstruction*, const HloInstruction*) { return true; }; if (i1->IsCrossModuleAllReduce()) { return i1->Identical(*i2, eq_operands, eq_computations, false); } visited_pairs->emplace(min_uid, max_uid); for (int i = 0; i < operands1.size(); ++i) { auto operand1 = operands1[i]; auto operand2 = i2->operands()[i]; if (!InstructionsComputeSameValue(operand1, operand2, visited_pairs)) { return false; } } if (opcode1 == HloOpcode::kParameter) { return false; } if 
(opcode1 == HloOpcode::kGetTupleElement) { return i1->tuple_index() == i2->tuple_index() || TupleElementsComputeSameValue(operands1[0], i1->tuple_index(), i2->tuple_index(), visited_pairs); } auto eq_instructions = [](const HloInstruction* i1, const HloInstruction* i2) -> bool { return true; }; return i1->Identical(*i2, eq_instructions, eq_computations, false); } void ArCrsCombiner::GroupAllReducesById(HloModule* module) { absl::flat_hash_set<int64_t> discarded_ar_ids; for (HloComputation* computation : module->MakeNonfusionComputations()) { for (HloInstruction* instruction : computation->instructions()) { auto maybe_pair = MatchesArCrsPattern(instruction); if (maybe_pair) { auto pair = *maybe_pair; int64_t ar_id = *(instruction->channel_id()); if (discarded_ar_ids.find(ar_id) != discarded_ar_ids.end()) { continue; } auto it = crs_reserved_map_.find(pair.crs); if (it != crs_reserved_map_.end()) { auto prev_ar_id = it->second; CHECK(all_reduce_map_.find(ar_id) == all_reduce_map_.end()); CHECK_NE(prev_ar_id, ar_id); auto prev_pair = all_reduce_map_[prev_ar_id].back(); int64_t prev_distance = prev_pair.distance; if (prev_distance < pair.distance) { VLOG(2) << "Replacing ArCrsPair: " << prev_pair.ToString() << " with ArCrsPair: " << pair.ToString(); all_reduce_map_.erase(prev_ar_id); discarded_ar_ids.insert(prev_ar_id); all_reduce_map_[ar_id].push_back(pair); crs_reserved_map_[pair.crs] = ar_id; } else { discarded_ar_ids.insert(ar_id); } } else { if (all_reduce_map_.find(ar_id) != all_reduce_map_.end()) { int64_t prev_distance = all_reduce_map_[ar_id].back().distance; CHECK_EQ(prev_distance, pair.distance) << "All ARs with the same AR ID must have the same distance " "from the corresponding CRSs. Found: " << prev_distance << " and " << pair.distance; } all_reduce_map_[ar_id].push_back(pair); crs_reserved_map_[pair.crs] = ar_id; } } } } } absl::Status ArCrsCombiner::KeepProvablyEqualInstructionGroupsMPMD() { for (auto it = all_reduce_map_.begin(); it != all_reduce_map_.end();) { auto copy_it = it++; auto channel_id = copy_it->first; VLOG(2) << "KeepProvablyEqualInstructionGroups. Checking AllReduce channel id: " << channel_id << "\n"; auto pairs_vec = copy_it->second; TF_RET_CHECK(pairs_vec.size() == num_spatial_partitions_); auto instr_0 = pairs_vec[0].ar; for (int i = 1; i < pairs_vec.size(); ++i) { auto instr_i = pairs_vec[i].ar; auto next_0 = instr_0->users()[0]; auto next_i = instr_i->users()[0]; absl::flat_hash_map<int64_t, int64_t> visited_pairs; while (true) { if (!InstructionsComputeSameValue(next_0, next_i, &visited_pairs)) { all_reduce_map_.erase(copy_it); VLOG(2) << "KeepProvablyEqualInstructionGroups. Erased AllReduce " "channel id: " << channel_id << "\n"; break; } if (next_0->IsCrossReplicaAllReduce()) { break; } next_0 = next_0->users()[0]; next_i = next_i->users()[0]; } } } return absl::OkStatus(); } absl::Status ArCrsCombiner::KeepProvablyEqualInstructionGroupsSPMD( HloModule* module) { TF_ASSIGN_OR_RETURN( auto replication_analysis, HloReplicationAnalysis::Run(module, true)); for (auto it = all_reduce_map_.begin(); it != all_reduce_map_.end();) { auto copy_it = it++; auto channel_id = copy_it->first; VLOG(2) << "KeepProvablyEqualInstructionGroups. 
Checking AllReduce channel id: " << channel_id << "\n"; auto pairs_vec = copy_it->second; TF_RET_CHECK(pairs_vec.size() == 1); auto instr = pairs_vec[0].ar; auto next = instr->users()[0]; while (true) { TF_RET_CHECK(next->shape().IsArray()); if (!replication_analysis->HloInstructionIsReplicatedAt(next, {})) { all_reduce_map_.erase(copy_it); VLOG(2) << "KeepProvablyEqualInstructionGroups. Erased AllReduce " "channel id: " << channel_id << "\n"; break; } if (next->IsCrossReplicaAllReduce()) { break; } next = next->users()[0]; } } return absl::OkStatus(); } absl::StatusOr<bool> ArCrsCombiner::RewriteGraph() { if (all_reduce_map_.empty()) { return false; } for (const auto& it : all_reduce_map_) { auto pairs_vec = it.second; for (auto pair : pairs_vec) { auto all_reduce = pair.ar; auto parent_computation = all_reduce->parent(); auto channel_id = all_reduce->channel_id(); auto prev = all_reduce->mutable_operand(0); auto next = all_reduce->users()[0]; TF_CHECK_OK(all_reduce->ReplaceUseWith(next, prev)); TF_CHECK_OK(parent_computation->RemoveInstruction(all_reduce)); while (!next->IsCrossReplicaAllReduce()) { switch (next->opcode()) { case HloOpcode::kBitcast: case HloOpcode::kTranspose: case HloOpcode::kReshape: case HloOpcode::kConvert: case HloOpcode::kMultiply: break; case HloOpcode::kAdd: case HloOpcode::kSubtract: { auto other_operand = (next->operands()[0] == prev) ? next->operands()[1] : next->operands()[0]; if (other_operand->IsCrossModuleAllReduce() && other_operand->user_count() == 1) { TF_CHECK_OK(other_operand->ReplaceAllUsesWith( other_operand->mutable_operand(0))); } else { auto shape = other_operand->shape(); Literal lit(shape); lit.PopulateWithValue<float>(num_spatial_partitions_); auto divisor = parent_computation->AddInstruction( HloInstruction::CreateConstant(lit.Clone())); auto division = parent_computation->AddInstruction( HloInstruction::CreateBinary(shape, HloOpcode::kDivide, other_operand, divisor)); TF_CHECK_OK(other_operand->ReplaceUseWith(next, division)); } break; } default: LOG(FATAL) << "Unexpected instruction: " << next->ToShortString(); } prev = next; next = next->users()[0]; } next->set_channel_id(channel_id); } } return true; } absl::StatusOr<bool> ArCrsCombiner::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { call_graph_ = CallGraph::Build(module); GroupAllReducesById(module); if (spmd_partition_) { TF_RETURN_IF_ERROR(KeepProvablyEqualInstructionGroupsSPMD(module)); } else { TF_RETURN_IF_ERROR(KeepProvablyEqualInstructionGroupsMPMD()); } TF_ASSIGN_OR_RETURN(auto changed, RewriteGraph()); if (module->config().replica_count() > 1 && spmd_partition_) { TF_ASSIGN_OR_RETURN(auto replaced, ReplaceReplicatedAllReduce( module, num_spatial_partitions_)); changed |= replaced; } return changed; } }
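As with any HLO pass, the combiner is driven through Run. A sketch, assuming `module` is a std::unique_ptr<HloModule>; the constructor argument names are inferred from the num_spatial_partitions_ and spmd_partition_ members used above, and the value 2 mirrors TestInstructionsComputeSameValue and is purely illustrative:

// Sketch only: running the AR/CRS combiner over a module.
ArCrsCombiner combiner(/*num_spatial_partitions=*/2, /*spmd_partition=*/false);
TF_ASSIGN_OR_RETURN(bool changed,
                    combiner.Run(module.get(), /*execution_threads=*/{}));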
#include "xla/service/ar_crs_combiner.h" #include <cstdint> #include <memory> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/statusor.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" #include "tsl/platform/statusor.h" namespace xla { namespace { namespace op = xla::testing::opcode_matchers; class ArCrsCombinerTest : public HloTestBase {}; TEST_F(ArCrsCombinerTest, SameValueTestBasecase) { const char* module_str = R"( HloModule foobar ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) { %p = f32[2,2] parameter(0) %constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}}) %constant.f32.2 = f32[2,2] constant({{1, 2}, {3, 4}}) ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_tuple = module->entry_computation()->root_instruction(); auto i1 = root_tuple->operands()[0]; auto i2 = root_tuple->operands()[1]; EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue( i1, module->entry_computation()->parameter_instruction(0))); EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestBasecase2) { const char* module_str = R"( HloModule foobar ENTRY %entrycomp (x: f32[]) -> (f32[], f32[]) { %x = f32[] parameter(0) ROOT %tuple = (f32[], f32[]) tuple(%x, %x) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_tuple = module->entry_computation()->root_instruction(); auto i1 = root_tuple->operands()[0]; auto i2 = root_tuple->operands()[1]; EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestBasecase3) { const char* module_str = R"( HloModule foobar ENTRY %entrycomp (x: f32[], y: f32[]) -> (f32[], f32[]) { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %tuple = (f32[], f32[]) tuple(%x, %y) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_tuple = module->entry_computation()->root_instruction(); auto i1 = root_tuple->operands()[0]; auto i2 = root_tuple->operands()[1]; EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestNumOperands) { const char* module_str = R"( HloModule foobar ENTRY %entrycomp (p: f32[2,2]) -> ((f32[2,2]), (f32[2,2], f32[2,2])) { %p = f32[2,2] parameter(0) %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}}) %tuple1 = (f32[2,2]) tuple(%constant.f32) %tuple2 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32) ROOT %tuple = ((f32[2,2]), (f32[2,2], f32[2,2])) tuple(%tuple1, %tuple2) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_tuple = module->entry_computation()->root_instruction(); auto i1 = root_tuple->operands()[0]; auto i2 = root_tuple->operands()[1]; EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestSliceIndicesMatch) { const char* module_str = R"( HloModule foobar ENTRY %entrycomp (p: f32[2]) -> (f32[1], f32[1]) { %p = f32[2] parameter(0) %slice.1 = f32[1] slice(f32[2] %p), slice={[0:1]} %slice.2 = f32[1] slice(f32[2] %p), slice={[0:1]} ROOT %tuple = (f32[1], f32[1]) tuple(%slice.1, %slice.2) } )"; 
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_tuple = module->entry_computation()->root_instruction(); auto i1 = root_tuple->operands()[0]; auto i2 = root_tuple->operands()[1]; EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestSliceIndicesDontMatch) { const char* module_str = R"( HloModule foobar ENTRY %entrycomp (p: f32[2]) -> (f32[1], f32[1]) { %p = f32[2] parameter(0) %slice.1 = f32[1] slice(f32[2] %p), slice={[0:1]} %slice.2 = f32[1] slice(f32[2] %p), slice={[1:2]} ROOT %tuple = (f32[1], f32[1]) tuple(%slice.1, %slice.2) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_tuple = module->entry_computation()->root_instruction(); auto i1 = root_tuple->operands()[0]; auto i2 = root_tuple->operands()[1]; EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestTupleElementSameIndex) { const char* module_str = R"( HloModule foobar ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) { %p = f32[2,2] parameter(0) %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}}) %tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32) %get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0 %get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=0 ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_tuple = module->entry_computation()->root_instruction(); auto i1 = root_tuple->operands()[0]; auto i2 = root_tuple->operands()[1]; EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestTupleElementDifferentIndex1) { const char* module_str = R"( HloModule foobar ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) { %p = f32[2,2] parameter(0) %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}}) %tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32) %get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0 %get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=1 ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_tuple = module->entry_computation()->root_instruction(); auto i1 = root_tuple->operands()[0]; auto i2 = root_tuple->operands()[1]; EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestTupleElementDifferentIndex2) { const char* module_str = R"( HloModule foobar ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) { %p = f32[2,2] parameter(0) %constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}}) %constant.f32.2 = f32[2,2] constant({{2, 3}, {4, 5}}) %tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2) %get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0 %get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=1 ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_tuple = module->entry_computation()->root_instruction(); auto i1 = root_tuple->operands()[0]; auto i2 = root_tuple->operands()[1]; 
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestWhile1) { const char* module_str = R"( HloModule foobar %condition (x: (f32[2,2], f32[2,2])) -> pred[] { %x = (f32[2,2], f32[2,2]) parameter(0) %constant.0 = s32[] constant(0) %constant.1 = s32[] constant(1) ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %constant.0), direction=GT } %body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) { %x = (f32[2,2], f32[2,2]) parameter(0) %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}}) %get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0 %get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1 %add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32) %add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32) ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2) } ENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) { %constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}}) %init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32) ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_while = module->entry_computation()->root_instruction(); auto body_tuple = root_while->while_body()->root_instruction(); auto i1 = body_tuple->operands()[0]; auto i2 = body_tuple->operands()[1]; EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestWhile2) { const char* module_str = R"( HloModule foobar %condition (x: (f32[2,2], f32[2,2])) -> pred[] { %x = (f32[2,2], f32[2,2]) parameter(0) %constant.0 = s32[] constant(0) %constant.1 = s32[] constant(1) ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %constant.0), direction=GT } %body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) { %x = (f32[2,2], f32[2,2]) parameter(0) %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}}) %get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0 %get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1 %add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32) %add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32) ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2) } ENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) { %constant.f32.1 = f32[2,2] constant({{3, 4}, {5, 6}}) %constant.f32.2 = f32[2,2] constant({{3, 4}, {7, 8}}) %init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2) ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_while = module->entry_computation()->root_instruction(); auto body_tuple = root_while->while_body()->root_instruction(); auto i1 = body_tuple->operands()[0]; auto i2 = body_tuple->operands()[1]; EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestWhile3) { const char* module_str = R"( HloModule foobar %condition (x: (f32[2,2], f32[2,2])) -> pred[] { %x = (f32[2,2], f32[2,2]) parameter(0) %constant.0 = s32[] constant(0) %constant.1 = s32[] constant(1) ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %constant.0), direction=GT } %body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) { %x = (f32[2,2], f32[2,2]) parameter(0) %constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}}) %constant.f32.2 = f32[2,2] constant({{3, 4}, {1, 2}}) 
%get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0 %get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1 %add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32.1) %add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32.2) ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2) } ENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) { %constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}}) %init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32) ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_while = module->entry_computation()->root_instruction(); auto body_tuple = root_while->while_body()->root_instruction(); auto i1 = body_tuple->operands()[0]->operands()[0]; auto i2 = body_tuple->operands()[1]->operands()[0]; EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } TEST_F(ArCrsCombinerTest, SameValueTestNestedWhile) { const char* module_str = R"( HloModule foobar %condition (x: (f32[2,2], f32[2,2])) -> pred[] { %x = (f32[2,2], f32[2,2]) parameter(0) ROOT %t = pred[] constant(true) } %body_inner (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) { %x = (f32[2,2], f32[2,2]) parameter(0) %constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}}) %gte.1 = f32[2,2] get-tuple-element(%x), index=0 %gte.2 = f32[2,2] get-tuple-element(%x), index=1 %add.1 = f32[2,2] add(%gte.1, %constant.f32) %add.2 = f32[2,2] add(%gte.2, %constant.f32) ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2) } %body_outer (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) { %x = (f32[2,2], f32[2,2]) parameter(0) %gte.1 = f32[2,2] get-tuple-element(%x), index=0 %gte.2 = f32[2,2] get-tuple-element(%x), index=1 %init = (f32[2,2], f32[2,2]) tuple(%gte.1, %gte.2) ROOT %while.1 = (f32[2,2], f32[2,2]) while(%init), condition=%condition, body=%body_inner } ENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) { %constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}}) %init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32) ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body_outer } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto root_while = module->entry_computation()->root_instruction(); auto inner_while = root_while->while_body()->root_instruction(); auto i1 = inner_while->while_body()->root_instruction()->operands()[0]; auto i2 = inner_while->while_body()->root_instruction()->operands()[1]; EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2)); } void CompareReplicaGroups(absl::Span<const ReplicaGroup> groups_before, absl::Span<const ReplicaGroup> groups_after) { ASSERT_EQ(groups_before.size(), groups_after.size()); for (int i = 0; i < groups_before.size(); ++i) { auto group_before = groups_before[i]; std::vector<int64_t> ids_before(group_before.replica_ids().begin(), group_before.replica_ids().end()); auto group_after = groups_after[i]; std::vector<int64_t> ids_after(group_after.replica_ids().begin(), group_after.replica_ids().end()); EXPECT_EQ(ids_before, ids_after); } } TEST_F(ArCrsCombinerTest, RewriteArConvertCrs) { const char* module_str = R"( HloModule foobar %sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] { %a = bf16[] parameter(0) %b = bf16[] parameter(1) ROOT %add = bf16[] add(%a, %b) } %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY 
%entrycomp (p: bf16[]) -> (f32[], f32[]) { %p = bf16[] parameter(0) %constant.bf16 = bf16[] constant(1) %all-reduce.ar.1 = bf16[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.bf16, sharding={maximal device=0} %convert.1 = f32[] convert(%all-reduce.ar.1), sharding={maximal device=0} %all-reduce.1 = f32[] all-reduce(%convert.1), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=0} %all-reduce.ar.2 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.bf16, sharding={maximal device=1} %convert.2 = f32[] convert(%all-reduce.ar.2), sharding={maximal device=1} %all-reduce.2 = f32[] all-reduce(%convert.2), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=1} ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.1, %all-reduce.2), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Convert(op::Parameter())), op::AllReduce(op::Convert(op::Constant())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); } TEST_F(ArCrsCombinerTest, RewriteArConvertCrsSPMD) { const char* module_str = R"( HloModule foobar %sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] { %a = bf16[] parameter(0) %b = bf16[] parameter(1) ROOT %add = bf16[] add(%a, %b) } %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: bf16[]) -> (f32[]) { %p = bf16[] parameter(0) %all-reduce.ar.1 = bf16[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.bf16 %convert.1 = f32[] convert(%all-reduce.ar.1) %all-reduce.1 = f32[] all-reduce(%convert.1), replica_groups={{0,1}}, to_apply=%sum.f32 ROOT %tuple = (f32[]) tuple(%all-reduce.1) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Convert(op::Parameter())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); } TEST_F(ArCrsCombinerTest, RewriteArBitcastCrs) { const char* module_str = R"( HloModule foobar %sum.1 (a: f32[2,1], b: f32[2,1]) -> f32[2,1] { %a = f32[2,1] parameter(0) %b = f32[2,1] parameter(1) ROOT %add = f32[2,1] add(%a, %b) } %sum.2 (x: f32[2], y: f32[2]) -> f32[2] { %x = f32[2] parameter(0) %y = f32[2] parameter(1) ROOT %add = f32[2] add(%x, %y) } ENTRY %entrycomp (p: f32[2,1]) -> (f32[2], f32[2]) { %p = f32[2,1] parameter(0) %all-reduce.ar.1 = f32[2,1] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.1, sharding={maximal device=0} 
%bitcast.1 = f32[2]{0} bitcast(f32[2,1]{1,0} %all-reduce.ar.1) %all-reduce.1 = f32[2] all-reduce(%bitcast.1), replica_groups={{0,1}}, to_apply=%sum.2, sharding={maximal device=0} %all-reduce.ar.2 = f32[2,1] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.1, sharding={maximal device=1} %bitcast.2 = f32[2]{0} bitcast(f32[2,1]{1,0} %all-reduce.ar.2) %all-reduce.2 = f32[2] all-reduce(%bitcast.2), replica_groups={{0,1}}, to_apply=%sum.2, sharding={maximal device=1} ROOT %tuple = (f32[2], f32[2]) tuple(%all-reduce.1, %all-reduce.2), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Bitcast(op::Parameter())), op::AllReduce(op::Bitcast(op::Parameter())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); } TEST_F(ArCrsCombinerTest, RewriteArMultiplyCrs) { const char* module_str = R"( HloModule foobar %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) { %p = f32[] parameter(0) %constant.f32 = f32[] constant(123) %all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.f32, sharding={maximal device=0} %multiply.1 = f32[] multiply(%all-reduce.ar.1, %constant.f32), sharding={maximal device=0} %all-reduce.1 = f32[] all-reduce(%multiply.1), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=0} %all-reduce.ar.2 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.f32, sharding={maximal device=1} %multiply.2 = f32[] multiply(%all-reduce.ar.2, %constant.f32), sharding={maximal device=1} %all-reduce.2 = f32[] all-reduce(%multiply.2), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=1} ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.1, %all-reduce.2), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Multiply(op::Parameter(), op::Constant())), op::AllReduce(op::Multiply(op::Parameter(), op::Constant())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); } TEST_F(ArCrsCombinerTest, RewriteArMultiplyCrsSPMD) { const char* module_str = R"( HloModule foobar %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[]) { %p = f32[] parameter(0) %constant.f32 = f32[] constant(123) %all-reduce.ar.1 
= f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.f32 %multiply.1 = f32[] multiply(%all-reduce.ar.1, %constant.f32) %all-reduce.1 = f32[] all-reduce(%multiply.1), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=0} ROOT %tuple = (f32[]) tuple(%all-reduce.1) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Multiply(op::Parameter(), op::Constant())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); } TEST_F(ArCrsCombinerTest, RewriteArConvertAddCrs) { const char* module_str = R"( HloModule foobar %sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] { %a = bf16[] parameter(0) %b = bf16[] parameter(1) ROOT %add = bf16[] add(%a, %b) } %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) { %p = f32[] parameter(0) %constant.bf16 = bf16[] constant(1) %constant.f32 = f32[] constant(2) %all-reduce.ar.1 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.bf16, sharding={maximal device=0} %convert.1 = f32[] convert(%all-reduce.ar.1), sharding={maximal device=0} %add.1 = f32[] add(%constant.f32, %convert.1), sharding={maximal device=0} %all-reduce.1 = f32[] all-reduce(%add.1), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=0} %all-reduce.ar.2 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.bf16, sharding={maximal device=1} %convert.2 = f32[] convert(%all-reduce.ar.2), sharding={maximal device=1} %add.2 = f32[] add(%constant.f32, %convert.2), sharding={maximal device=1} %all-reduce.2 = f32[] all-reduce(%add.2), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=1} ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.1, %all-reduce.2), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple( op::AllReduce(op::Add(op::Divide(op::Constant(), op::Constant()), op::Convert())), op::AllReduce(op::Add(op::Divide(op::Constant(), op::Constant()), op::Convert())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); } TEST_F(ArCrsCombinerTest, RewriteArConvertAddCrsSPMD) { const char* module_str = R"( HloModule foobar %sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] { %a = bf16[] parameter(0) %b = bf16[] parameter(1) ROOT %add = bf16[] add(%a, %b) } %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT 
%add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[]) { %p = f32[] parameter(0) %constant.bf16 = bf16[] constant(1) %constant.f32 = f32[] constant(2) %all-reduce.ar.1 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.bf16 %convert.1 = f32[] convert(%all-reduce.ar.1), sharding={maximal device=0} %add.1 = f32[] add(%constant.f32, %convert.1) %all-reduce.1 = f32[] all-reduce(%add.1), replica_groups={{0,1}}, to_apply=%sum.f32 ROOT %tuple = (f32[]) tuple(%all-reduce.1) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Add( op::Divide(op::Constant(), op::Constant()), op::Convert())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); } TEST_F(ArCrsCombinerTest, OtherSummandNotTheSameDontRewrite) { const char* module_str = R"( HloModule foobar %sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] { %a = bf16[] parameter(0) %b = bf16[] parameter(1) ROOT %add = bf16[] add(%a, %b) } %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) { %p = f32[] parameter(0) %constant.bf16 = bf16[] constant(1) %constant.f32.1 = f32[] constant(2) %constant.f32.2 = f32[] constant(3) %all-reduce.ar.1 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.bf16, sharding={maximal device=0} %convert.1 = f32[] convert(%all-reduce.ar.1), sharding={maximal device=0} %add.1 = f32[] add(%constant.f32.1, %convert.1), sharding={maximal device=0} %all-reduce.1 = f32[] all-reduce(%add.1), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=0} %all-reduce.ar.2 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.bf16, sharding={maximal device=1} %convert.2 = f32[] convert(%all-reduce.ar.2), sharding={maximal device=1} %add.2 = f32[] add(%constant.f32.2, %convert.2), sharding={maximal device=1} %all-reduce.2 = f32[] all-reduce(%add.2), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=1} ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.1, %all-reduce.2), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_FALSE(changed); } TEST_F(ArCrsCombinerTest, OtherSummandNotTheSameDontRewriteSPMD) { const char* module_str = R"( HloModule foobar %sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] { %a = bf16[] parameter(0) %b = bf16[] parameter(1) ROOT %add = bf16[] add(%a, %b) } %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[]) { %p = f32[] parameter(0) %constant.bf16 = bf16[] constant(1) %constant.f32.1 = f32[] constant(2) %all-reduce.ar.1 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.bf16 
%convert.1 = f32[] convert(%all-reduce.ar.1) %add.1 = f32[] add(%p, %convert.1) %all-reduce.1 = f32[] all-reduce(%add.1), replica_groups={{0,1}}, to_apply=%sum.f32 ROOT %tuple = (f32[]) tuple(%all-reduce.1) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_FALSE(changed); } TEST_F(ArCrsCombinerTest, ArThenCrsDontCrash) { const char* module_str = R"( HloModule foobar %sum.1 (a: f32[], b: f32[]) -> f32[] { %a = f32[] parameter(0) %b = f32[] parameter(1) ROOT %add = f32[] add(%a, %b) } ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) { %p = f32[] parameter(0) %constant.f32 = f32[] constant(123) %all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.1, sharding={maximal device=0} %all-reduce.1 = f32[] all-reduce(%all-reduce.ar.1), replica_groups={{0,1}}, to_apply=%sum.1, sharding={maximal device=0} %multiply.1 = f32[] multiply(%all-reduce.1, %constant.f32), sharding={maximal device=0} %all-reduce.ar.2 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.1, sharding={maximal device=1} %all-reduce.2 = f32[] all-reduce(%all-reduce.ar.2), replica_groups={{0,1}}, to_apply=%sum.1, sharding={maximal device=1} %multiply.2 = f32[] multiply(%all-reduce.2, %constant.f32), sharding={maximal device=1} ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.1, %all-reduce.2), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Parameter()), op::AllReduce(op::Parameter()))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); } TEST_F(ArCrsCombinerTest, RewriteMultipleAdds) { const char* module_str = R"( HloModule foobar %sum (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) { %p = f32[] parameter(0) %constant.1 = f32[] constant(1) %constant.2 = f32[] constant(2) %all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum, sharding={maximal device=0} %add.11 = f32[] add(%constant.1, %all-reduce.ar.1), sharding={maximal device=0} %add.12 = f32[] add(%constant.2, %add.11), sharding={maximal device=0} %all-reduce.1 = f32[] all-reduce(%add.12), replica_groups={{0,1}}, to_apply=%sum, sharding={maximal device=0} %all-reduce.ar.2 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum, sharding={maximal device=0} %add.21 = f32[] add(%constant.1, %all-reduce.ar.2), sharding={maximal device=0} %add.22 = f32[] add(%constant.2, %add.21), sharding={maximal device=0} %all-reduce.2 = f32[] all-reduce(%add.22), replica_groups={{0,1}}, to_apply=%sum, sharding={maximal device=0} ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.1, %all-reduce.2), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, 
ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Add( op::Divide(op::Constant(), op::Constant()), op::Add(op::Divide(op::Constant(), op::Constant()), op::Parameter()))), op::AllReduce(op::Add( op::Divide(op::Constant(), op::Constant()), op::Add(op::Divide(op::Constant(), op::Constant()), op::Parameter()))))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); } TEST_F(ArCrsCombinerTest, RewriteMultipleAddsSPMD) { const char* module_str = R"( HloModule foobar %sum (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[]) { %p = f32[] parameter(0) %constant.1 = f32[] constant(1) %constant.2 = f32[] constant(2) %all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum %add.11 = f32[] add(%constant.1, %all-reduce.ar.1) %add.12 = f32[] add(%constant.2, %add.11) %all-reduce.1 = f32[] all-reduce(%add.12), replica_groups={{0,1}}, to_apply=%sum ROOT %tuple = (f32[]) tuple(%all-reduce.1) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce( op::Add(op::Divide(op::Constant(), op::Constant()), op::Add(op::Divide(op::Constant(), op::Constant()), op::Parameter()))))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); } TEST_F(ArCrsCombinerTest, RewriteArSubtractCrs) { const char* module_str = R"( HloModule foobar %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) { %p = f32[] parameter(0) %constant.f32 = f32[] constant(123) %all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.f32, sharding={maximal device=0} %sub.1 = f32[] subtract(%constant.f32, %all-reduce.ar.1), sharding={maximal device=0} %all-reduce.1 = f32[] all-reduce(%sub.1), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=0} %all-reduce.ar.2 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.f32, sharding={maximal device=1} %sub.2 = f32[] subtract(%constant.f32, %all-reduce.ar.2), sharding={maximal device=1} %all-reduce.2 = f32[] all-reduce(%sub.2), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=1} ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.1, %all-reduce.2), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = 
module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple( op::AllReduce(op::Subtract(op::Divide(op::Constant(), op::Constant()), op::Parameter())), op::AllReduce(op::Subtract(op::Divide(op::Constant(), op::Constant()), op::Parameter())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); } TEST_F(ArCrsCombinerTest, RewriteArSubtractCrsSPMD) { const char* module_str = R"( HloModule foobar %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[]) { %p = f32[] parameter(0) %constant.f32 = f32[] constant(123) %all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum.f32 %sub.1 = f32[] subtract(%constant.f32, %all-reduce.ar.1) %all-reduce.1 = f32[] all-reduce(%sub.1), replica_groups={{0,1}}, to_apply=%sum.f32 ROOT %tuple = (f32[]) tuple(%all-reduce.1) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Subtract( op::Divide(op::Constant(), op::Constant()), op::Parameter())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); } TEST_F(ArCrsCombinerTest, RewriteMultipleARsLeft) { const char* module_str = R"( HloModule foobar %sum (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) { %p = f32[] parameter(0) %const1 = f32[] constant(1) %const2 = f32[] constant(2) %ar11 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum, sharding={maximal device=0} %add11 = f32[] add(%ar11, %const1), sharding={maximal device=0} %ar12 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=2, to_apply=%sum, sharding={maximal device=0} %add12 = f32[] add(%add11, %ar12), sharding={maximal device=0} %crs1 = f32[] all-reduce(%add12), replica_groups={{0,1}}, to_apply=%sum, sharding={maximal device=0} %ar21 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum, sharding={maximal device=1} %add21 = f32[] add(%ar21, %const1), sharding={maximal device=1} %ar22 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=2, to_apply=%sum, sharding={maximal device=1} %add22 = f32[] add(%add21, %ar22), sharding={maximal device=1} %crs2 = f32[] all-reduce(%add22), replica_groups={{0,1}}, to_apply=%sum, sharding={maximal device=1} ROOT %tuple = (f32[], f32[]) tuple(%crs1, %crs2), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = 
module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Add( op::Add(op::Parameter(), op::Divide(op::Constant(), op::Constant())), op::Parameter())), op::AllReduce(op::Add( op::Add(op::Parameter(), op::Divide(op::Constant(), op::Constant())), op::Parameter())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); } TEST_F(ArCrsCombinerTest, RewriteMultipleARsLeftSPMD) { const char* module_str = R"( HloModule foobar %sum (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[]) { %p = f32[] parameter(0) %const1 = f32[] constant(1) %const2 = f32[] constant(2) %ar11 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum %add11 = f32[] add(%ar11, %const1) %ar12 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=2, to_apply=%sum %add12 = f32[] add(%add11, %ar12) %crs1 = f32[] all-reduce(%add12), replica_groups={{0,1}}, to_apply=%sum ROOT %tuple = (f32[]) tuple(%crs1) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Add( op::Add(op::Parameter(), op::Divide(op::Constant(), op::Constant())), op::Parameter())))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); } TEST_F(ArCrsCombinerTest, RewriteMultipleARsRight) { const char* module_str = R"( HloModule foobar %sum (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) { %p = f32[] parameter(0) %const1 = f32[] constant(1) %const2 = f32[] constant(2) %ar11 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum, sharding={maximal device=0} %ar12 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=2, to_apply=%sum, sharding={maximal device=0} %add11 = f32[] add(%ar12, %const1), sharding={maximal device=0} %add12 = f32[] add(%ar11, %add11), sharding={maximal device=0} %crs1 = f32[] all-reduce(%add12), replica_groups={{0,1}}, to_apply=%sum, sharding={maximal device=0} %ar21 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum, sharding={maximal device=1} %ar22 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=2, to_apply=%sum, sharding={maximal device=1} %add21 = f32[] add(%ar22, %const1), sharding={maximal device=1} %add22 = f32[] add(%ar21, %add21), sharding={maximal device=1} %crs2 = f32[] all-reduce(%add22), replica_groups={{0,1}}, to_apply=%sum, sharding={maximal device=1} ROOT %tuple = (f32[], f32[]) tuple(%crs1, %crs2), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( 
std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Add( op::Parameter(), op::Add(op::Parameter(), op::Divide(op::Constant(), op::Constant())))), op::AllReduce(op::Add( op::Parameter(), op::Add(op::Parameter(), op::Divide(op::Constant(), op::Constant())))))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); } TEST_F(ArCrsCombinerTest, RewriteMultipleARsRightSPMD) { const char* module_str = R"( HloModule foobar %sum (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[]) -> (f32[]) { %p = f32[] parameter(0) %const1 = f32[] constant(1) %const2 = f32[] constant(2) %ar11 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum %ar12 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=2, to_apply=%sum %add11 = f32[] add(%ar12, %const1) %add12 = f32[] add(%ar11, %add11) %crs1 = f32[] all-reduce(%add12), replica_groups={{0,1}}, to_apply=%sum ROOT %tuple = (f32[]) tuple(%crs1) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); auto crs_before = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_before = crs_before->replica_groups(); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::AllReduce(op::Add( op::Parameter(), op::Add(op::Parameter(), op::Divide(op::Constant(), op::Constant())))))); auto crs_after = module->entry_computation()->root_instruction()->operands()[0]; auto replica_groups_after = crs_after->replica_groups(); CompareReplicaGroups(replica_groups_before, replica_groups_after); } TEST_F(ArCrsCombinerTest, OneReplicaDontRewrite) { const char* module_str = R"( HloModule foobar %sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] { %a = bf16[] parameter(0) %b = bf16[] parameter(1) ROOT %add = bf16[] add(%a, %b) } %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: bf16[]) -> (f32[], f32[]) { %p = bf16[] parameter(0) %constant.bf16 = bf16[] constant(1) %all-reduce.ar.1 = bf16[] all-reduce(%p), replica_groups={{0}}, channel_id=1, to_apply=%sum.bf16, sharding={maximal device=0} %convert.1 = f32[] convert(%all-reduce.ar.1), sharding={maximal device=0} %all-reduce.1 = f32[] all-reduce(%convert.1), replica_groups={{0}}, to_apply=%sum.f32, sharding={maximal device=0} %all-reduce.ar.2 = bf16[] all-reduce(%constant.bf16), replica_groups={{0}}, channel_id=1, to_apply=%sum.bf16, sharding={maximal device=1} %convert.2 = f32[] convert(%all-reduce.ar.2), sharding={maximal device=1} %all-reduce.2 = f32[] all-reduce(%convert.2), replica_groups={{0}}, to_apply=%sum.f32, sharding={maximal device=1} ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.1, %all-reduce.2), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, 
ParseAndReturnVerifiedModule(module_str, 1)); ArCrsCombiner combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_FALSE(changed); } TEST_F(ArCrsCombinerTest, OneReplicaDontRewriteSPMD) { const char* module_str = R"( HloModule foobar %sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] { %a = bf16[] parameter(0) %b = bf16[] parameter(1) ROOT %add = bf16[] add(%a, %b) } %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: bf16[]) -> (f32[]) { %p = bf16[] parameter(0) %constant.bf16 = bf16[] constant(1) %all-reduce.ar.1 = bf16[] all-reduce(%p), replica_groups={{0}}, channel_id=1, to_apply=%sum.bf16 %convert.1 = f32[] convert(%all-reduce.ar.1) %all-reduce.1 = f32[] all-reduce(%convert.1), replica_groups={{0}}, to_apply=%sum.f32 ROOT %tuple = (f32[]) tuple(%all-reduce.1) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 1)); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_FALSE(changed); } TEST_F(ArCrsCombinerTest, SameValueTestConditional) { const char* module_str = R"( HloModule foobar branch_true { pt = (f32[2,4], f32[2,4]) parameter(0) gte.0 = f32[2,4] get-tuple-element(pt), index=0 gte.1 = f32[2,4] get-tuple-element(pt), index=1 ROOT tuple.t = (f32[2,4], f32[2,4]) tuple(gte.1, gte.0) } branch_false { pf = (f32[2,4], f32[2,4]) parameter(0) gte.0 = f32[2,4] get-tuple-element(pf), index=0 gte.1 = f32[2,4] get-tuple-element(pf), index=1 add = f32[2,4] add(gte.1, gte.1) ROOT tuple.f = (f32[2,4], f32[2,4]) tuple(gte.0, add) } ENTRY Parameters1.v4 { constant = pred[] constant(true) p = f32[2,4] parameter(0) tuple = (f32[2,4], f32[2,4]) tuple(p, p) ROOT conditional = (f32[2,4], f32[2,4]) conditional(constant, tuple, tuple), true_computation=branch_true, false_computation=branch_false } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); auto cond = module->entry_computation()->root_instruction(); auto branch_true = cond->branch_computation(0)->root_instruction(); auto t0 = branch_true->mutable_operand(0); auto t1 = branch_true->mutable_operand(1); EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(t0, t1)); auto branch_false = cond->branch_computation(1)->root_instruction(); auto f0 = branch_false->mutable_operand(0); auto f1 = branch_false->mutable_operand(1); EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(f0, f1)); } TEST_F(ArCrsCombinerTest, AllReduceWithReplicas) { const char* module_str = R"( HloModule foobar %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: bf16[]) -> (f32[], f32[]) { %p = bf16[] parameter(0) %all-reduce.0 = f32[] all-reduce(%p), channel_id=1, replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=0} %all-reduce.1 = f32[] all-reduce(%p), channel_id=1, replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=1} %all-reduce.2 = f32[] all-reduce(%all-reduce.0), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=0} %all-reduce.3 = f32[] all-reduce(%all-reduce.1), replica_groups={{0,1}}, to_apply=%sum.f32, sharding={maximal device=1} ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.2, %all-reduce.3), sharding={{maximal device=0}, {maximal device=1}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); ArCrsCombiner 
combiner(2, false); auto changed = combiner.Run(module.get()).value(); EXPECT_FALSE(changed); } TEST_F(ArCrsCombinerTest, AllReduceWithReplicasSPMD) { const char* module_str = R"( HloModule foobar %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: bf16[]) -> (f32[]) { %p = bf16[] parameter(0) %all-reduce.0 = f32[] all-reduce(%p), channel_id=1, replica_groups={{0},{1}}, to_apply=%sum.f32 %all-reduce.2 = f32[] all-reduce(%all-reduce.0), replica_groups={{0},{1}}, to_apply=%sum.f32 ROOT %tuple = (f32[]) tuple(%all-reduce.2) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2)); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_FALSE(changed); } TEST_F(ArCrsCombinerTest, ReplaceReplicatedAllReduceSPMD) { const char* module_str = R"( HloModule foobar %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: f32[2,4]) -> f32[2,4] { %p = f32[2,4] parameter(0), sharding={replicated} ROOT %all-reduce = f32[2,4] all-reduce(%p), to_apply=%sum.f32, replica_groups={{0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31}} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 32)); ArCrsCombiner combiner(2, true); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); auto root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, op::Divide(op::AllReduce(op::Parameter()), op::Broadcast(op::Constant()))); auto ar = root->operand(0); auto divisor = root->operand(1)->operand(0); EXPECT_TRUE(ar->channel_id()); EXPECT_TRUE(divisor->literal().IsAllFloat(2)); } TEST_F(ArCrsCombinerTest, AllReduceWithGlobalIdReplicaGroups) { const char* module_str = R"( HloModule foobar %sum.f32 (x: f32[], y: f32[]) -> f32[] { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %add = f32[] add(%x, %y) } ENTRY %entrycomp (p: bf16[]) -> (f32[]) { %p = bf16[] parameter(0) %all-reduce.0 = f32[] all-reduce(%p), channel_id=1, replica_groups={{0,1,2,3},{4,5,6,7}}, use_global_device_ids=true, to_apply=%sum.f32 %all-reduce.2 = f32[] all-reduce(%all-reduce.0), replica_groups={{0,1}}, to_apply=%sum.f32 ROOT %tuple = (f32[]) tuple(%all-reduce.2) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str, 2, 4)); ArCrsCombiner combiner(4, true); auto changed = combiner.Run(module.get()).value(); EXPECT_TRUE(changed); } } }
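Every rewrite test above follows the same pattern: parse an HLO module with an explicit replica count, run the pass, and match the rewritten graph. A minimal sketch of driving the pass outside the test fixture, assuming only the ArCrsCombiner(num_partitions, spmd) constructor and Run() call shapes used in these tests (the parameter names here are guesses, not taken from the header):

// Sketch: run ArCrsCombiner over an already-parsed module.
// Returns true iff at least one AllReduce -> elementwise-op chain ->
// cross-replica AllReduce pattern was combined.
absl::StatusOr<bool> CombineArCrs(HloModule* module, int num_partitions,
                                  bool spmd_partition) {
  ArCrsCombiner combiner(num_partitions, spmd_partition);
  return combiner.Run(module);
}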
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/ar_crs_combiner.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/ar_crs_combiner_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ff6e614c-d7f4-4756-bc9e-cd60d9819967
cpp
tensorflow/tensorflow
data_transfer
tensorflow/core/data/service/data_transfer.cc
tensorflow/core/data/service/data_transfer_test.cc
#include "tensorflow/core/data/service/data_transfer.h" #include <functional> #include <memory> #include <string> #include <unordered_map> #include <vector> #include "absl/strings/str_join.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/framework/variant.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/status.h" namespace tensorflow { namespace data { namespace { mutex* get_lock() { static mutex lock(LINKER_INITIALIZED); return &lock; } using DataTransferServerFactories = std::unordered_map<std::string, DataTransferServer::ServerFactoryT>; DataTransferServerFactories& transfer_server_factories() { static auto& factories = *new DataTransferServerFactories(); return factories; } using DataTransferClientFactories = std::unordered_map<std::string, DataTransferClient::ClientFactoryT>; DataTransferClientFactories& transfer_client_factories() { static auto& factories = *new DataTransferClientFactories(); return factories; } } GetElementResult GetElementResult::Copy() const { GetElementResult copy; copy.components = components; copy.element_index = element_index; copy.end_of_sequence = end_of_sequence; copy.skip = skip; return copy; } size_t GetElementResult::EstimatedMemoryUsageBytes() const { size_t size_bytes = components.size() * sizeof(Tensor) + sizeof(element_index) + sizeof(end_of_sequence) + sizeof(skip); for (const Tensor& tensor : components) { size_bytes += tensor.TotalBytes(); if (tensor.dtype() != DT_VARIANT) { continue; } const Variant& variant = tensor.scalar<Variant>()(); const CompressedElement* compressed = variant.get<CompressedElement>(); if (compressed) { size_bytes += compressed->SpaceUsedLong(); } } return size_bytes; } void DataTransferServer::Register(std::string name, ServerFactoryT factory) { mutex_lock l(*get_lock()); if (!transfer_server_factories().insert({name, factory}).second) { LOG(ERROR) << "Two data transfer server factories are being registered with name " << name << ". Which one gets used is undefined."; } } Status DataTransferServer::Build(std::string name, GetElementT get_element, std::shared_ptr<DataTransferServer>* out) { mutex_lock l(*get_lock()); auto it = transfer_server_factories().find(name); if (it != transfer_server_factories().end()) { return it->second(get_element, out); } std::vector<std::string> available_names; for (const auto& factory : transfer_server_factories()) { available_names.push_back(factory.first); } return errors::NotFound( "No data transfer server factory has been registered for name ", name, ". The available names are: [ ", absl::StrJoin(available_names, ", "), " ]"); } void DataTransferClient::Register(std::string name, ClientFactoryT factory) { mutex_lock l(*get_lock()); if (!transfer_client_factories().insert({name, factory}).second) { LOG(ERROR) << "Two data transfer client factories are being registered with name " << name << ". Which one gets used is undefined."; } } Status DataTransferClient::Build(std::string name, Config config, std::unique_ptr<DataTransferClient>* out) { mutex_lock l(*get_lock()); auto it = transfer_client_factories().find(name); if (it != transfer_client_factories().end()) { return it->second(config, out); } std::vector<string> available_names; for (const auto& factory : transfer_client_factories()) { available_names.push_back(factory.first); } return errors::NotFound( "No data transfer client factory has been registered for name ", name, ". 
The available names are: [ ", absl::StrJoin(available_names, ", "), " ]"); } } }
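The server and client registries above are symmetric: a factory is stored under a string name behind one global mutex, and Build() looks it up, returning NotFound with the list of registered names on a miss. A client-side usage sketch; the protocol name "grpc" and the Config contents are assumptions (Config is declared in data_transfer.h, which is not shown here):

// Sketch: build a data transfer client by protocol name.
DataTransferClient::Config config;  // Fields defined in data_transfer.h.
std::unique_ptr<DataTransferClient> client;
Status s = DataTransferClient::Build("grpc", config, &client);
if (!s.ok()) {
  // On a miss the error message lists every registered factory name.
  LOG(WARNING) << "Could not build transfer client: " << s;
}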
#include "tensorflow/core/data/service/data_transfer.h" #include <memory> #include <string> #include <utility> #include <vector> #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace data { namespace { class TestDataTransferServer : public DataTransferServer { public: explicit TestDataTransferServer(bool* called) : called_(called) {} Status Start(const experimental::WorkerConfig& unused_config) override { *called_ = true; return absl::OkStatus(); } int Port() const override { return 0; } private: bool* called_; }; template <class T> GetElementResult MakeElementResult(T value) { GetElementResult result; result.components.push_back(Tensor(std::move(value))); result.element_index = 0; result.end_of_sequence = false; return result; } TEST(DataTransferTest, RegisterDataTransferServerBuilder) { bool called = false; DataTransferServer::Register("test", [&called](auto ignore, auto* server) { *server = std::make_shared<TestDataTransferServer>(&called); return absl::OkStatus(); }); std::shared_ptr<DataTransferServer> server; TF_ASSERT_OK(DataTransferServer::Build("test", {}, &server)); EXPECT_FALSE(called); TF_ASSERT_OK(server->Start({})); EXPECT_TRUE(called); } TEST(DataTransferTest, EstimateMemoryUsageBytes) { GetElementResult empty; EXPECT_GT(empty.EstimatedMemoryUsageBytes(), 0); Tensor tensor(DT_INT64, TensorShape({10, 100})); GetElementResult int64_result = MakeElementResult(tensor); EXPECT_GT(int64_result.EstimatedMemoryUsageBytes(), 1000 * sizeof(int64_t)); EXPECT_GT(int64_result.EstimatedMemoryUsageBytes(), int64_result.components[0].AllocatedBytes()); EXPECT_GE(int64_result.EstimatedMemoryUsageBytes(), sizeof(int64_result)); } TEST(DataTransferTest, EstimateVariantMemoryUsageBytes) { const size_t data_size = 1000; std::unique_ptr<CompressedElement> compressed{ protobuf::Arena::Create<CompressedElement>(nullptr)}; compressed->set_data(std::string(data_size, 'a')); Tensor tensor(DT_VARIANT, TensorShape({})); tensor.scalar<Variant>()() = *compressed; GetElementResult variant_result = MakeElementResult(tensor); EXPECT_GT(variant_result.EstimatedMemoryUsageBytes(), data_size); EXPECT_GT(variant_result.EstimatedMemoryUsageBytes(), compressed->ByteSizeLong()); EXPECT_GT(variant_result.EstimatedMemoryUsageBytes(), compressed->SpaceUsedLong()); } TEST(DataTransferTest, CopyGetElementResult) { std::string hello_world = "hello, world!"; GetElementResult result = MakeElementResult(hello_world); ASSERT_EQ(result.components.size(), 1); EXPECT_GT(result.EstimatedMemoryUsageBytes(), hello_world.size()); GetElementResult copy = result.Copy(); ASSERT_EQ(copy.components.size(), 1); test::ExpectEqual(result.components[0], copy.components[0]); EXPECT_EQ(copy.EstimatedMemoryUsageBytes(), result.EstimatedMemoryUsageBytes()); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/data_transfer.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/data_transfer_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
fca111e8-154f-4c10-bf20-b92bba5d67bb
cpp
google/arolla
while_loop_impl
arolla/expr/operators/while_loop/while_loop_impl.cc
arolla/expr/operators/while_loop/while_loop_impl_test.cc
#include "arolla/expr/operators/while_loop/while_loop_impl.h" #include <algorithm> #include <functional> #include <string> #include <utility> #include <vector> #include "absl/log/check.h" #include "absl/status/statusor.h" #include "absl/types/span.h" #include "arolla/expr/expr.h" #include "arolla/expr/expr_node.h" #include "arolla/expr/expr_operator.h" #include "arolla/expr/expr_visitor.h" #include "arolla/expr/operators/while_loop/while_loop.h" #include "arolla/util/status_macros_backport.h" namespace arolla::expr_operators::while_loop_impl { using ::arolla::expr::ExprNodePtr; using ::arolla::expr::ExprOperatorPtr; using ::arolla::expr::Placeholder; absl::StatusOr<std::pair<ExprNodePtr, NamedExpressions>> ExtractImmutables( const ExprNodePtr& expr, std::function<std::string(const ExprNodePtr& node)> immutable_naming_function) { NamedExpressions immutables; struct Visit { ExprNodePtr expr; bool has_placeholder_dep; bool has_leaf_dep; }; ASSIGN_OR_RETURN( (auto [converted_expr, has_placeholder_dep, has_leaf_dep]), expr::PostOrderTraverse( expr, [&](const ExprNodePtr& node, absl::Span<const Visit* const> visits) -> absl::StatusOr<Visit> { if (node->is_placeholder()) { return Visit{.expr = node, .has_placeholder_dep = true, .has_leaf_dep = false}; } if (node->is_leaf()) { return Visit{.expr = node, .has_placeholder_dep = false, .has_leaf_dep = true}; } bool has_placeholder_dep = std::any_of( visits.begin(), visits.end(), [](const auto& v) { return v->has_placeholder_dep; }); bool has_leaf_dep = std::any_of(visits.begin(), visits.end(), [](const auto& v) { return v->has_leaf_dep; }); if (!has_placeholder_dep) { return Visit{.expr = node, .has_placeholder_dep = false, .has_leaf_dep = has_leaf_dep}; } std::vector<ExprNodePtr> new_deps; new_deps.reserve(visits.size()); for (const auto& visit : visits) { if (visit->has_placeholder_dep || !visit->has_leaf_dep) { new_deps.push_back(visit->expr); } else { auto placeholder_key = immutable_naming_function(visit->expr); new_deps.emplace_back(Placeholder(placeholder_key)); immutables.emplace(std::move(placeholder_key), visit->expr); } } ASSIGN_OR_RETURN(auto new_node, expr::WithNewDependencies( node, std::move(new_deps))); return Visit{.expr = new_node, .has_placeholder_dep = true, .has_leaf_dep = has_leaf_dep}; })); if (!has_placeholder_dep) { DCHECK(immutables.empty()); auto placeholder_key = immutable_naming_function(converted_expr); immutables.emplace(placeholder_key, converted_expr); converted_expr = Placeholder(placeholder_key); } return {{std::move(converted_expr), std::move(immutables)}}; } }
#include "arolla/expr/operators/while_loop/while_loop_impl.h" #include <cstdint> #include <string> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/container/flat_hash_map.h" #include "absl/status/status_matchers.h" #include "absl/strings/str_format.h" #include "arolla/expr/expr.h" #include "arolla/expr/expr_node.h" #include "arolla/expr/testing/testing.h" #include "arolla/util/fingerprint.h" namespace arolla::expr_operators::while_loop_impl { namespace { using ::absl_testing::IsOkAndHolds; using ::arolla::expr::CallOp; using ::arolla::expr::ExprNodePtr; using ::arolla::expr::Leaf; using ::arolla::expr::Literal; using ::arolla::expr::Placeholder; using ::arolla::testing::EqualsExpr; using ::testing::IsEmpty; using ::testing::Pair; using ::testing::UnorderedElementsAre; TEST(WhileLoopImplTest, ExtractImmutables) { absl::flat_hash_map<Fingerprint, std::string> immutable_names; auto immutable_naming_function = [&](const ExprNodePtr& node) -> std::string { if (auto it = immutable_names.find(node->fingerprint()); it != immutable_names.end()) { return it->second; } std::string name = absl::StrFormat("_immutable_%d", immutable_names.size()); immutable_names.emplace(node->fingerprint(), name); return name; }; { auto expr = Literal(int64_t{1}); EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function), IsOkAndHolds(Pair( EqualsExpr(Placeholder("_immutable_0")), UnorderedElementsAre(Pair( "_immutable_0", EqualsExpr(Literal<int64_t>(1))))))); } { auto expr = Leaf("fifty"); EXPECT_THAT( ExtractImmutables(expr, immutable_naming_function), IsOkAndHolds(Pair(EqualsExpr(Placeholder("_immutable_1")), UnorderedElementsAre(Pair( "_immutable_1", EqualsExpr(Leaf("fifty"))))))); } { auto expr = Placeholder("seven"); EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function), IsOkAndHolds(Pair(EqualsExpr(expr), IsEmpty()))); } { ASSERT_OK_AND_ASSIGN( auto expr, CallOp("math.add", {Leaf("two"), CallOp("math.add", {Placeholder("fifty"), Leaf("seven")})})); EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function), IsOkAndHolds(Pair( EqualsExpr(CallOp( "math.add", {Placeholder("_immutable_3"), CallOp("math.add", {Placeholder("fifty"), Placeholder("_immutable_2")})})), UnorderedElementsAre( Pair("_immutable_3", EqualsExpr(Leaf("two"))), Pair("_immutable_2", EqualsExpr(Leaf("seven"))))))); } { ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {Placeholder("fifty"), Literal<int64_t>(7)})); EXPECT_THAT( ExtractImmutables(expr, immutable_naming_function), IsOkAndHolds(Pair(EqualsExpr(CallOp("math.add", {Placeholder("fifty"), Literal<int64_t>(7)})), IsEmpty()))); } { ASSERT_OK_AND_ASSIGN( auto expr57, CallOp("math.add", {Leaf("fifty"), Literal<int64_t>(7)})); ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {expr57, Placeholder("two")})); EXPECT_THAT( ExtractImmutables(expr, immutable_naming_function), IsOkAndHolds(Pair( EqualsExpr(CallOp( "math.add", {Placeholder("_immutable_4"), Placeholder("two")})), UnorderedElementsAre(Pair("_immutable_4", EqualsExpr(expr57)))))); } { ASSERT_OK_AND_ASSIGN( auto expr, CallOp("math.add", {CallOp("math.add", {Placeholder("fifty"), Leaf("seven")}), Leaf("seven")})); EXPECT_THAT( ExtractImmutables(expr, immutable_naming_function), IsOkAndHolds(Pair( EqualsExpr(CallOp( "math.add", {CallOp("math.add", {Placeholder("fifty"), Placeholder("_immutable_2")}), Placeholder("_immutable_2")})), UnorderedElementsAre( Pair("_immutable_2", EqualsExpr(Leaf("seven"))))))); } { ASSERT_OK_AND_ASSIGN( auto expr, CallOp("math.add", {CallOp("math.add", {Literal<int64_t>(1), 
Leaf("fifty")}), Placeholder("seven")})); EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function), IsOkAndHolds(Pair( EqualsExpr(CallOp("math.add", {Placeholder("_immutable_5"), Placeholder("seven")})), UnorderedElementsAre(Pair( "_immutable_5", EqualsExpr(CallOp("math.add", {Literal<int64_t>(1), Leaf("fifty")}))))))); } } } }
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/while_loop/while_loop_impl.cc
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/while_loop/while_loop_impl_test.cc
1ca990dbeca224035efdabffecc7f3738df6b52c
8e9734e8-2cbe-4ac0-bf10-2ea388d9cbf8
cpp
tensorflow/tensorflow
gather_nd
tensorflow/lite/kernels/gather_nd.cc
tensorflow/lite/kernels/gather_nd_test.cc
#include <stdint.h> #include "tensorflow/lite/core/c/c_api_types.h" #include "tensorflow/lite/core/c/common.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace gather_nd { constexpr int kParams = 0; constexpr int kIndices = 1; constexpr int kOutputTensor = 0; TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* params; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kParams, &params)); const TfLiteTensor* indices; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); switch (params->type) { case kTfLiteFloat32: case kTfLiteUInt8: case kTfLiteInt8: case kTfLiteInt16: case kTfLiteInt64: case kTfLiteInt32: case kTfLiteString: case kTfLiteBool: break; default: TF_LITE_KERNEL_LOG(context, "Params of type '%s' are not supported by gather_nd.", TfLiteTypeGetName(params->type)); return kTfLiteError; } switch (indices->type) { case kTfLiteInt64: case kTfLiteInt32: case kTfLiteInt16: break; default: TF_LITE_KERNEL_LOG(context, "Indices of type '%s' are not supported by gather_nd.", TfLiteTypeGetName(indices->type)); return kTfLiteError; } const int params_rank = NumDimensions(params); const int indices_rank = NumDimensions(indices); const int indices_nd = SizeOfDimension(indices, indices_rank - 1); if (params_rank < 1) { TF_LITE_KERNEL_LOG(context, "Params must be at least a vector."); return kTfLiteError; } if (indices_rank < 1) { TF_LITE_KERNEL_LOG(context, "Indices must be at least a vector."); return kTfLiteError; } if (indices_nd > params_rank) { TF_LITE_KERNEL_LOG( context, "Index innermost dimension length must be <= params rank."); return kTfLiteError; } output->type = params->type; const int output_rank = indices_rank + params_rank - indices_nd - 1; TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank); int output_index = 0; for (int i = 0; i < indices_rank - 1; ++i) { output_shape->data[output_index++] = indices->dims->data[i]; } for (int i = indices_nd; i < params_rank; ++i) { output_shape->data[output_index++] = params->dims->data[i]; } return context->ResizeTensor(context, output, output_shape); } template <typename ParamsT, typename IndicesT> TfLiteStatus GatherNd(const TfLiteTensor* params, const TfLiteTensor* indices, TfLiteTensor* output) { return reference_ops::GatherNd( GetTensorShape(params), GetTensorData<ParamsT>(params), GetTensorShape(indices), GetTensorData<IndicesT>(indices), GetTensorShape(output), GetTensorData<ParamsT>(output)); } template <typename IndicesT> TfLiteStatus GatherNdString(const TfLiteTensor* params, const TfLiteTensor* indices, TfLiteTensor* output) { return reference_ops::GatherNdString( GetTensorShape(params), params, GetTensorShape(indices), GetTensorData<IndicesT>(indices), GetTensorShape(output), output); } template <typename IndicesT> TfLiteStatus EvalGatherNd(TfLiteContext* context, const TfLiteTensor* params, const TfLiteTensor* indices, TfLiteTensor* output) { bool indices_has_only_positive_elements = true; const auto* indices_values = 
GetTensorData<IndicesT>(indices); const size_t num_indices = indices->bytes / sizeof(IndicesT); for (size_t i = 0; i < num_indices; i++) { if (indices_values[i] < 0) { indices_has_only_positive_elements = false; break; } } TF_LITE_ENSURE(context, indices_has_only_positive_elements); TfLiteStatus status = kTfLiteError; switch (params->type) { case kTfLiteFloat32: status = GatherNd<float, IndicesT>(params, indices, output); break; case kTfLiteUInt8: status = GatherNd<uint8_t, IndicesT>(params, indices, output); break; case kTfLiteInt8: status = GatherNd<int8_t, IndicesT>(params, indices, output); break; case kTfLiteInt16: status = GatherNd<int16_t, IndicesT>(params, indices, output); break; case kTfLiteInt32: status = GatherNd<int32_t, IndicesT>(params, indices, output); break; case kTfLiteInt64: status = GatherNd<int64_t, IndicesT>(params, indices, output); break; case kTfLiteString: status = GatherNdString<IndicesT>(params, indices, output); break; case kTfLiteBool: status = GatherNd<bool, IndicesT>(params, indices, output); break; default: TF_LITE_KERNEL_LOG(context, "Params type '%s' are not supported by gather_nd.", TfLiteTypeGetName(params->type)); return kTfLiteError; } if (status != kTfLiteOk) { TF_LITE_KERNEL_LOG(context, "gather_nd index out of bounds"); } return status; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* params; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kParams, &params)); const TfLiteTensor* indices; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); TF_LITE_ENSURE(context, (NumElements(params) == 0 && NumElements(indices) == 0) || NumElements(params) > 0); switch (indices->type) { case kTfLiteInt16: return EvalGatherNd<int16_t>(context, params, indices, output); case kTfLiteInt32: return EvalGatherNd<int32_t>(context, params, indices, output); case kTfLiteInt64: return EvalGatherNd<int64_t>(context, params, indices, output); default: TF_LITE_KERNEL_LOG(context, "Indices of type '%s' are not supported by gather_nd.", TfLiteTypeGetName(indices->type)); return kTfLiteError; } } } TfLiteRegistration* Register_GATHER_ND() { static TfLiteRegistration r = { nullptr, nullptr, gather_nd::Prepare, gather_nd::Eval}; return &r; } } } }
#include <stdint.h>

#include <initializer_list>
#include <string>
#include <vector>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"

namespace tflite {
namespace {

using ::testing::ElementsAreArray;

class GatherNdOpModel : public SingleOpModel {
 public:
  GatherNdOpModel(const TensorData& params, const TensorData& indices) {
    params_ = AddInput(params);
    indices_ = AddInput(indices);
    output_ = AddOutput(params.type);
    SetBuiltinOp(BuiltinOperator_GATHER_ND, BuiltinOptions_GatherNdOptions,
                 CreateGatherNdOptions(builder_).Union());
    BuildInterpreter({GetShape(params_), GetShape(indices_)});
  }

  template <typename T>
  void SetInput(std::initializer_list<T> data) {
    PopulateTensor<T>(params_, data);
  }

  template <typename T>
  void SetPositions(std::initializer_list<T> data) {
    PopulateTensor<T>(indices_, data);
  }

  template <typename T>
  std::vector<T> GetOutput() {
    return ExtractVector<T>(output_);
  }

  std::vector<int> GetOutputShape() { return GetTensorShape(output_); }

 protected:
  int params_;
  int indices_;
  int output_;
};

TEST(GatherNdOpTest, ElementIndexingIntoMatrix) {
  GatherNdOpModel m({TensorType_FLOAT32, {2, 2}}, {TensorType_INT32, {2, 2}});
  m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
  m.SetPositions<int32_t>({0, 0, 1, 1});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<float>(), Pointwise(FloatingPointEq(), {1.1, 2.2}));
}

TEST(GatherNdOpTest, ErrorOnOutOfBoundsTooLarge) {
  GatherNdOpModel m({TensorType_FLOAT32, {2, 2}}, {TensorType_INT32, {2, 2}});
  m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
  m.SetPositions<int32_t>({0, 0, 2, 0});
  EXPECT_EQ(m.Invoke(), kTfLiteError);
  m.SetPositions<int32_t>({0, 0, 1, 2});
  EXPECT_EQ(m.Invoke(), kTfLiteError);
}

TEST(GatherNdOpTest, ErrorOnOutOfBoundsNegative) {
  GatherNdOpModel m({TensorType_FLOAT32, {2, 2}}, {TensorType_INT32, {2, 2}});
  m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
  m.SetPositions<int32_t>({1, -1, 1, 1});
  EXPECT_EQ(m.Invoke(), kTfLiteError);
}

TEST(GatherNdOpTest, SliceIndexingIntoMatrix) {
  GatherNdOpModel m({TensorType_FLOAT32, {2, 2}}, {TensorType_INT32, {2, 1}});
  m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
  m.SetPositions<int32_t>({1, 0});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<float>(),
              Pointwise(FloatingPointEq(), {2.1, 2.2, 1.1, 1.2}));
}

TEST(GatherNdOpTest, BatchedIndexingIntoMatrix1) {
  GatherNdOpModel m({TensorType_FLOAT32, {2, 2}},
                    {TensorType_INT32, {2, 1, 1}});
  m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
  m.SetPositions<int32_t>({1, 0});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<float>(),
              Pointwise(FloatingPointEq(), {2.1, 2.2, 1.1, 1.2}));
}

TEST(GatherNdOpTest, BatchedIndexingIntoMatrix2) {
  GatherNdOpModel m({TensorType_FLOAT32, {2, 2}},
                    {TensorType_INT32, {2, 1, 2}});
  m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
  m.SetPositions<int32_t>({0, 0, 1, 1});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<float>(), Pointwise(FloatingPointEq(), {1.1, 2.2}));
}

TEST(GatherNdOpTest, DuplicateIndexingIntoMatrix) {
  GatherNdOpModel m({TensorType_FLOAT32, {2, 2}}, {TensorType_INT32, {2, 2}});
  m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
  m.SetPositions<int32_t>({0, 0, 0, 0});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<float>(), Pointwise(FloatingPointEq(), {1.1, 1.1}));
}

TEST(GatherNdOpTest, ElementIndexingIntoRank3Tensor) {
  GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
                    {TensorType_INT32, {1, 2, 3}});
  m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,   //
                     3.1, 3.2, -3.3, -4.1, -4.2, 4.3,  //
                     5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
  m.SetPositions<int32_t>({0, 0, 1, 1, 1, 0});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<float>(), Pointwise(FloatingPointEq(), {-1.2, -4.1}));
}

TEST(GatherNdOpTest, SliceIndexingIntoRank3Tensor) {
  GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
                    {TensorType_INT32, {2, 1}});
  m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,   //
                     3.1, 3.2, -3.3, -4.1, -4.2, 4.3,  //
                     5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
  m.SetPositions<int32_t>({0, 2});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<float>(),
              Pointwise(FloatingPointEq(), {1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
                                            5.1, -5.2, 5.3, 6.1, -6.2, 6.3}));
}

TEST(GatherNdOpTest, BatchedIndexingIntoRank3Tensor1) {
  GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
                    {TensorType_INT32, {2, 1, 3}});
  m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,   //
                     3.1, 3.2, -3.3, -4.1, -4.2, 4.3,  //
                     5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
  m.SetPositions<int32_t>({0, 0, 1, 1, 1, 0});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<float>(), Pointwise(FloatingPointEq(), {-1.2, -4.1}));
}

TEST(GatherNdOpTest, BatchedIndexingIntoRank3Tensor2) {
  GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
                    {TensorType_INT32, {2, 1, 1}});
  m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,   //
                     3.1, 3.2, -3.3, -4.1, -4.2, 4.3,  //
                     5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
  m.SetPositions<int32_t>({1, 0});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<float>(),
              Pointwise(FloatingPointEq(), {3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
                                            1.1, -1.2, 1.3, -2.1, 2.2, 2.3}));
}

TEST(GatherNdOpTest, BatchedIndexingIntoRank3Tensor3) {
  GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
                    {TensorType_INT32, {2, 2, 2}});
  m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,   //
                     3.1, 3.2, -3.3, -4.1, -4.2, 4.3,  //
                     5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
  m.SetPositions<int32_t>({0, 1, 1, 0, 0, 0, 2, 1});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<float>(),
              Pointwise(FloatingPointEq(), {-2.1, 2.2, 2.3, 3.1, 3.2, -3.3,
                                            1.1, -1.2, 1.3, 6.1, -6.2, 6.3}));
}

TEST(GatherNdOpTest, BatchedIndexingIntoRank3Tensor4) {
  GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
                    {TensorType_INT32, {2, 2, 3}});
  m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,   //
                     3.1, 3.2, -3.3, -4.1, -4.2, 4.3,  //
                     5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
  m.SetPositions<int32_t>({0, 0, 1, 1, 0, 1, 1, 1, 2, 2, 1, 2});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<float>(),
              Pointwise(FloatingPointEq(), {-1.2, 3.2, 4.3, 6.3}));
}

TEST(GatherNdOpTest, DuplicateIndexingIntoRank3Tensor) {
  GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
                    {TensorType_INT32, {2, 2}});
  m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,   //
                     3.1, 3.2, -3.3, -4.1, -4.2, 4.3,  //
                     5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
  m.SetPositions<int32_t>({0, 1, 0, 1});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<float>(),
              Pointwise(FloatingPointEq(), {-2.1, 2.2, 2.3, -2.1, 2.2, 2.3}));
}

TEST(GatherNdOpTest, Float32Int32) {
  GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
                    {TensorType_INT32, {2, 2}});
  m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,   //
                     3.1, 3.2, -3.3, -4.1, -4.2, 4.3,  //
                     5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
  m.SetPositions<int32_t>({0, 1, 1, 0});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<float>(),
              Pointwise(FloatingPointEq(), {-2.1, 2.2, 2.3, 3.1, 3.2, -3.3}));
}

TEST(GatherNdOpTest, Float32Int64) {
  GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
                    {TensorType_INT64, {2, 2}});
  m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,   //
                     3.1, 3.2, -3.3, -4.1, -4.2, 4.3,  //
                     5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
  m.SetPositions<int64_t>({0LL, 1LL, 1LL, 0LL});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<float>(),
              Pointwise(FloatingPointEq(), {-2.1, 2.2, 2.3, 3.1, 3.2, -3.3}));
}

TEST(GatherNdOpTest, Int32Int32) {
  GatherNdOpModel m({TensorType_INT32, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
  m.SetInput<int32_t>({1, -1, 1, -2, 2, 2,   //
                       3, 3, -3, -4, -4, 4,  //
                       5, -5, 5, 6, -6, 6});
  m.SetPositions<int32_t>({0, 1, 1, 0});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<int32_t>(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));
}

TEST(GatherNdOpTest, Int32Int64) {
  GatherNdOpModel m({TensorType_INT32, {3, 2, 3}}, {TensorType_INT64, {2, 2}});
  m.SetInput<int32_t>({1, -1, 1, -2, 2, 2,   //
                       3, 3, -3, -4, -4, 4,  //
                       5, -5, 5, 6, -6, 6});
  m.SetPositions<int64_t>({0LL, 1LL, 1LL, 0LL});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<int32_t>(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));
}

TEST(GatherNdOpTest, Uint8Int32) {
  GatherNdOpModel m({TensorType_UINT8, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
  m.SetInput<uint8_t>({1, 1, 1, 2, 2, 2,  //
                       3, 3, 3, 4, 4, 4,  //
                       5, 5, 5, 6, 6, 6});
  m.SetPositions<int32_t>({0, 1, 1, 0});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<uint8_t>(), ElementsAreArray({2, 2, 2, 3, 3, 3}));
}

TEST(GatherNdOpTest, Uint8Int64) {
  GatherNdOpModel m({TensorType_UINT8, {3, 2, 3}}, {TensorType_INT64, {2, 2}});
  m.SetInput<uint8_t>({1, 1, 1, 2, 2, 2,  //
                       3, 3, 3, 4, 4, 4,  //
                       5, 5, 5, 6, 6, 6});
  m.SetPositions<int64_t>({0, 1, 1, 0});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<uint8_t>(), ElementsAreArray({2, 2, 2, 3, 3, 3}));
}

TEST(GatherNdOpTest, Int8Int32) {
  GatherNdOpModel m({TensorType_INT8, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
  m.SetInput<int8_t>({1, -1, 1, -2, 2, 2,   //
                      3, 3, -3, -4, -4, 4,  //
                      5, -5, 5, 6, -6, 6});
  m.SetPositions<int32_t>({0, 1, 1, 0});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<int8_t>(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));
}

TEST(GatherNdOpTest, Int8Int64) {
  GatherNdOpModel m({TensorType_INT8, {3, 2, 3}}, {TensorType_INT64, {2, 2}});
  m.SetInput<int8_t>({1, -1, 1, -2, 2, 2,   //
                      3, 3, -3, -4, -4, 4,  //
                      5, -5, 5, 6, -6, 6});
  m.SetPositions<int64_t>({0LL, 1LL, 1LL, 0LL});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<int8_t>(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));
}

TEST(GatherNdOpTest, Int16Int32) {
  GatherNdOpModel m({TensorType_INT16, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
  m.SetInput<int16_t>({1, -1, 1, -2, 2, 2,   //
                       3, 3, -3, -4, -4, 4,  //
                       5, -5, 5, 6, -6, 6});
  m.SetPositions<int32_t>({0, 1, 1, 0});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<int16_t>(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));
}

TEST(GatherNdOpTest, Int16Int64) {
  GatherNdOpModel m({TensorType_INT16, {3, 2, 3}}, {TensorType_INT64, {2, 2}});
  m.SetInput<int16_t>({1, -1, 1, -2, 2, 2,   //
                       3, 3, -3, -4, -4, 4,  //
                       5, -5, 5, 6, -6, 6});
  m.SetPositions<int64_t>({0LL, 1LL, 1LL, 0LL});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<int16_t>(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));
}

TEST(GatherNdOpTest, Int64Int32) {
  GatherNdOpModel m({TensorType_INT64, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
  m.SetInput<int64_t>({1LL, -1LL, 1LL, -2LL, 2LL, 2LL,    //
                       3LL, 3LL, -3LL, -4LL, -4LL, 4LL,   //
                       5LL, -5LL, 5LL, 6LL, -6LL, 6LL});
  m.SetPositions<int32_t>({0, 1, 1, 0});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<int64_t>(),
              ElementsAreArray({-2LL, 2LL, 2LL, 3LL, 3LL, -3LL}));
}

TEST(GatherNdOpTest, Int64Int64) {
  GatherNdOpModel m({TensorType_INT64, {3, 2, 3}}, {TensorType_INT64, {2, 2}});
  m.SetInput<int64_t>({1LL, -1LL, 1LL, -2LL, 2LL, 2LL,    //
                       3LL, 3LL, -3LL, -4LL, -4LL, 4LL,   //
                       5LL, -5LL, 5LL, 6LL, -6LL, 6LL});
  m.SetPositions<int64_t>({0LL, 1LL, 1LL, 0LL});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<int64_t>(),
              ElementsAreArray({-2LL, 2LL, 2LL, 3LL, 3LL, -3LL}));
}

TEST(GatherNdOpTest, Float32Int16) {
  GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
                    {TensorType_INT16, {2, 2}});
  m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,   //
                     3.1, 3.2, -3.3, -4.1, -4.2, 4.3,  //
                     5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
  m.SetPositions<int16_t>({0, 1, 1, 0});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<float>(),
              Pointwise(FloatingPointEq(), {-2.1, 2.2, 2.3, 3.1, 3.2, -3.3}));
}

TEST(GatherNdOpTest, StringInt32) {
  GatherNdOpModel m({TensorType_STRING, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
  m.SetInput<std::string>({"A", "B", "C", "D", "E", "F",  //
                           "G", "H", "I", "J", "K", "L",  //
                           "M", "N", "O", "P", "Q", "R"});
  m.SetPositions<int32_t>({0, 1, 1, 0});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<std::string>(),
              ElementsAreArray({"D", "E", "F", "G", "H", "I"}));
}

TEST(GatherNdOpTest, StringInt64) {
  GatherNdOpModel m({TensorType_STRING, {3, 2, 3}}, {TensorType_INT64, {2, 2}});
  m.SetInput<std::string>({"A", "B", "C", "D", "E", "F",  //
                           "G", "H", "I", "J", "K", "L",  //
                           "M", "N", "O", "P", "Q", "R"});
  m.SetPositions<int64_t>({0LL, 1LL, 1LL, 0LL});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput<std::string>(),
              ElementsAreArray({"D", "E", "F", "G", "H", "I"}));
}

TEST(GatherNdOpTest, StringOutOfBoundsTooLarge) {
  GatherNdOpModel m({TensorType_STRING, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
  m.SetInput<std::string>({"A", "B", "C", "D", "E", "F",  //
                           "G", "H", "I", "J", "K", "L",  //
                           "M", "N", "O", "P", "Q", "R"});
  m.SetPositions<int32_t>({0, 0, 3, 0});
  ASSERT_EQ(m.Invoke(), kTfLiteError);
  m.SetPositions<int32_t>({0, 0, 2, 2});
  ASSERT_EQ(m.Invoke(), kTfLiteError);
}

TEST(GatherNdOpTest, StringOutOfBoundsNegative) {
  GatherNdOpModel m({TensorType_STRING, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
  m.SetInput<std::string>({"A", "B", "C", "D", "E", "F",  //
                           "G", "H", "I", "J", "K", "L",  //
                           "M", "N", "O", "P", "Q", "R"});
  m.SetPositions<int32_t>({1, -1, 0, 0});
  ASSERT_EQ(m.Invoke(), kTfLiteError);
}

TEST(GatherNdOpTest, EmptyParamsAndIndex) {
  GatherNdOpModel m({TensorType_FLOAT32, {1, 0}}, {TensorType_INT32, {0, 2}});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({0}));
}

}  // namespace
}  // namespace tflite
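The shape rule in Prepare above (output rank = indices_rank + params_rank - indices_nd - 1) is easier to see on plain arrays. The sketch below re-implements by hand the case from ElementIndexingIntoMatrix, where the innermost index dimension equals the params rank; it is a standalone illustration of the gather_nd indexing rule, not the TFLite kernel itself.

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // params has shape {2, 2}, stored row-major.
  const std::vector<float> params = {1.1f, 1.2f, 2.1f, 2.2f};
  const int params_cols = 2;
  // indices has shape {2, 2}; the innermost dimension (indices_nd == 2)
  // equals the params rank, so each index row selects a single element.
  const std::vector<int32_t> indices = {0, 0, 1, 1};
  std::vector<float> output;
  for (size_t i = 0; i + 1 < indices.size(); i += 2) {
    output.push_back(params[indices[i] * params_cols + indices[i + 1]]);
  }
  for (float v : output) std::cout << v << ' ';  // prints: 1.1 2.2
  std::cout << '\n';
}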
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/gather_nd.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/gather_nd_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
e580d97c-eae2-4a33-ad5b-e764f270b769
cpp
abseil/abseil-cpp
raw_logging
absl/base/internal/raw_logging.cc
absl/base/raw_logging_test.cc
#include "absl/base/internal/raw_logging.h" #include <cstdarg> #include <cstddef> #include <cstdio> #include <cstdlib> #include <cstring> #include <string> #ifdef __EMSCRIPTEN__ #include <emscripten/console.h> #endif #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/internal/atomic_hook.h" #include "absl/base/internal/errno_saver.h" #include "absl/base/log_severity.h" #if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ defined(__hexagon__) || defined(__Fuchsia__) || \ defined(__native_client__) || defined(__OpenBSD__) || \ defined(__EMSCRIPTEN__) || defined(__ASYLO__) #include <unistd.h> #define ABSL_HAVE_POSIX_WRITE 1 #define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1 #else #undef ABSL_HAVE_POSIX_WRITE #endif #if (defined(__linux__) || defined(__FreeBSD__)) && !defined(__ANDROID__) #include <sys/syscall.h> #define ABSL_HAVE_SYSCALL_WRITE 1 #define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1 #else #undef ABSL_HAVE_SYSCALL_WRITE #endif #ifdef _WIN32 #include <io.h> #define ABSL_HAVE_RAW_IO 1 #define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1 #else #undef ABSL_HAVE_RAW_IO #endif namespace absl { ABSL_NAMESPACE_BEGIN namespace raw_log_internal { namespace { #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED constexpr char kTruncated[] = " ... (message truncated)\n"; bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(3, 0); bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) { if (*size < 0) return false; int n = vsnprintf(*buf, static_cast<size_t>(*size), format, ap); bool result = true; if (n < 0 || n > *size) { result = false; if (static_cast<size_t>(*size) > sizeof(kTruncated)) { n = *size - static_cast<int>(sizeof(kTruncated)); } else { n = 0; } } *size -= n; *buf += n; return result; } #endif constexpr int kLogBufSize = 3000; bool DoRawLog(char** buf, int* size, const char* format, ...) ABSL_PRINTF_ATTRIBUTE(3, 4); bool DoRawLog(char** buf, int* size, const char* format, ...) 
{ if (*size < 0) return false; va_list ap; va_start(ap, format); int n = vsnprintf(*buf, static_cast<size_t>(*size), format, ap); va_end(ap); if (n < 0 || n > *size) return false; *size -= n; *buf += n; return true; } bool DefaultLogFilterAndPrefix(absl::LogSeverity, const char* file, int line, char** buf, int* buf_size) { DoRawLog(buf, buf_size, "[%s : %d] RAW: ", file, line); return true; } ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<LogFilterAndPrefixHook> log_filter_and_prefix_hook(DefaultLogFilterAndPrefix); ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<AbortHook> abort_hook; void RawLogVA(absl::LogSeverity severity, const char* file, int line, const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(4, 0); void RawLogVA(absl::LogSeverity severity, const char* file, int line, const char* format, va_list ap) { char buffer[kLogBufSize]; char* buf = buffer; int size = sizeof(buffer); #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED bool enabled = true; #else bool enabled = false; #endif #ifdef ABSL_MIN_LOG_LEVEL if (severity < static_cast<absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) && severity < absl::LogSeverity::kFatal) { enabled = false; } #endif enabled = log_filter_and_prefix_hook(severity, file, line, &buf, &size); const char* const prefix_end = buf; #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED if (enabled) { bool no_chop = VADoRawLog(&buf, &size, format, ap); if (no_chop) { DoRawLog(&buf, &size, "\n"); } else { DoRawLog(&buf, &size, "%s", kTruncated); } AsyncSignalSafeWriteError(buffer, static_cast<size_t>(buf - buffer)); } #else static_cast<void>(format); static_cast<void>(ap); static_cast<void>(enabled); #endif if (severity == absl::LogSeverity::kFatal) { abort_hook(file, line, buffer, prefix_end, buffer + kLogBufSize); abort(); } } void DefaultInternalLog(absl::LogSeverity severity, const char* file, int line, const std::string& message) { RawLog(severity, file, line, "%.*s", static_cast<int>(message.size()), message.data()); } } void AsyncSignalSafeWriteError(const char* s, size_t len) { if (!len) return; absl::base_internal::ErrnoSaver errno_saver; #if defined(__EMSCRIPTEN__) if (s[len - 1] == '\n') { len--; } #if ABSL_INTERNAL_EMSCRIPTEN_VERSION >= 3001043 emscripten_errn(s, len); #else char buf[kLogBufSize]; if (len >= kLogBufSize) { len = kLogBufSize - 1; constexpr size_t trunc_len = sizeof(kTruncated) - 2; memcpy(buf + len - trunc_len, kTruncated, trunc_len); buf[len] = '\0'; len -= trunc_len; } else { buf[len] = '\0'; } memcpy(buf, s, len); _emscripten_err(buf); #endif #elif defined(ABSL_HAVE_SYSCALL_WRITE) syscall(SYS_write, STDERR_FILENO, s, len); #elif defined(ABSL_HAVE_POSIX_WRITE) write(STDERR_FILENO, s, len); #elif defined(ABSL_HAVE_RAW_IO) _write( 2, s, static_cast<unsigned>(len)); #else (void)s; (void)len; #endif } void RawLog(absl::LogSeverity severity, const char* file, int line, const char* format, ...) { va_list ap; va_start(ap, format); RawLogVA(severity, file, line, format, ap); va_end(ap); } bool RawLoggingFullySupported() { #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED return true; #else return false; #endif } ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL absl::base_internal::AtomicHook<InternalLogFunction> internal_log_function(DefaultInternalLog); void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func) { log_filter_and_prefix_hook.Store(func); } void RegisterAbortHook(AbortHook func) { abort_hook.Store(func); } void RegisterInternalLogFunction(InternalLogFunction func) { internal_log_function.Store(func); } } ABSL_NAMESPACE_END }
#include "absl/base/internal/raw_logging.h" #include <tuple> #include "gtest/gtest.h" #include "absl/strings/str_cat.h" namespace { TEST(RawLoggingCompilationTest, Log) { ABSL_RAW_LOG(INFO, "RAW INFO: %d", 1); ABSL_RAW_LOG(INFO, "RAW INFO: %d %d", 1, 2); ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d", 1, 2, 3); ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d %d", 1, 2, 3, 4); ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d %d %d", 1, 2, 3, 4, 5); ABSL_RAW_LOG(WARNING, "RAW WARNING: %d", 1); ABSL_RAW_LOG(ERROR, "RAW ERROR: %d", 1); } TEST(RawLoggingCompilationTest, LogWithNulls) { ABSL_RAW_LOG(INFO, "RAW INFO: %s%c%s", "Hello", 0, "World"); } TEST(RawLoggingCompilationTest, PassingCheck) { ABSL_RAW_CHECK(true, "RAW CHECK"); } const char kExpectedDeathOutput[] = ""; TEST(RawLoggingDeathTest, FailingCheck) { EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_CHECK(1 == 0, "explanation"), kExpectedDeathOutput); } TEST(RawLoggingDeathTest, LogFatal) { EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_LOG(FATAL, "my dog has fleas"), kExpectedDeathOutput); } TEST(InternalLog, CompilationTest) { ABSL_INTERNAL_LOG(INFO, "Internal Log"); std::string log_msg = "Internal Log"; ABSL_INTERNAL_LOG(INFO, log_msg); ABSL_INTERNAL_LOG(INFO, log_msg + " 2"); float d = 1.1f; ABSL_INTERNAL_LOG(INFO, absl::StrCat("Internal log ", 3, " + ", d)); } TEST(InternalLogDeathTest, FailingCheck) { EXPECT_DEATH_IF_SUPPORTED(ABSL_INTERNAL_CHECK(1 == 0, "explanation"), kExpectedDeathOutput); } TEST(InternalLogDeathTest, LogFatal) { EXPECT_DEATH_IF_SUPPORTED(ABSL_INTERNAL_LOG(FATAL, "my dog has fleas"), kExpectedDeathOutput); } }
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/raw_logging.cc
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/raw_logging_test.cc
03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4
18f87e2e-9b1a-444a-9ef9-eca87df828f3
cpp
google/tensorstore
stop_token
tensorstore/util/stop_token.h
tensorstore/util/stop_token_test.cc
#ifndef TENSORSTORE_UTIL_STOP_TOKEN_H_
#define TENSORSTORE_UTIL_STOP_TOKEN_H_

#include <atomic>
#include <cstddef>
#include <type_traits>
#include <utility>

#include "absl/base/attributes.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/util/stop_token_impl.h"

namespace tensorstore {

class StopSource;
template <typename Callback>
class StopCallback;

class StopToken {
 public:
  StopToken() noexcept = default;
  ~StopToken() noexcept = default;

  StopToken(const StopToken&) noexcept = default;
  StopToken(StopToken&&) noexcept = default;
  StopToken& operator=(const StopToken&) noexcept = default;
  StopToken& operator=(StopToken&&) noexcept = default;

  [[nodiscard]] bool stop_possible() const noexcept {
    return state_ != nullptr;
  }

  [[nodiscard]] bool stop_requested() const noexcept {
    return state_ != nullptr && state_->stop_requested();
  }

  friend bool operator==(const StopToken& a, const StopToken& b) {
    return a.state_ == b.state_;
  }
  friend bool operator!=(const StopToken& a, const StopToken& b) {
    return !(a == b);
  }

 private:
  friend class StopSource;
  template <typename Callback>
  friend class StopCallback;

  StopToken(internal::IntrusivePtr<internal_stop_token::StopState> state)
      : state_(std::move(state)) {}

  internal::IntrusivePtr<internal_stop_token::StopState> state_{nullptr};
};

class StopSource {
 public:
  StopSource() noexcept
      : state_(internal::MakeIntrusivePtr<internal_stop_token::StopState>()) {}
  explicit StopSource(std::nullptr_t) noexcept : state_(nullptr) {}
  ~StopSource() noexcept = default;

  StopSource(const StopSource& b) noexcept = default;
  StopSource(StopSource&&) noexcept = default;
  StopSource& operator=(const StopSource& b) noexcept = default;
  StopSource& operator=(StopSource&&) noexcept = default;

  [[nodiscard]] bool stop_possible() const noexcept {
    return state_ != nullptr;
  }

  [[nodiscard]] bool stop_requested() const noexcept {
    return state_ != nullptr && state_->stop_requested();
  }

  bool request_stop() const noexcept {
    if (state_ != nullptr) {
      return state_->RequestStop();
    }
    return false;
  }

  [[nodiscard]] StopToken get_token() const noexcept {
    return StopToken(state_);
  }

 private:
  internal::IntrusivePtr<internal_stop_token::StopState> state_;
};

template <typename Callback>
class StopCallback : private internal_stop_token::StopCallbackBase {
  static_assert(std::is_invocable_v<Callback>);

 public:
  using callback_type = Callback;

  StopCallback(const StopCallback&) = delete;
  StopCallback& operator=(const StopCallback&) = delete;
  StopCallback(StopCallback&&) = delete;
  StopCallback& operator=(StopCallback&&) = delete;

  template <
      typename... Args,
      std::enable_if_t<std::is_constructible_v<Callback, Args...>, int> = 0>
  explicit StopCallback(const StopToken& token, Args&&... args)
      : callback_(std::forward<Args>(args)...) {
    internal_stop_token::StopState* state = token.state_.get();
    if (state) {
      invoker_ = &StopCallback::Invoker;
      state->RegisterImpl(*this);
    }
  }

  ~StopCallback() {
    internal_stop_token::StopState* state =
        state_.exchange(nullptr, std::memory_order_acq_rel);
    if (state != nullptr) {
      state->UnregisterImpl(*this);
    }
  }

 private:
  static void Invoker(internal_stop_token::StopCallbackBase& self) noexcept {
    static_cast<Callback&&>(static_cast<StopCallback&&>(self).callback_)();
  }

  ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Callback callback_;
};

template <typename Callback>
StopCallback(StopToken token, Callback callback) -> StopCallback<Callback>;

}  // namespace tensorstore

#endif  // TENSORSTORE_UTIL_STOP_TOKEN_H_
#include "tensorstore/util/stop_token.h" #include <functional> #include <optional> #include <utility> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorstore/internal/testing/concurrent.h" namespace { TEST(StopTokenTest, Invariants) { tensorstore::StopSource source; EXPECT_TRUE(source.stop_possible()); EXPECT_FALSE(source.stop_requested()); tensorstore::StopToken token = source.get_token(); EXPECT_TRUE(source.stop_possible()); EXPECT_FALSE(source.stop_requested()); EXPECT_EQ(token, source.get_token()); EXPECT_TRUE(source.request_stop()); EXPECT_TRUE(source.stop_possible()); EXPECT_TRUE(source.stop_requested()); EXPECT_TRUE(token.stop_requested()); { tensorstore::StopSource source2; EXPECT_NE(token, source2.get_token()); } } TEST(StopTokenTest, Invariants_Null) { tensorstore::StopSource source(nullptr); EXPECT_FALSE(source.stop_possible()); EXPECT_FALSE(source.stop_requested()); tensorstore::StopToken token = source.get_token(); EXPECT_FALSE(source.stop_possible()); EXPECT_FALSE(source.stop_requested()); EXPECT_EQ(token, source.get_token()); EXPECT_FALSE(source.request_stop()); EXPECT_FALSE(source.stop_possible()); EXPECT_FALSE(source.stop_requested()); EXPECT_FALSE(token.stop_requested()); { tensorstore::StopSource source2; EXPECT_NE(token, source2.get_token()); } } TEST(StopTokenTest, Basic_InScope) { tensorstore::StopSource source; bool called = false; { tensorstore::StopCallback callback(source.get_token(), [&]() { called = true; }); EXPECT_FALSE(called); EXPECT_TRUE(source.request_stop()); } EXPECT_TRUE(called); } TEST(StopTokenTest, Basic_NotInScope) { tensorstore::StopSource source; bool called = false; { tensorstore::StopCallback callback(source.get_token(), [&]() { called = true; }); EXPECT_FALSE(called); } EXPECT_TRUE(source.request_stop()); EXPECT_FALSE(called); } TEST(StopTokenTest, Basic_Null) { tensorstore::StopSource source(nullptr); bool called = false; { tensorstore::StopCallback callback(source.get_token(), [&]() { called = true; }); EXPECT_FALSE(called); EXPECT_FALSE(source.request_stop()); } EXPECT_FALSE(called); } TEST(StopTokenTest, StopAlreadyRequested) { tensorstore::StopSource source; EXPECT_TRUE(source.request_stop()); bool called = false; tensorstore::StopCallback callback(source.get_token(), [&]() { called = true; }); EXPECT_TRUE(called); } TEST(StopTokenTest, CallbackOrder) { bool called[3] = {}; auto do_nothing = []() {}; using DoNothingCallback = tensorstore::StopCallback<decltype(do_nothing)>; tensorstore::StopSource source; auto x = std::make_unique<DoNothingCallback>(source.get_token(), do_nothing); tensorstore::StopCallback callback0(source.get_token(), [&]() { EXPECT_TRUE(called[1]); called[0] = true; }); tensorstore::StopCallback callback1(source.get_token(), [&]() { EXPECT_TRUE(called[2]); called[1] = true; }); tensorstore::StopCallback callback2(source.get_token(), [&]() { EXPECT_FALSE(called[0]); called[2] = true; }); { DoNothingCallback tmp(source.get_token(), do_nothing); } x = nullptr; EXPECT_TRUE(source.request_stop()); EXPECT_TRUE(called[2]); } TEST(StopCallbackTest, InvokeValueCategory) { struct Callback { void operator()() const& { value += 1; } void operator()() && { value += 100; } int& value; }; tensorstore::StopSource source; int counts[3] = {}; tensorstore::StopCallback stop_callback0(source.get_token(), Callback{counts[0]}); Callback callback1{counts[1]}; tensorstore::StopCallback<Callback&> stop_callback1(source.get_token(), callback1); tensorstore::StopCallback<const Callback> stop_callback2(source.get_token(), 
Callback{counts[2]}); source.request_stop(); EXPECT_THAT(counts, ::testing::ElementsAre(100, 1, 1)); } TEST(StopTokenTest, SelfDeregister) { tensorstore::StopSource source; std::optional<tensorstore::StopCallback<std::function<void()>>> callback{ std::in_place, source.get_token(), [&] { callback = std::nullopt; }}; EXPECT_TRUE(source.request_stop()); EXPECT_FALSE(callback.has_value()); } TEST(StopTokenTest, Concurrent) { tensorstore::StopSource source; bool called = false; std::optional<tensorstore::StopCallback<std::function<void()>>> callback; ::tensorstore::internal_testing::TestConcurrent( 100, [&] { tensorstore::StopSource new_source; source = std::move(new_source); called = false; }, [&] { EXPECT_TRUE(source.stop_requested()); callback = std::nullopt; EXPECT_TRUE(called); }, [&] { callback.emplace(source.get_token(), [&]() { called = true; }); }, [&] { source.request_stop(); }, [&] { tensorstore::StopCallback callback(source.get_token(), []() {}); } ); } }
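StopSource/StopToken/StopCallback mirror the C++20 std::stop_source family: the source owns the stop state, tokens observe it, and a callback runs either inside request_stop() or immediately at construction if a stop was already requested. A minimal usage sketch, assuming the tensorstore headers:

#include <iostream>

#include "tensorstore/util/stop_token.h"

int main() {
  tensorstore::StopSource source;
  tensorstore::StopToken token = source.get_token();

  // Registered before the stop, so it fires inside request_stop().
  tensorstore::StopCallback callback(token,
                                     [] { std::cout << "stop requested\n"; });

  source.request_stop();  // returns true: this call won the race

  // Registered after the stop: runs immediately in the constructor.
  tensorstore::StopCallback late(token, [] { std::cout << "late\n"; });
}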
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/stop_token.h
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/stop_token_test.cc
4f887a6430414cd6088e1743555015b10f116d50
1c5201c2-01d4-4a96-b235-4a452e3fa598
cpp
tensorflow/tensorflow
semantic_version
third_party/xla/xla/stream_executor/semantic_version.cc
third_party/xla/xla/stream_executor/semantic_version_test.cc
#include "xla/stream_executor/semantic_version.h" #include <string> #include <vector> #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/numbers.h" #include "absl/strings/str_format.h" #include "absl/strings/str_split.h" #include "absl/strings/string_view.h" #include "tsl/platform/statusor.h" namespace stream_executor { std::string SemanticVersion::ToString() const { return absl::StrFormat("%d.%d.%d", major_, minor_, patch_); } static absl::StatusOr<unsigned> ParseUnsignedNumber( absl::string_view component) { unsigned number; if (!absl::SimpleAtoi(component, &number)) { return absl::InvalidArgumentError( absl::StrFormat("'%s' is not an unsigned number.", component)); } return number; } absl::StatusOr<SemanticVersion> SemanticVersion::ParseFromString( absl::string_view str) { std::vector<absl::string_view> components = absl::StrSplit(str, '.'); if (components.size() != 3) { return absl::InvalidArgumentError( "Version does not match the format X.Y.Z"); } SemanticVersion result{0, 0, 0}; TF_ASSIGN_OR_RETURN(result.major(), ParseUnsignedNumber(components[0])); TF_ASSIGN_OR_RETURN(result.minor(), ParseUnsignedNumber(components[1])); TF_ASSIGN_OR_RETURN(result.patch(), ParseUnsignedNumber(components[2])); return result; } }
#include "xla/stream_executor/semantic_version.h" #include <algorithm> #include <array> #include <sstream> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/hash/hash_testing.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/test.h" namespace stream_executor { namespace { TEST(SemanticVersion, Construction) { SemanticVersion version{1, 2, 3}; EXPECT_EQ(version.major(), 1); EXPECT_EQ(version.minor(), 2); EXPECT_EQ(version.patch(), 3); } TEST(SemanticVersion, ConstructionFromArray) { SemanticVersion version{std::array<unsigned, 3>{1, 2, 3}}; EXPECT_EQ(version.major(), 1); EXPECT_EQ(version.minor(), 2); EXPECT_EQ(version.patch(), 3); } TEST(SemanticVersion, Mutation) { SemanticVersion version{0, 0, 0}; version.major() = 1; version.minor() = 2; version.patch() = 3; EXPECT_EQ(version.major(), 1); EXPECT_EQ(version.minor(), 2); EXPECT_EQ(version.patch(), 3); } TEST(SemanticVersion, ParseFromStringSuccess) { absl::StatusOr<SemanticVersion> version = SemanticVersion::ParseFromString("1.2.3"); ASSERT_THAT(version, tsl::testing::IsOk()); EXPECT_EQ(version->major(), 1); EXPECT_EQ(version->minor(), 2); EXPECT_EQ(version->patch(), 3); } TEST(SemanticVersion, ParseFromStringInvalid) { auto test = [](absl::string_view str) { absl::StatusOr<SemanticVersion> version = SemanticVersion::ParseFromString(str); EXPECT_THAT(version, tsl::testing::StatusIs(absl::StatusCode::kInvalidArgument)); }; test("1.2"); test("1.2.3dev5"); } TEST(SemanticVersion, ToString) { SemanticVersion version{1, 2, 3}; EXPECT_EQ(version.ToString(), "1.2.3"); } TEST(SemanticVersion, AbslStringify) { SemanticVersion version{1, 2, 3}; EXPECT_EQ(absl::StrCat(version), version.ToString()); } TEST(SemanticVersion, OStream) { SemanticVersion version{1, 2, 3}; std::ostringstream os; os << version; EXPECT_EQ(os.str(), version.ToString()); } TEST(SemanticVersion, Equality) { SemanticVersion version{1, 2, 3}; SemanticVersion other{1, 2, 4}; EXPECT_EQ(version, version); EXPECT_FALSE(version != version); EXPECT_NE(version, other); EXPECT_FALSE(version == other); } TEST(SemanticVersion, Ordering) { std::array<SemanticVersion, 5> versions = { SemanticVersion{3, 3, 3}, SemanticVersion{0, 0, 0}, SemanticVersion{1, 2, 3}, SemanticVersion{1, 2, 4}, SemanticVersion{1, 3, 0}}; std::sort(versions.begin(), versions.end()); EXPECT_THAT(versions, testing::ElementsAre( SemanticVersion{0, 0, 0}, SemanticVersion{1, 2, 3}, SemanticVersion{1, 2, 4}, SemanticVersion{1, 3, 0}, SemanticVersion{3, 3, 3})); } TEST(SemanticVersion, Hash) { EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ SemanticVersion{0, 0, 0}, SemanticVersion{1, 2, 3}, SemanticVersion{1, 2, 4}, SemanticVersion{1, 3, 0}, SemanticVersion{3, 3, 3}, })); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/semantic_version.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/semantic_version_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
8ff2a112-c796-41ea-bcf0-13b123b8378b
cpp
tensorflow/tensorflow
device_name_utils
third_party/xla/xla/tsl/util/device_name_utils.cc
third_party/xla/xla/tsl/util/device_name_utils_test.cc
#include "xla/tsl/util/device_name_utils.h" #include <algorithm> #include "tsl/platform/errors.h" namespace tsl { static bool IsAlpha(char c) { return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'); } static bool IsAlphaNumOrUnderscore(char c) { return IsAlpha(c) || (c >= '0' && c <= '9') || c == '_'; } static bool IsJobName(absl::string_view in) { return !in.empty() && IsAlpha(in.front()) && std::all_of(in.begin(), in.end(), IsAlphaNumOrUnderscore); } static bool ConsumePrefix(absl::string_view* in, string* out, absl::string_view prefix_terminators) { if (in->empty() || !IsAlpha(in->front())) return false; const auto end_it = std::find_first_of(in->begin(), in->end(), prefix_terminators.begin(), prefix_terminators.end()); if (!std::all_of(in->begin(), end_it, IsAlphaNumOrUnderscore)) { return false; } out->assign(in->begin(), end_it); in->remove_prefix(end_it - in->begin()); return true; } static bool ConsumeJobName(absl::string_view* in, string* job) { return ConsumePrefix(in, job, "/"); } static bool ConsumeDeviceType(absl::string_view* in, string* device_type) { return ConsumePrefix(in, device_type, "/:"); } static bool ConsumeNumber(absl::string_view* in, int* val) { uint64 tmp; if (str_util::ConsumeLeadingDigits(in, &tmp)) { *val = tmp; return true; } else { return false; } } static string DeviceName(const string& job, int replica, int task, const string& device_prefix, const string& device_type, int id) { CHECK(IsJobName(job)) << job; CHECK_LE(0, replica); CHECK_LE(0, task); CHECK(!device_type.empty()); CHECK_LE(0, id); return strings::StrCat("/job:", job, "/replica:", replica, "/task:", task, device_prefix, device_type, ":", id); } string DeviceNameUtils::FullName(const string& job, int replica, int task, const string& type, int id) { return DeviceName(job, replica, task, "/device:", type, id); } namespace { string LegacyName(const string& job, int replica, int task, const string& type, int id) { return DeviceName(job, replica, task, "/", absl::AsciiStrToLower(type), id); } } bool DeviceNameUtils::ParseFullName(absl::string_view fullname, ParsedName* p) { p->Clear(); if (fullname == "/") { return true; } while (!fullname.empty()) { bool progress = false; if (absl::ConsumePrefix(&fullname, "/job:")) { p->has_job = !absl::ConsumePrefix(&fullname, "*"); if (p->has_job && !ConsumeJobName(&fullname, &p->job)) { return false; } progress = true; } if (absl::ConsumePrefix(&fullname, "/replica:")) { p->has_replica = !absl::ConsumePrefix(&fullname, "*"); if (p->has_replica && !ConsumeNumber(&fullname, &p->replica)) { return false; } progress = true; } if (absl::ConsumePrefix(&fullname, "/task:")) { p->has_task = !absl::ConsumePrefix(&fullname, "*"); if (p->has_task && !ConsumeNumber(&fullname, &p->task)) { return false; } progress = true; } if (absl::ConsumePrefix(&fullname, "/device:")) { p->has_type = !absl::ConsumePrefix(&fullname, "*"); if (p->has_type && !ConsumeDeviceType(&fullname, &p->type)) { return false; } if (!absl::ConsumePrefix(&fullname, ":")) { p->has_id = false; } else { p->has_id = !absl::ConsumePrefix(&fullname, "*"); if (p->has_id && !ConsumeNumber(&fullname, &p->id)) { return false; } } progress = true; } if (absl::ConsumePrefix(&fullname, "/cpu:") || absl::ConsumePrefix(&fullname, "/CPU:")) { p->has_type = true; p->type = "CPU"; p->has_id = !absl::ConsumePrefix(&fullname, "*"); if (p->has_id && !ConsumeNumber(&fullname, &p->id)) { return false; } progress = true; } if (absl::ConsumePrefix(&fullname, "/gpu:") || absl::ConsumePrefix(&fullname, "/GPU:")) { p->has_type = 
true; p->type = "GPU"; p->has_id = !absl::ConsumePrefix(&fullname, "*"); if (p->has_id && !ConsumeNumber(&fullname, &p->id)) { return false; } progress = true; } if (!progress) { return false; } } return true; } bool DeviceNameUtils::ParseFullOrLocalName(absl::string_view fullname, ParsedName* p) { return ParseFullName(fullname, p) || ParseLocalName(fullname, p); } namespace { void CompleteName(const DeviceNameUtils::ParsedName& parsed_basename, DeviceNameUtils::ParsedName* parsed_name) { if (!parsed_name->has_job) { parsed_name->job = parsed_basename.job; parsed_name->has_job = true; } if (!parsed_name->has_replica) { parsed_name->replica = parsed_basename.replica; parsed_name->has_replica = true; } if (!parsed_name->has_task) { parsed_name->task = parsed_basename.task; parsed_name->has_task = true; } if (!parsed_name->has_type) { parsed_name->type = parsed_basename.type; parsed_name->has_type = true; } if (!parsed_name->has_id) { parsed_name->id = parsed_basename.id; parsed_name->has_id = true; } } } absl::Status DeviceNameUtils::CanonicalizeDeviceName(absl::string_view fullname, absl::string_view basename, string* canonical_name) { *canonical_name = ""; ParsedName parsed_basename; if (!ParseFullName(basename, &parsed_basename)) { return errors::InvalidArgument("Could not parse basename: ", basename, " into a device specification."); } if (!(parsed_basename.has_job && parsed_basename.has_replica && parsed_basename.has_task && parsed_basename.has_type && parsed_basename.has_id)) { return errors::InvalidArgument("Basename: ", basename, " should be fully " "specified."); } ParsedName parsed_name; if (ParseLocalName(fullname, &parsed_name)) { CompleteName(parsed_basename, &parsed_name); *canonical_name = ParsedNameToString(parsed_name); return absl::OkStatus(); } if (ParseFullName(fullname, &parsed_name)) { CompleteName(parsed_basename, &parsed_name); *canonical_name = ParsedNameToString(parsed_name); return absl::OkStatus(); } return errors::InvalidArgument("Could not parse ", fullname, " into a device " "specification."); } string DeviceNameUtils::ParsedNameToString(const ParsedName& pn) { string buf; if (pn.has_job) strings::StrAppend(&buf, "/job:", pn.job); if (pn.has_replica) strings::StrAppend(&buf, "/replica:", pn.replica); if (pn.has_task) strings::StrAppend(&buf, "/task:", pn.task); if (pn.has_type) { strings::StrAppend(&buf, "/device:", pn.type, ":"); if (pn.has_id) { strings::StrAppend(&buf, pn.id); } else { strings::StrAppend(&buf, "*"); } } return buf; } bool DeviceNameUtils::IsSpecification(const ParsedName& less_specific, const ParsedName& more_specific) { if (less_specific.has_job && (!more_specific.has_job || (less_specific.job != more_specific.job))) { return false; } if (less_specific.has_replica && (!more_specific.has_replica || (less_specific.replica != more_specific.replica))) { return false; } if (less_specific.has_task && (!more_specific.has_task || (less_specific.task != more_specific.task))) { return false; } if (less_specific.has_type && (!more_specific.has_type || (less_specific.type != more_specific.type))) { return false; } if (less_specific.has_id && (!more_specific.has_id || (less_specific.id != more_specific.id))) { return false; } return true; } bool DeviceNameUtils::AreCompatibleDevNames(const ParsedName& a, const ParsedName& b) { if (a.has_job && b.has_job && (a.job != b.job)) { return false; } if (a.has_replica && b.has_replica && (a.replica != b.replica)) { return false; } if (a.has_task && b.has_task && (a.task != b.task)) { return false; } if 
(a.has_type && b.has_type && (a.type != b.type)) { return false; } if (a.has_id && b.has_id && (a.id != b.id)) { return false; } return true; } void DeviceNameUtils::EnsureSpecification(ParsedName* more_specific, const ParsedName& less_specific) { if (less_specific.has_job) { more_specific->has_job = true; more_specific->job = less_specific.job; } if (less_specific.has_replica) { more_specific->has_replica = true; more_specific->replica = less_specific.replica; } if (less_specific.has_task) { more_specific->has_task = true; more_specific->task = less_specific.task; } if (less_specific.has_type) { more_specific->has_type = true; more_specific->type = less_specific.type; } if (less_specific.has_id) { more_specific->has_id = true; more_specific->id = less_specific.id; } } bool DeviceNameUtils::IsCompleteSpecification(const ParsedName& pattern, const ParsedName& name) { CHECK(name.has_job && name.has_replica && name.has_task && name.has_type && name.has_id); if (pattern.has_job && (pattern.job != name.job)) return false; if (pattern.has_replica && (pattern.replica != name.replica)) return false; if (pattern.has_task && (pattern.task != name.task)) return false; if (pattern.has_type && (pattern.type != name.type)) return false; if (pattern.has_id && (pattern.id != name.id)) return false; return true; } namespace { absl::Status MergeDevNamesImpl(DeviceNameUtils::ParsedName* target, const DeviceNameUtils::ParsedName& other, bool allow_soft_placement, bool override_conflicts) { const auto& ParsedNameToString = DeviceNameUtils::ParsedNameToString; if (other.has_job) { if (target->has_job && target->job != other.job) { return errors::InvalidArgument( "Cannot merge devices with incompatible jobs: '", ParsedNameToString(*target), "' and '", ParsedNameToString(other), "'"); } else { target->has_job = other.has_job; target->job = other.job; } } if (other.has_replica) { if (target->has_replica && target->replica != other.replica) { return errors::InvalidArgument( "Cannot merge devices with incompatible replicas: '", ParsedNameToString(*target), "' and '", ParsedNameToString(other), "'"); } else { target->has_replica = other.has_replica; target->replica = other.replica; } } if (other.has_task) { if (target->has_task && target->task != other.task) { return errors::InvalidArgument( "Cannot merge devices with incompatible tasks: '", ParsedNameToString(*target), "' and '", ParsedNameToString(other), "'"); } else { target->has_task = other.has_task; target->task = other.task; } } if (other.has_type) { if (target->has_type && target->type != other.type) { if (!allow_soft_placement) { return errors::InvalidArgument( "Cannot merge devices with incompatible types: '", ParsedNameToString(*target), "' and '", ParsedNameToString(other), "'"); } else if (override_conflicts) { target->type = other.type; } else { target->has_id = false; target->has_type = false; return absl::OkStatus(); } } else { target->has_type = other.has_type; target->type = other.type; } } if (other.has_id) { if (target->has_id && target->id != other.id) { if (!allow_soft_placement) { return errors::InvalidArgument( "Cannot merge devices with incompatible ids: '", ParsedNameToString(*target), "' and '", ParsedNameToString(other), "'"); } else if (override_conflicts) { target->id = other.id; } else { target->has_id = false; return absl::OkStatus(); } } else { target->has_id = other.has_id; target->id = other.id; } } return absl::OkStatus(); } } absl::Status DeviceNameUtils::MergeDevNames(ParsedName* target, const ParsedName& other, bool 
allow_soft_placement) { return MergeDevNamesImpl(target, other, allow_soft_placement, false); } absl::Status DeviceNameUtils::MergeOverrideDevNames(ParsedName* target, const ParsedName& other) { return MergeDevNamesImpl(target, other, true, true); } void DeviceNameUtils::MergeUnsetDevNames(ParsedName* target, const ParsedName& other) { if (other.has_job && !target->has_job) { target->has_job = other.has_job; target->job = other.job; } if (other.has_replica && !target->has_replica) { target->has_replica = other.has_replica; target->replica = other.replica; } if (other.has_task && !target->has_task) { target->has_task = other.has_task; target->task = other.task; } if (other.has_type && !target->has_type) { target->has_type = other.has_type; target->type = other.type; } if (other.has_id && !target->has_id) { target->has_id = other.has_id; target->id = other.id; } } bool DeviceNameUtils::IsSameAddressSpace(const ParsedName& a, const ParsedName& b) { return (a.has_job && b.has_job && (a.job == b.job)) && (a.has_replica && b.has_replica && (a.replica == b.replica)) && (a.has_task && b.has_task && (a.task == b.task)); } bool DeviceNameUtils::IsSameAddressSpace(absl::string_view src, absl::string_view dst) { ParsedName x; ParsedName y; return ParseFullName(src, &x) && ParseFullName(dst, &y) && IsSameAddressSpace(x, y); } bool DeviceNameUtils::IsDifferentAddressSpace(const ParsedName& a, const ParsedName& b) { return (a.has_job && b.has_job && (a.job != b.job)) || (a.has_replica && b.has_replica && (a.replica != b.replica)) || (a.has_task && b.has_task && (a.task != b.task)); } const DeviceNameUtils::ParsedName DeviceNameUtils::AddressSpace( const ParsedName& name) { ParsedName address_space; address_space.has_job = name.has_job; address_space.has_replica = name.has_replica; address_space.has_task = name.has_task; address_space.job = name.job; address_space.replica = name.replica; address_space.task = name.task; return address_space; } string DeviceNameUtils::LocalName(absl::string_view type, int id) { return strings::StrCat("/device:", type, ":", id); } namespace { string LegacyLocalName(absl::string_view type, int id) { return strings::StrCat(type, ":", id); } } string DeviceNameUtils::LocalName(absl::string_view fullname) { ParsedName x; CHECK(ParseFullName(fullname, &x)) << fullname; return LocalName(x.type, x.id); } bool DeviceNameUtils::ParseLocalName(absl::string_view name, ParsedName* p) { if (!ConsumeDeviceType(&name, &p->type)) { return false; } p->has_type = true; if (!absl::ConsumePrefix(&name, ":")) { return false; } if (!ConsumeNumber(&name, &p->id)) { return false; } p->has_id = true; return name.empty(); } bool DeviceNameUtils::SplitDeviceName(absl::string_view name, string* task, string* device) { ParsedName pn; if (ParseFullName(name, &pn) && pn.has_type && pn.has_id) { task->clear(); task->reserve( (pn.has_job ? (5 + pn.job.size()) : 0) + (pn.has_replica ? (9 + 4 ) : 0) + (pn.has_task ? 
(6 + 4 ) : 0)); if (pn.has_job) { strings::StrAppend(task, "/job:", pn.job); } if (pn.has_replica) { strings::StrAppend(task, "/replica:", pn.replica); } if (pn.has_task) { strings::StrAppend(task, "/task:", pn.task); } device->clear(); strings::StrAppend(device, pn.type, ":", pn.id); return true; } return false; } bool DeviceNameUtils::GetTaskName(const ParsedName& pn, string* task) { if (pn.has_job && pn.has_replica && pn.has_task) { task->clear(); task->reserve((5 + pn.job.size()) + (9 + 4 ) + (6 + 4 )); strings::StrAppend(task, "/job:", pn.job); strings::StrAppend(task, "/replica:", pn.replica); strings::StrAppend(task, "/task:", pn.task); return true; } return false; } std::vector<string> DeviceNameUtils::GetNamesForDeviceMappings( const ParsedName& pn) { if (pn.has_job && pn.has_replica && pn.has_task && pn.has_type && pn.has_id) { return { DeviceNameUtils::FullName(pn.job, pn.replica, pn.task, pn.type, pn.id), LegacyName(pn.job, pn.replica, pn.task, pn.type, pn.id)}; } else { return {}; } } std::vector<string> DeviceNameUtils::GetLocalNamesForDeviceMappings( const ParsedName& pn) { if (pn.has_type && pn.has_id) { return {DeviceNameUtils::LocalName(pn.type, pn.id), LegacyLocalName(pn.type, pn.id)}; } else { return {}; } } absl::Status DeviceNameUtils::DeviceNameToCpuDeviceName( const string& device_name, string* host_device_name) { DeviceNameUtils::ParsedName device; if (!DeviceNameUtils::ParseFullName(device_name, &device)) { return errors::Internal("Could not parse device name ", device_name); } device.type = "CPU"; device.has_type = true; device.id = 0; device.has_id = true; *host_device_name = DeviceNameUtils::ParsedNameToString(device); return absl::OkStatus(); } std::ostream& operator<<(std::ostream& os, const DeviceNameUtils::ParsedName& x) { os << DeviceNameUtils::ParsedNameToString(x); return os; } }
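The parser above accepts full names, wildcarded partial names, and the legacy /cpu:N and /gpu:N spellings, normalizing everything to the /device:TYPE:ID form. A small driver, assuming the xla/tsl headers; the worker name is made up for the example:

#include <iostream>
#include <string>

#include "xla/tsl/util/device_name_utils.h"

int main() {
  const std::string name = "/job:worker/replica:0/task:1/device:GPU:3";
  tsl::DeviceNameUtils::ParsedName parsed;
  if (!tsl::DeviceNameUtils::ParseFullName(name, &parsed)) {
    std::cerr << "unparseable device name\n";
    return 1;
  }
  std::cout << "type=" << parsed.type << " id=" << parsed.id << '\n';
  // LocalName() strips the job/replica/task prefix.
  std::cout << tsl::DeviceNameUtils::LocalName(name) << '\n';  // /device:GPU:3
  return 0;
}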
#include "xla/tsl/util/device_name_utils.h" #include <vector> #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/errors.h" #include "tsl/platform/strcat.h" #include "tsl/platform/test.h" #include "tsl/platform/test_benchmark.h" namespace tsl { namespace { bool RoundTripParsedName(const string& original, const string& expected) { DeviceNameUtils::ParsedName p; if (!DeviceNameUtils::ParseFullName(original, &p)) { return false; } string round_tripped = DeviceNameUtils::ParsedNameToString(p); return (round_tripped == expected); } enum NamePart { kJob = 0x01, kReplica = 0x02, kTask = 0x04, kDevice = 0x08 }; bool RoundTripPartialName(int parts_to_test, const std::vector<string>& parts, bool explicitDevice) { string original, expected; if (parts_to_test & kJob) { strings::StrAppend(&original, "/job:", parts[0]); strings::StrAppend(&expected, "/job:", parts[0]); } if (parts_to_test & kReplica) { strings::StrAppend(&original, "/replica:", parts[1]); strings::StrAppend(&expected, "/replica:", parts[1]); } if (parts_to_test & kTask) { strings::StrAppend(&original, "/task:", parts[2]); strings::StrAppend(&expected, "/task:", parts[2]); } if (parts_to_test & kDevice) { if (explicitDevice) { strings::StrAppend(&original, "/device:", parts[3]); strings::StrAppend(&expected, "/device:", parts[3]); } else { strings::StrAppend(&original, "/", parts[3]); strings::StrAppend(&expected, "/device:", absl::AsciiStrToUpper(parts[3])); } } return RoundTripParsedName(original, expected); } } TEST(DeviceNameUtilsTest, Basic) { EXPECT_EQ(DeviceNameUtils::FullName("hello", 1, 2, "CPU", 3), "/job:hello/replica:1/task:2/device:CPU:3"); { DeviceNameUtils::ParsedName p; EXPECT_FALSE(DeviceNameUtils::ParseFullName("foobar", &p)); EXPECT_FALSE(DeviceNameUtils::ParseFullName( "/job:123/replica:1/task:2/device:GPU:3", &p)); EXPECT_FALSE( DeviceNameUtils::ParseFullName("/job:123/replica:1/task:2/gpu:", &p)); EXPECT_FALSE(DeviceNameUtils::ParseFullName( "/job:123/replica:1/task:2/device:gpu:", &p)); EXPECT_FALSE(DeviceNameUtils::ParseFullName( "/job:foo/replica:-1/task:2/device:GPU:3", &p)); EXPECT_FALSE(DeviceNameUtils::ParseFullName( "/job:foo/replica:1/task:-2/device:GPU:3", &p)); EXPECT_FALSE( DeviceNameUtils::ParseFullName("/job:foo/replica:1/task:2/bar:3", &p)); EXPECT_FALSE(DeviceNameUtils::ParseFullName( "/job:foo/replica:1/task:2/device:GPU:3/extra", &p)); EXPECT_TRUE(DeviceNameUtils::ParseFullName( "/job:foo/replica:1/task:2/device:GPU:3", &p)); EXPECT_TRUE(p.has_job); EXPECT_TRUE(p.has_replica); EXPECT_TRUE(p.has_task); EXPECT_TRUE(p.has_type); EXPECT_TRUE(p.has_id); EXPECT_EQ(p.job, "foo"); EXPECT_EQ(p.replica, 1); EXPECT_EQ(p.task, 2); EXPECT_EQ(p.type, "GPU"); EXPECT_EQ(p.id, 3); } { DeviceNameUtils::ParsedName p; EXPECT_TRUE(DeviceNameUtils::ParseFullName( "/job:foo_bar/replica:1/task:2/device:GPU:3", &p)); EXPECT_TRUE(DeviceNameUtils::ParseFullOrLocalName( "/job:foo_bar/replica:1/task:2/device:GPU:3", &p)); EXPECT_TRUE(p.has_job); EXPECT_TRUE(p.has_replica); EXPECT_TRUE(p.has_task); EXPECT_TRUE(p.has_type); EXPECT_TRUE(p.has_id); EXPECT_EQ(p.job, "foo_bar"); EXPECT_EQ(p.replica, 1); EXPECT_EQ(p.task, 2); EXPECT_EQ(p.type, "GPU"); EXPECT_EQ(p.id, 3); } { DeviceNameUtils::ParsedName p; EXPECT_TRUE(DeviceNameUtils::ParseFullName( "/job:foo_bar/replica:1/task:2/device:GPU:3", &p)); EXPECT_TRUE(p.has_job); EXPECT_TRUE(p.has_replica); EXPECT_TRUE(p.has_task); EXPECT_TRUE(p.has_type); EXPECT_TRUE(p.has_id); EXPECT_EQ(p.job, "foo_bar"); EXPECT_EQ(p.replica, 1); EXPECT_EQ(p.task, 2); EXPECT_EQ(p.type, 
"GPU"); EXPECT_EQ(p.id, 3); } { DeviceNameUtils::ParsedName p; EXPECT_TRUE(DeviceNameUtils::ParseFullName("/job:*/replica:4/gpu:*", &p)); EXPECT_FALSE(p.has_job); EXPECT_TRUE(p.has_replica); EXPECT_FALSE(p.has_task); EXPECT_TRUE(p.has_type); EXPECT_FALSE(p.has_id); EXPECT_EQ(p.replica, 4); EXPECT_EQ(p.type, "GPU"); } { DeviceNameUtils::ParsedName p; EXPECT_TRUE( DeviceNameUtils::ParseFullName("/job:*/replica:4/device:GPU:*", &p)); EXPECT_FALSE(p.has_job); EXPECT_TRUE(p.has_replica); EXPECT_FALSE(p.has_task); EXPECT_TRUE(p.has_type); EXPECT_FALSE(p.has_id); EXPECT_EQ(p.replica, 4); EXPECT_EQ(p.type, "GPU"); } { DeviceNameUtils::ParsedName p; EXPECT_TRUE( DeviceNameUtils::ParseFullName("/job:*/device:GPU/replica:4", &p)); EXPECT_FALSE(p.has_job); EXPECT_TRUE(p.has_replica); EXPECT_FALSE(p.has_task); EXPECT_TRUE(p.has_type); EXPECT_FALSE(p.has_id); EXPECT_EQ(p.replica, 4); EXPECT_EQ(p.type, "GPU"); } { DeviceNameUtils::ParsedName p; EXPECT_TRUE(DeviceNameUtils::ParseFullName( "/job:*/replica:4/device:myspecialdevice:13", &p)); EXPECT_FALSE(p.has_job); EXPECT_TRUE(p.has_replica); EXPECT_FALSE(p.has_task); EXPECT_TRUE(p.has_type); EXPECT_TRUE(p.has_id); EXPECT_EQ(p.replica, 4); EXPECT_EQ(p.type, "myspecialdevice"); EXPECT_EQ(p.id, 13); } { DeviceNameUtils::ParsedName p; EXPECT_TRUE(DeviceNameUtils::ParseFullName("/", &p)); EXPECT_FALSE(p.has_job); EXPECT_FALSE(p.has_replica); EXPECT_FALSE(p.has_task); EXPECT_FALSE(p.has_type); EXPECT_FALSE(p.has_id); } { DeviceNameUtils::ParsedName p; EXPECT_TRUE( DeviceNameUtils::ParseFullName("/job:*/replica:4/device:GPU:5", &p)); EXPECT_FALSE(p.has_job); EXPECT_TRUE(p.has_replica); EXPECT_FALSE(p.has_task); EXPECT_TRUE(p.has_type); EXPECT_TRUE(p.has_id); EXPECT_EQ(p.replica, 4); EXPECT_EQ(p.type, "GPU"); EXPECT_EQ(p.id, 5); } { DeviceNameUtils::ParsedName p; EXPECT_TRUE(DeviceNameUtils::ParseFullName("/gpu:*/job:*/replica:4", &p)); EXPECT_FALSE(p.has_job); EXPECT_TRUE(p.has_replica); EXPECT_FALSE(p.has_task); EXPECT_TRUE(p.has_type); EXPECT_FALSE(p.has_id); EXPECT_EQ(p.replica, 4); EXPECT_EQ(p.type, "GPU"); } EXPECT_TRUE(DeviceNameUtils::IsSameAddressSpace( "/job:foo/replica:1/task:2/cpu:3", "/job:foo/replica:1/task:2/device:GPU:4")); EXPECT_FALSE(DeviceNameUtils::IsSameAddressSpace( "/job:foo/replica:1/task:2/cpu:3", "/job:foo/replica:1/task:3/device:GPU:4")); EXPECT_FALSE(DeviceNameUtils::IsSameAddressSpace( "/job:foo/replica:1/task:2/cpu:3", "/job:foo/replica:10/task:2/device:GPU:4")); EXPECT_FALSE(DeviceNameUtils::IsSameAddressSpace( "/job:foo/replica:1/task:2/cpu:3", "/job:bar/replica:1/task:2/device:GPU:4")); EXPECT_EQ(DeviceNameUtils::LocalName("CPU", 1), "/device:CPU:1"); EXPECT_EQ(DeviceNameUtils::LocalName("GPU", 2), "/device:GPU:2"); EXPECT_EQ(DeviceNameUtils::LocalName("MySpecialDevice", 13), "/device:MySpecialDevice:13"); EXPECT_EQ( DeviceNameUtils::LocalName("/job:foo/replica:1/task:2/device:CPU:3"), "/device:CPU:3"); EXPECT_EQ(DeviceNameUtils::LocalName("/job:foo/replica:1/task:2/cpu:3"), "/device:CPU:3"); EXPECT_EQ( DeviceNameUtils::LocalName("/job:foo/replica:1/task:2/device:abc:73"), "/device:abc:73"); { DeviceNameUtils::ParsedName p; EXPECT_TRUE(DeviceNameUtils::ParseLocalName("CPU:10", &p)); EXPECT_TRUE(DeviceNameUtils::ParseFullOrLocalName("CPU:10", &p)); EXPECT_EQ(p.type, "CPU"); EXPECT_EQ(p.id, 10); EXPECT_FALSE(DeviceNameUtils::ParseLocalName("cpu:abc", &p)); EXPECT_FALSE(DeviceNameUtils::ParseLocalName("abc:", &p)); EXPECT_FALSE(DeviceNameUtils::ParseLocalName("abc", &p)); 
EXPECT_FALSE(DeviceNameUtils::ParseLocalName("myspecialdevice", &p)); EXPECT_FALSE(DeviceNameUtils::ParseFullOrLocalName("myspecialdevice", &p)); } { for (int i = 0; i < 0x10; ++i) { EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "CPU:3"}, false)); EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "GPU:3"}, false)); EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "cpu:3"}, false)); EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "gpu:3"}, false)); EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "CPU:3"}, true)); EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "GPU:3"}, true)); EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "cpu:3"}, true)); EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "gpu:3"}, true)); EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "someDevice:3"}, true)); } } { DeviceNameUtils::ParsedName x, y; DeviceNameUtils::ParseFullName("/job:work/replica:1/task:3/device:GPU:*", &x); DeviceNameUtils::ParseFullName("/device:CPU:*", &y); EXPECT_FALSE(DeviceNameUtils::AreCompatibleDevNames(x, y)); } { DeviceNameUtils::ParsedName x, y; DeviceNameUtils::ParseFullName("/job:work/replica:1/task:3", &x); DeviceNameUtils::ParseFullName("/device:CPU:*", &y); EXPECT_TRUE(DeviceNameUtils::AreCompatibleDevNames(x, y)); } } static bool IsCSHelper(absl::string_view pattern, absl::string_view actual) { DeviceNameUtils::ParsedName p, a; EXPECT_TRUE(DeviceNameUtils::ParseFullName(pattern, &p)); EXPECT_TRUE(DeviceNameUtils::ParseFullName(actual, &a)); return DeviceNameUtils::IsCompleteSpecification(p, a); } TEST(DeviceNameUtilsTest, IsCompleteSpecification) { EXPECT_TRUE(IsCSHelper("/job:*", "/job:work/replica:1/task:2/device:GPU:3")); EXPECT_TRUE(IsCSHelper("/job:*/replica:*", "/job:work/replica:1/task:2/device:GPU:3")); EXPECT_TRUE( IsCSHelper("/job:*/task:*", "/job:work/replica:1/task:2/device:GPU:3")); EXPECT_TRUE(IsCSHelper("/job:*/replica:*/task:*", "/job:work/replica:1/task:2/device:GPU:3")); EXPECT_TRUE(IsCSHelper("/job:*/replica:*/gpu:*", "/job:work/replica:1/task:2/device:GPU:3")); EXPECT_FALSE( IsCSHelper("/cpu:*", "/job:worker/replica:1/task:2/device:GPU:3")); EXPECT_FALSE( IsCSHelper("/device:GPU:2", "/job:worker/replica:1/task:2/device:GPU:1")); EXPECT_TRUE( IsCSHelper("/gpu:*", "/job:worker/replica:1/task:2/device:GPU:3")); } static bool IsSpecHelper(absl::string_view pattern, absl::string_view actual) { DeviceNameUtils::ParsedName p, a; EXPECT_TRUE(DeviceNameUtils::ParseFullName(pattern, &p)); EXPECT_TRUE(DeviceNameUtils::ParseFullName(actual, &a)); return DeviceNameUtils::IsSpecification(p, a); } TEST(DeviceNameUtilsTest, IsSpecification) { EXPECT_TRUE( IsSpecHelper("/job:*", "/job:work/replica:1/task:2/device:GPU:3")); EXPECT_TRUE(IsSpecHelper("/job:*", "/job:work/replica:1/device:GPU:3")); EXPECT_TRUE(IsSpecHelper("/job:*", "/job:work/replica:1")); EXPECT_TRUE(IsSpecHelper("/job:*", "/replica:1")); EXPECT_TRUE(IsSpecHelper("/job:*", "/job:work")); EXPECT_TRUE(IsSpecHelper("/job:*/replica:*", "/job:work/replica:1/task:2/device:GPU:3")); EXPECT_TRUE(IsSpecHelper("/job:work/replica:1/gpu:*", "/job:work/replica:1/task:2/device:GPU:3")); EXPECT_TRUE(IsSpecHelper("/job:work/replica:1/device:GPU:3", "/job:work/replica:1/task:2/device:GPU:3")); EXPECT_TRUE(IsSpecHelper("/job:work/replica:1/task:2", "/job:work/replica:1/task:2/device:GPU:3")); EXPECT_TRUE(IsSpecHelper("/job:work/replica:*/task:2", "/job:work/replica:1/task:2/device:GPU:3")); EXPECT_TRUE(IsSpecHelper("/task:*", "/job:*/replica:1/task:2/device:GPU:3")); 
EXPECT_TRUE(IsSpecHelper("/task:2", "/job:*/replica:1/task:2/device:GPU:3")); EXPECT_TRUE(IsSpecHelper("/cpu:*", "/job:*/replica:1/task:2/cpu:1")); EXPECT_TRUE(IsSpecHelper("/cpu:0", "/cpu:0")); EXPECT_TRUE( IsSpecHelper("/gpu:*", "/job:worker/replica:1/task:2/device:GPU:3")); EXPECT_FALSE( IsSpecHelper("/job:worker/replica:1/task:2/device:GPU:3", "/gpu:*")); EXPECT_FALSE(IsSpecHelper("/cpu:*", "/job:*/replica:1/task:2")); EXPECT_FALSE(IsSpecHelper("/cpu:*", "/job:*/replica:1/task:2/device:GPU:1")); EXPECT_FALSE( IsSpecHelper("/cpu:*", "/job:worker/replica:1/task:2/device:GPU:3")); EXPECT_FALSE(IsSpecHelper("/device:GPU:2", "/job:worker/replica:1/task:2/device:GPU:1")); EXPECT_FALSE(IsSpecHelper("/job:work/replica:*/task:0", "/job:work/replica:1/task:2/device:GPU:3")); EXPECT_FALSE(IsSpecHelper("/job:work/replica:0/task:2", "/job:work/replica:*/task:2/device:GPU:3")); } TEST(DeviceNameUtilsTest, SplitDeviceName) { string task; string device; EXPECT_TRUE(DeviceNameUtils::SplitDeviceName( "/job:foo/replica:1/task:2/cpu:1", &task, &device)); EXPECT_EQ("/job:foo/replica:1/task:2", task); EXPECT_EQ("CPU:1", device); EXPECT_TRUE(DeviceNameUtils::SplitDeviceName( "/job:foo/cpu:1/task:2/replica:1", &task, &device)); EXPECT_EQ("/job:foo/replica:1/task:2", task); EXPECT_EQ("CPU:1", device); EXPECT_TRUE( DeviceNameUtils::SplitDeviceName("/device:GPU:3", &task, &device)); EXPECT_EQ("", task); EXPECT_EQ("GPU:3", device); EXPECT_FALSE(DeviceNameUtils::SplitDeviceName("gpu:3", &task, &device)); EXPECT_FALSE(DeviceNameUtils::SplitDeviceName("/job:foo/task:2/replica:1", &task, &device)); EXPECT_TRUE(DeviceNameUtils::SplitDeviceName("/device:myspecialdevice:3", &task, &device)); EXPECT_EQ("", task); EXPECT_EQ("myspecialdevice:3", device); } static DeviceNameUtils::ParsedName Name(const string& str) { DeviceNameUtils::ParsedName ret; CHECK(DeviceNameUtils::ParseFullName(str, &ret)) << "Invalid name: " << str; return ret; } static void MergeDevNamesHelperImpl(const string& name_a, const string& name_b, const string& expected_merge_name, bool allow_soft_placement) { DeviceNameUtils::ParsedName target_a = Name(name_a); TF_EXPECT_OK(DeviceNameUtils::MergeDevNames(&target_a, Name(name_b), allow_soft_placement)); DeviceNameUtils::ParsedName target_b = Name(name_b); TF_EXPECT_OK(DeviceNameUtils::MergeDevNames(&target_b, Name(name_a), allow_soft_placement)); EXPECT_EQ(target_a, target_b); EXPECT_EQ(target_a, Name(expected_merge_name)); EXPECT_EQ(target_b, Name(expected_merge_name)); } static void MergeDevNamesHelper(const string& name_a, const string& name_b, const string& expected_merge_name) { MergeDevNamesHelperImpl(name_a, name_b, expected_merge_name, false); } static void MergeDevNamesHelperAllowSoftPlacement( const string& name_a, const string& name_b, const string& expected_merge_name) { MergeDevNamesHelperImpl(name_a, name_b, expected_merge_name, true); } static void MergeDevNamesError(const string& name_a, const string& name_b, const string& expected_error_substr) { DeviceNameUtils::ParsedName target_a = Name(name_a); absl::Status s = DeviceNameUtils::MergeDevNames(&target_a, Name(name_b)); EXPECT_EQ(s.code(), error::INVALID_ARGUMENT); EXPECT_TRUE(absl::StrContains(s.message(), expected_error_substr)) << s; } static void MergeOverrideHelper(const string& target, const string& name, const string& expected_merge_name) { DeviceNameUtils::ParsedName parsed_target = Name(target); TF_EXPECT_OK( DeviceNameUtils::MergeOverrideDevNames(&parsed_target, Name(name))); DeviceNameUtils::ParsedName parsed_expected = 
Name(expected_merge_name); EXPECT_EQ(parsed_target, parsed_expected) << "parsed_target: " << DeviceNameUtils::ParsedNameToString(parsed_target) << " expected_name: " << DeviceNameUtils::ParsedNameToString(parsed_expected); } static void MergeUnsetDevNamesHelper(const string& name_a, const string& name_b, const string& expected_merge_name_ab, const string& expected_merge_name_ba) { DeviceNameUtils::ParsedName target_a = Name(name_a); DeviceNameUtils::MergeUnsetDevNames(&target_a, Name(name_b)); EXPECT_EQ(target_a, Name(expected_merge_name_ab)); DeviceNameUtils::ParsedName target_b = Name(name_b); DeviceNameUtils::MergeUnsetDevNames(&target_b, Name(name_a)); EXPECT_EQ(target_b, Name(expected_merge_name_ba)); } TEST(DeviceNameUtilsTest, MergeDevNames) { MergeDevNamesHelper("", "", ""); MergeDevNamesHelper("/job:foo/replica:1/task:2/cpu:1", "/job:foo/replica:1/task:2/cpu:1", "/job:foo/replica:1/task:2/cpu:1"); MergeDevNamesHelper("", "/job:foo", "/job:foo"); MergeDevNamesHelper("", "/replica:2", "/replica:2"); MergeDevNamesHelper("", "/task:7", "/task:7"); MergeDevNamesHelper("", "/device:GPU:1", "/device:GPU:1"); MergeDevNamesHelper("/job:foo", "/task:7", "/job:foo/task:7"); MergeDevNamesHelper("/job:foo", "/device:GPU:1", "/job:foo/device:GPU:1"); MergeDevNamesHelper("/job:foo/replica:0", "/replica:0/task:1", "/job:foo/replica:0/task:1"); MergeDevNamesHelper("", "/gpu:*", "/gpu:*"); MergeDevNamesHelper("/gpu:*", "/gpu:*", "/gpu:*"); MergeDevNamesHelper("/device:GPU:1", "/gpu:*", "/device:GPU:1"); MergeDevNamesError("/job:foo", "/job:bar", "incompatible jobs"); MergeDevNamesError("/replica:0", "/replica:1", "incompatible replicas"); MergeDevNamesError("/task:0", "/task:1", "incompatible tasks"); MergeDevNamesError("/gpu:*", "/cpu:*", "incompatible types"); MergeDevNamesError("/device:GPU:0", "/device:GPU:1", "incompatible ids"); } TEST(DeviceNameUtilsTest, MergeDevNamesAllowSoftPlacement) { MergeDevNamesHelperAllowSoftPlacement("/gpu:*", "/cpu:1", ""); MergeDevNamesHelperAllowSoftPlacement("/cpu:*", "/device:GPU:1", ""); MergeDevNamesHelperAllowSoftPlacement("/device:GPU:1", "/device:GPU:2", "/device:GPU:*"); } TEST(DeviceNameUtilsTest, MergeOverrideDevNames) { MergeOverrideHelper("", "", ""); MergeOverrideHelper("/job:foo/replica:1/task:2/cpu:1", "/job:foo/replica:1/task:2/cpu:1", "/job:foo/replica:1/task:2/cpu:1"); MergeOverrideHelper("", "/job:foo", "/job:foo"); MergeOverrideHelper("", "/replica:2", "/replica:2"); MergeOverrideHelper("", "/task:7", "/task:7"); MergeOverrideHelper("", "/device:GPU:1", "/device:GPU:1"); MergeOverrideHelper("/job:foo", "/task:7", "/job:foo/task:7"); MergeOverrideHelper("/job:foo", "/device:GPU:1", "/job:foo/device:GPU:1"); MergeOverrideHelper("/job:foo/replica:0", "/replica:0/task:1", "/job:foo/replica:0/task:1"); MergeOverrideHelper("", "/gpu:*", "/gpu:*"); MergeOverrideHelper("/gpu:*", "/gpu:*", "/gpu:*"); MergeOverrideHelper("/device:GPU:1", "/gpu:*", "/device:GPU:1"); MergeOverrideHelper("/gpu:0", "/cpu:1", "/cpu:1"); MergeOverrideHelper("/gpu:*", "/cpu:1", "/cpu:1"); MergeOverrideHelper("/cpu:*", "/device:GPU:1", "/gpu:1"); MergeOverrideHelper("/device:GPU:1", "/device:GPU:2", "/device:GPU:2"); MergeOverrideHelper("/job:foo/CPU:*", "/device:GPU:1", "/job:foo/GPU:1"); MergeOverrideHelper("/cpu:*", "/job:foo/device:GPU:1", "/job:foo/GPU:1"); MergeOverrideHelper("/task:0/cpu:*", "/device:GPU:1", "/task:0/GPU:1"); MergeOverrideHelper("/cpu:*", "/task:0/device:GPU:1", "/task:0/GPU:1"); } TEST(DeviceNameUtilsTest, MergeUnsetDevNames) { 
MergeUnsetDevNamesHelper("", "", "", ""); MergeUnsetDevNamesHelper( "/job:foo/replica:1/task:2/cpu:1", "/job:foo/replica:1/task:2/cpu:1", "/job:foo/replica:1/task:2/cpu:1", "/job:foo/replica:1/task:2/cpu:1"); MergeUnsetDevNamesHelper("", "/job:foo", "/job:foo", "/job:foo"); MergeUnsetDevNamesHelper("", "/replica:2", "/replica:2", "/replica:2"); MergeUnsetDevNamesHelper("", "/task:7", "/task:7", "/task:7"); MergeUnsetDevNamesHelper("", "/device:GPU:1", "/device:GPU:1", "/device:GPU:1"); MergeUnsetDevNamesHelper("/job:foo", "/task:7", "/job:foo/task:7", "/job:foo/task:7"); MergeUnsetDevNamesHelper("/job:foo", "/device:GPU:1", "/job:foo/device:GPU:1", "/job:foo/device:GPU:1"); MergeUnsetDevNamesHelper("/job:foo/replica:0", "/replica:0/task:1", "/job:foo/replica:0/task:1", "/job:foo/replica:0/task:1"); MergeUnsetDevNamesHelper("", "/gpu:*", "/gpu:*", "/gpu:*"); MergeUnsetDevNamesHelper("/gpu:*", "/gpu:*", "/gpu:*", "/gpu:*"); MergeUnsetDevNamesHelper("/device:GPU:1", "/gpu:*", "/device:GPU:1", "/device:GPU:1"); MergeUnsetDevNamesHelper("/job:foo", "/job:bar", "/job:foo", "/job:bar"); MergeUnsetDevNamesHelper("/replica:0", "/replica:1", "/replica:0", "/replica:1"); MergeUnsetDevNamesHelper("/task:0", "/task:1", "/task:0", "/task:1"); MergeUnsetDevNamesHelper("/gpu:*", "/cpu:*", "/gpu:*", "/cpu:*"); MergeUnsetDevNamesHelper("/device:GPU:0", "/device:GPU:1", "/device:GPU:0", "/device:GPU:1"); MergeUnsetDevNamesHelper("/job:foo/device:GPU", "/job:bar", "/job:foo/device:GPU", "/job:bar/device:GPU"); } TEST(DeviceNameUtilsTest, GetNamesForDeviceMappings) { DeviceNameUtils::ParsedName p = Name("/job:foo/replica:10/task:0/device:GPU:1"); EXPECT_EQ(absl::StrJoin(DeviceNameUtils::GetNamesForDeviceMappings(p), ","), "/job:foo/replica:10/task:0/device:GPU:1," "/job:foo/replica:10/task:0/gpu:1"); p.has_task = false; EXPECT_EQ(absl::StrJoin(DeviceNameUtils::GetNamesForDeviceMappings(p), ","), ""); } TEST(DeviceNameUtilsTest, CanonicalizeDeviceName) { string canonical_name; { string basename = "/job:foo/replica:10/task:0/device:CPU:0"; TF_EXPECT_OK(DeviceNameUtils::CanonicalizeDeviceName( "/job:foo/replica:10/task:0/device:CPU:1", basename, &canonical_name)); EXPECT_EQ("/job:foo/replica:10/task:0/device:CPU:1", canonical_name); TF_EXPECT_OK(DeviceNameUtils::CanonicalizeDeviceName( "/job:foo/task:0/replica:10/device:CPU:1", basename, &canonical_name)); EXPECT_EQ("/job:foo/replica:10/task:0/device:CPU:1", canonical_name); TF_EXPECT_OK(DeviceNameUtils::CanonicalizeDeviceName( "/job:foo/task:0/replica:10/cpu:1", basename, &canonical_name)); EXPECT_EQ("/job:foo/replica:10/task:0/device:CPU:1", canonical_name); TF_EXPECT_OK(DeviceNameUtils::CanonicalizeDeviceName("CPU:0", basename, &canonical_name)); EXPECT_EQ("/job:foo/replica:10/task:0/device:CPU:0", canonical_name); absl::Status s = DeviceNameUtils::CanonicalizeDeviceName( "/job:foo/task:0/replica/cpu:1", basename, &canonical_name); EXPECT_EQ(s.code(), error::INVALID_ARGUMENT); EXPECT_EQ("", canonical_name); } { string fullname = "/device:CPU:0"; absl::Status s = DeviceNameUtils::CanonicalizeDeviceName( fullname, "/device:CPU:0", &canonical_name); EXPECT_EQ(s.code(), error::INVALID_ARGUMENT); EXPECT_EQ("", canonical_name); s = DeviceNameUtils::CanonicalizeDeviceName( fullname, "/job:foo/task:0/replica/cpu:1", &canonical_name); EXPECT_EQ(s.code(), error::INVALID_ARGUMENT); EXPECT_EQ("", canonical_name); } } TEST(DeviceNameUtilsTest, CompareFullNames) { EXPECT_FALSE(DeviceNameUtils::CompareFullNames( "/job:foo/replica:0/task:0/cpu:0", 
"/job:foo/replica:0/task:0/cpu:0")); EXPECT_FALSE(DeviceNameUtils::CompareFullNames( "/job:foo/replica:0/task:0/device:CPU:1", "/job:foo/replica:0/task:0/device:CPU:0")); EXPECT_FALSE(DeviceNameUtils::CompareFullNames( "/job:foo/replica:0/task:1/device:CPU:0", "/job:foo/replica:0/task:0/device:CPU:0")); EXPECT_FALSE(DeviceNameUtils::CompareFullNames( "/job:foo/replica:1/task:0/device:CPU:0", "/job:foo/replica:0/task:0/device:CPU:0")); EXPECT_FALSE(DeviceNameUtils::CompareFullNames( "/job:goo/replica:0/task:0/device:CPU:0", "/job:foo/replica:0/task:0/device:CPU:0")); EXPECT_FALSE(DeviceNameUtils::CompareFullNames( "/job:foo/replica:0/task:0/device:GPU:0", "/job:foo/replica:0/task:0/device:CPU:0")); EXPECT_TRUE(DeviceNameUtils::CompareFullNames( "/job:foo/replica:0/task:0/device:CPU:0", "/job:foo/replica:0/task:0/device:CPU:1")); EXPECT_TRUE(DeviceNameUtils::CompareFullNames( "/job:foo/replica:0/task:0/device:CPU:0", "/job:foo/replica:0/task:1/device:CPU:0")); EXPECT_TRUE(DeviceNameUtils::CompareFullNames( "/job:foo/replica:0/task:0/device:CPU:0", "/job:foo/replica:1/task:0/device:CPU:0")); EXPECT_TRUE(DeviceNameUtils::CompareFullNames( "/job:foo/replica:0/task:0/device:CPU:0", "/job:goo/replica:0/task:0/device:CPU:0")); EXPECT_TRUE(DeviceNameUtils::CompareFullNames( "/job:foo/replica:0/task:0/device:CPU:0", "/job:foo/replica:0/task:0/device:GPU:0")); EXPECT_FALSE( DeviceNameUtils::CompareFullNames("/device:CPU:1", "unparseablename")); EXPECT_TRUE( DeviceNameUtils::CompareFullNames("unparseablename", "/device:CPU:1")); EXPECT_TRUE(DeviceNameUtils::CompareFullNames( "/replica:0/task:0/device:CPU:1", "/job:foo/replica:0/task:0/device:CPU:0")); EXPECT_FALSE(DeviceNameUtils::CompareFullNames( "/job:foo/replica:0/task:0/device:CPU:0", "/replica:0/task:0/device:CPU:0")); EXPECT_TRUE(DeviceNameUtils::CompareFullNames( "/replica:0/task:0/device:CPU:0", "/replica:0/task:0/device:CPU:1")); EXPECT_TRUE(DeviceNameUtils::CompareFullNames("/task:0/device:CPU:0", "/task:0/device:CPU:1")); EXPECT_TRUE( DeviceNameUtils::CompareFullNames("/device:CPU:0", "/device:CPU:1")); } static void BM_ParseFullName(::testing::benchmark::State& state) { DeviceNameUtils::ParsedName p; for (auto s : state) { DeviceNameUtils::ParseFullName("/job:worker/replica:3/task:0/cpu:0", &p); } } BENCHMARK(BM_ParseFullName); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/util/device_name_utils.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/util/device_name_utils_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
2cd5293f-5da0-4198-a703-16efdd40591a
cpp
tensorflow/tensorflow
collective_permute_motion
third_party/xla/xla/service/spmd/collective_permute_motion.cc
third_party/xla/xla/service/spmd/collective_permute_motion_test.cc
#include "xla/service/spmd/collective_permute_motion.h" #include <cstdint> #include <deque> #include <optional> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/comparison_util.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/while_loop_analysis.h" #include "xla/shape_util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { absl::flat_hash_set<HloInstruction*> FindLoopConsts(HloComputation* body) { HloInstruction* root = body->root_instruction(); CHECK_EQ(root->opcode(), HloOpcode::kTuple); absl::flat_hash_set<HloInstruction*> loop_consts; for (int64_t i = 0; i < root->operand_count(); ++i) { HloInstruction* output = root->mutable_operand(i); while (output->opcode() == HloOpcode::kReshape || output->opcode() == HloOpcode::kCopy) { output = output->mutable_operand(0); } if (output->opcode() == HloOpcode::kGetTupleElement && output->tuple_index() == i && output->operand(0) == body->parameter_instruction(0)) { loop_consts.insert(output); } } for (HloInstruction* inst : body->MakeInstructionPostOrder()) { if (inst->IsConstant() || inst->opcode() == HloOpcode::kIota || inst->opcode() == HloOpcode::kReplicaId || inst->opcode() == HloOpcode::kPartitionId) { loop_consts.insert(inst); continue; } if (!inst->IsElementwise() && inst->opcode() != HloOpcode::kBroadcast && inst->opcode() != HloOpcode::kReduce && inst->opcode() != HloOpcode::kReshape && inst->opcode() != HloOpcode::kDynamicSlice && inst->opcode() != HloOpcode::kTranspose) { continue; } if (inst->HasSideEffectNoRecurse()) { continue; } if (absl::c_all_of(inst->operands(), [&](const HloInstruction* operand) { return loop_consts.contains(operand); })) { loop_consts.insert(inst); } } return loop_consts; } constexpr int64_t kMaxMovableClusterSize = 8; struct MovableCluster { int64_t root_tuple_index; std::vector<HloInstruction*> reverse_order_instructions; HloInstruction* collective_permute = nullptr; }; std::optional<MovableCluster> FindMovableClusterAtBodyRoot( HloComputation* body, int64_t root_tuple_index, const absl::flat_hash_set<HloInstruction*>& loop_consts) { HloInstruction* root = body->root_instruction(); CHECK_EQ(root->opcode(), HloOpcode::kTuple); MovableCluster cluster; cluster.root_tuple_index = root_tuple_index; std::deque<HloInstruction*> queue; queue.push_back(root->mutable_operand(root_tuple_index)); while (!queue.empty()) { HloInstruction* visiting = queue.front(); queue.pop_front(); if (cluster.reverse_order_instructions.size() >= kMaxMovableClusterSize) { VLOG(2) << "Cannot move: too many instructions to move"; return std::nullopt; } if (visiting->user_count() > 1) { VLOG(2) << "Cannot move: " << visiting->name() << " used multiple times"; return std::nullopt; } cluster.reverse_order_instructions.push_back(visiting); if (visiting->opcode() == HloOpcode::kCollectivePermute) { if (cluster.collective_permute != nullptr) { VLOG(2) << "Cannot move: " << visiting->name() << " multiple collective permutes"; return std::nullopt; } cluster.collective_permute = visiting; continue; } if (!visiting->IsElementwise() || visiting->HasSideEffectNoRecurse()) { VLOG(2) << "Cannot move: " << visiting->name() << " unsupported op"; return 
std::nullopt; } for (HloInstruction* operand : visiting->mutable_operands()) { if (!loop_consts.contains(operand)) { queue.push_back(operand); } } } if (cluster.collective_permute == nullptr) { return std::nullopt; } return cluster; } absl::flat_hash_set<int64_t> FindIndicesUnusedAfterLoop(HloInstruction* loop) { absl::flat_hash_set<int64_t> indices; int64_t count = loop->shape().tuple_shapes_size(); for (int64_t i = 0; i < count; ++i) { indices.insert(i); } for (HloInstruction* user : loop->users()) { if (user->opcode() != HloOpcode::kGetTupleElement) { indices.clear(); break; } indices.erase(user->tuple_index()); } return indices; } absl::StatusOr<bool> MoveCollectivePermutes(HloComputation* computation, HloInstruction* loop) { HloComputation* body = loop->while_body(); HloInstruction* root = body->root_instruction(); if (root->opcode() != HloOpcode::kTuple || loop->operand(0)->opcode() != HloOpcode::kTuple) { return false; } auto maybe_induction_var_idx = GetLoopInductionVarTupleIdx(loop); if (!maybe_induction_var_idx.has_value()) { VLOG(2) << "Skip " << loop->name() << ", no induction var"; return false; } absl::flat_hash_map<const HloInstruction*, int64_t> output_appear_counts; for (const HloInstruction* operand : root->operands()) { auto res = output_appear_counts.emplace(operand, 1); if (!res.second) { res.first->second++; } } absl::flat_hash_set<int64_t> unused_indices_after_loop = FindIndicesUnusedAfterLoop(loop); const absl::flat_hash_set<HloInstruction*> loop_consts = FindLoopConsts(body); int64_t induction_var_idx = *maybe_induction_var_idx; std::vector<HloInstruction*> input_gtes(root->operand_count(), nullptr); absl::flat_hash_set<int64_t> multi_use_indices; for (HloInstruction* user : body->parameter_instruction(0)->users()) { if (user->opcode() != HloOpcode::kGetTupleElement) { VLOG(2) << "Skip " << loop->name() << ", non-GTE input use"; return false; } if (multi_use_indices.contains(user->tuple_index())) { continue; } if (input_gtes[user->tuple_index()] != nullptr) { multi_use_indices.insert(user->tuple_index()); input_gtes[user->tuple_index()] = nullptr; } else { input_gtes[user->tuple_index()] = user; } } HloInstruction* ind_var = input_gtes[induction_var_idx]; if (ind_var == nullptr || ind_var->shape().rank() > 0) { VLOG(2) << "Skip " << loop->name() << ", non-scalar induction var"; return false; } if (root->operand(induction_var_idx)->opcode() != HloOpcode::kAdd && root->operand(induction_var_idx)->opcode() != HloOpcode::kSubtract) { VLOG(2) << "Skip " << loop->name() << ", non-add/sub induction var"; return false; } if (root->operand(induction_var_idx)->operand(0) == ind_var) { if (!root->operand(induction_var_idx)->operand(1)->IsConstant()) { VLOG(2) << "Skip " << loop->name() << ", non-add/sub const induction var"; return false; } } else if (root->operand(induction_var_idx)->operand(1) == ind_var) { if (!root->operand(induction_var_idx)->operand(0)->IsConstant()) { VLOG(2) << "Skip " << loop->name() << ", non-add/sub const induction var"; return false; } } else { return false; } HloInstruction* ind_var_orig = loop->mutable_operand(0)->mutable_operand(induction_var_idx); if (!ind_var_orig->IsConstant()) { VLOG(2) << "Skip " << loop->name() << ", non-constant initial induction var"; return false; } bool changed = false; std::vector<MovableCluster> movable_outputs; for (int64_t i = 0; i < root->operand_count(); ++i) { if (output_appear_counts[root->operand(i)] > 1) { VLOG(2) << "Skip " << loop->name() << " index " << i << " appears multiple times in output."; continue; 
} if (!unused_indices_after_loop.contains(i)) { VLOG(2) << "Skip " << loop->name() << " index " << i << " used after loop."; continue; } auto cluster = FindMovableClusterAtBodyRoot(body, i, loop_consts); if (!cluster.has_value()) { VLOG(2) << "Skip " << loop->name() << " index " << i << " did not find a movable cluster."; continue; } HloInstruction* input = input_gtes[cluster->root_tuple_index]; HloInstruction* cp = cluster->collective_permute; if (input == nullptr || cp->operand(0) == input) { VLOG(2) << "Skip " << loop->name() << " index " << i << " collective-permute already at top."; continue; } const std::vector<HloInstruction*> original_input_users = input->users(); absl::flat_hash_map<const HloInstruction*, HloInstruction*> replacement; replacement[cp->operand(0)] = input; for (auto it = cluster->reverse_order_instructions.rbegin(); it != cluster->reverse_order_instructions.rend(); ++it) { HloInstruction* inst = *it; std::vector<HloInstruction*> new_operands; for (HloInstruction* operand : inst->mutable_operands()) { auto rit = replacement.find(operand); if (rit != replacement.end()) { new_operands.push_back(rit->second); } else { new_operands.push_back(operand); } } HloInstruction* clone = body->AddInstruction( inst->CloneWithNewOperands(inst->shape(), new_operands)); replacement[inst] = clone; } HloInstruction* new_input = replacement[cluster->reverse_order_instructions[0]]; if (ind_var_orig->parent() != body) { ind_var_orig = body->AddInstruction(ind_var_orig->Clone()); } HloInstruction* is_first_iter = body->AddInstruction(HloInstruction::CreateBroadcast( ShapeUtil::ChangeElementType(new_input->shape(), PRED), body->AddInstruction(HloInstruction::CreateCompare( ShapeUtil::MakeScalarShape(PRED), ind_var, ind_var_orig, Comparison::Direction::kEq)), {})); new_input = body->AddInstruction( HloInstruction::CreateTernary(new_input->shape(), HloOpcode::kSelect, is_first_iter, input, new_input)); for (HloInstruction* user : original_input_users) { TF_RETURN_IF_ERROR(input->ReplaceUseWith(user, new_input)); } TF_RETURN_IF_ERROR(root->ReplaceOperandWith(cluster->root_tuple_index, cp->mutable_operand(0))); TF_RETURN_IF_ERROR(body->RemoveInstructionAndUnusedOperands( cluster->reverse_order_instructions[0])); VLOG(2) << "Moved " << loop->name() << " index " << i; changed = true; } return changed; } absl::StatusOr<bool> CollectivePermuteMotion::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { for (HloInstruction* instr : computation->MakeInstructionPostOrder()) { if (instr->opcode() == HloOpcode::kWhile) { TF_ASSIGN_OR_RETURN(bool moved, MoveCollectivePermutes(computation, instr)); changed |= moved; } } } return changed; } }
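A short sketch of applying the pass defined above to a module, following the pattern used by the tests that follow. The RunCollectivePermuteMotion helper name is an assumption; the module is presumed to have been built or parsed elsewhere.

#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/spmd/collective_permute_motion.h"

// Hypothetical helper: runs CollectivePermuteMotion over `module` and
// returns true iff a collective-permute cluster was hoisted out of some
// while-loop body (i.e. the module changed).
absl::StatusOr<bool> RunCollectivePermuteMotion(xla::HloModule* module) {
  xla::CollectivePermuteMotion pass;
  // The single-argument Run() overload visits all non-fusion computations,
  // matching the pass.Run(&*module) calls in the tests below.
  return pass.Run(module);
}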
#include "xla/service/spmd/collective_permute_motion.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/log/log.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" namespace xla { namespace { using CollectivePermuteMotionTest = HloTestBase; namespace op = xla::testing::opcode_matchers; TEST_F(CollectivePermuteMotionTest, SimpleMove) { absl::string_view hlo_string = R"( HloModule test body { loop_var = (s32[], f32[4,4]) parameter(0) constant.1 = s32[] constant(1) gte0 = s32[] get-tuple-element(loop_var), index=0 add = s32[] add(gte0, constant.1) gte1 = f32[4,4] get-tuple-element(loop_var), index=1 mul = f32[4,4] multiply(gte1, gte1) cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}} ROOT tuple = (s32[], f32[4,4]) tuple(add, cp) } cond { loop_var = (s32[], f32[4,4]) parameter(0) gte.cond = s32[] get-tuple-element(loop_var), index=0 constant.3 = s32[] constant(5) ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT } ENTRY main { constant.2 = s32[] constant(0) param = f32[4,4] parameter(0) tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param) while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body ROOT result = s32[] get-tuple-element(while), index=0 } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); CollectivePermuteMotion pass; ASSERT_TRUE(pass.Run(&*module).value()); VLOG(1) << module->ToString(); const HloInstruction* loop = FindInstruction(module.get(), "while"); const HloInstruction* output = loop->while_body()->root_instruction()->operand(1); auto input = AllOf(op::Shape("f32[4,4]"), op::GetTupleElement(op::Parameter(0))); auto cp = op::CollectivePermute(input); auto select = op::Select(op::Broadcast(op::Compare()), input, cp); EXPECT_THAT(output, op::Multiply(select, select)); } TEST_F(CollectivePermuteMotionTest, NoCollectivePermute) { absl::string_view hlo_string = R"( HloModule test body { loop_var = (s32[], f32[], f32[]) parameter(0) constant.1 = s32[] constant(1) gte0 = s32[] get-tuple-element(loop_var), index=0 add = s32[] add(gte0, constant.1) gte1 = f32[] get-tuple-element(loop_var), index=1 constant.4 = f32[] constant(4.0) ROOT tuple = (s32[], f32[], f32[]) tuple(add, constant.4, gte1) } cond { loop_var = (s32[], f32[], f32[]) parameter(0) gte.cond = s32[] get-tuple-element(loop_var), index=0 constant.3 = s32[] constant(5) ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT } ENTRY main { constant.2 = s32[] constant(0) param = f32[] parameter(0) param.1 = f32[] parameter(1) tuple.1 = (s32[], f32[], f32[]) tuple(constant.2, param, param.1) while = (s32[], f32[], f32[]) while(tuple.1), condition=cond, body=body ROOT result = s32[] get-tuple-element(while), index=0 } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); CollectivePermuteMotion pass; ASSERT_FALSE(pass.Run(&*module).value()); } TEST_F(CollectivePermuteMotionTest, MoveWithElementwise) { absl::string_view hlo_string = R"( HloModule test body { loop_var = (s32[], f32[4,4]) parameter(0) constant.1 = s32[] constant(1) gte0 = s32[] get-tuple-element(loop_var), index=0 add = s32[] add(gte0, constant.1) gte1 = f32[4,4] get-tuple-element(loop_var), index=1 mul = f32[4,4] multiply(gte1, gte1) cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}} constant.4 = f32[] constant(1) broadcast = f32[4,4] broadcast(constant.4), dimensions={} add1 = f32[4,4] add(cp, broadcast) ROOT 
tuple = (s32[], f32[4,4]) tuple(add, add1) } cond { loop_var = (s32[], f32[4,4]) parameter(0) gte.cond = s32[] get-tuple-element(loop_var), index=0 constant.3 = s32[] constant(5) ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT } ENTRY main { constant.2 = s32[] constant(0) param = f32[4,4] parameter(0) tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param) while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body ROOT result = s32[] get-tuple-element(while), index=0 } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); CollectivePermuteMotion pass; ASSERT_TRUE(pass.Run(&*module).value()); VLOG(1) << module->ToString(); const HloInstruction* loop = FindInstruction(module.get(), "while"); const HloInstruction* output = loop->while_body()->root_instruction()->operand(1); auto input = AllOf(op::Shape("f32[4,4]"), op::GetTupleElement(op::Parameter(0))); auto moved = op::Add(op::CollectivePermute(input), op::Broadcast(op::Constant())); auto select = op::Select(op::Broadcast(op::Compare()), input, moved); EXPECT_THAT(output, op::Multiply(select, select)); } TEST_F(CollectivePermuteMotionTest, DoNotMoveWithNonConstElementwise) { absl::string_view hlo_string = R"( HloModule test body { loop_var = (s32[], f32[4,4]) parameter(0) constant.1 = s32[] constant(1) gte0 = s32[] get-tuple-element(loop_var), index=0 add = s32[] add(gte0, constant.1) gte1 = f32[4,4] get-tuple-element(loop_var), index=1 mul = f32[4,4] multiply(gte1, gte1) cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}} constant.4 = f32[] constant(1) nonconst = f32[4,4] custom-call(), custom_call_target="unknown" add1 = f32[4,4] add(cp, nonconst) ROOT tuple = (s32[], f32[4,4]) tuple(add, add1) } cond { loop_var = (s32[], f32[4,4]) parameter(0) gte.cond = s32[] get-tuple-element(loop_var), index=0 constant.3 = s32[] constant(5) ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT } ENTRY main { constant.2 = s32[] constant(0) param = f32[4,4] parameter(0) tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param) while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body ROOT result = s32[] get-tuple-element(while), index=0 } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); CollectivePermuteMotion pass; ASSERT_FALSE(pass.Run(&*module).value()); } TEST_F(CollectivePermuteMotionTest, DoNotMoveIfOutputUsed) { absl::string_view hlo_string = R"( HloModule test body { loop_var = (s32[], f32[4,4]) parameter(0) constant.1 = s32[] constant(1) gte0 = s32[] get-tuple-element(loop_var), index=0 add = s32[] add(gte0, constant.1) gte1 = f32[4,4] get-tuple-element(loop_var), index=1 mul = f32[4,4] multiply(gte1, gte1) cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}} ROOT tuple = (s32[], f32[4,4]) tuple(add, cp) } cond { loop_var = (s32[], f32[4,4]) parameter(0) gte.cond = s32[] get-tuple-element(loop_var), index=0 constant.3 = s32[] constant(5) ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT } ENTRY main { constant.2 = s32[] constant(0) param = f32[4,4] parameter(0) tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param) while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body ROOT result = f32[4,4] get-tuple-element(while), index=1 } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); CollectivePermuteMotion pass; ASSERT_FALSE(pass.Run(&*module).value()); } TEST_F(CollectivePermuteMotionTest, DoNotMoveIfIndictionVarUnknown) { absl::string_view hlo_string = R"( HloModule test body { loop_var = (s32[], f32[4,4]) 
Note: the test name DoNotMoveIfIndictionVarUnknown above appears to be a misspelling of "InductionVar"; it is kept verbatim here to match the cited source blob, and it refers to the non-add/sub induction-variable bailout in MoveCollectivePermutes.
parameter(0) constant.1 = s32[] constant(1) gte0 = s32[] get-tuple-element(loop_var), index=0 custom = s32[] custom-call(gte0, constant.1), custom_call_target="unknown" gte1 = f32[4,4] get-tuple-element(loop_var), index=1 mul = f32[4,4] multiply(gte1, gte1) cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}} ROOT tuple = (s32[], f32[4,4]) tuple(custom, cp) } cond { loop_var = (s32[], f32[4,4]) parameter(0) gte.cond = s32[] get-tuple-element(loop_var), index=0 constant.3 = s32[] constant(5) ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT } ENTRY main { constant.2 = s32[] constant(0) param = f32[4,4] parameter(0) tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param) while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body ROOT result = s32[] get-tuple-element(while), index=0 } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); CollectivePermuteMotion pass; ASSERT_FALSE(pass.Run(&*module).value()); } TEST_F(CollectivePermuteMotionTest, DoNotMoveIfMultiOutput) { absl::string_view hlo_string = R"( HloModule test body { loop_var = (s32[], f32[4,4], f32[4,4]) parameter(0) constant.1 = s32[] constant(1) gte0 = s32[] get-tuple-element(loop_var), index=0 add = s32[] add(gte0, constant.1) gte1 = f32[4,4] get-tuple-element(loop_var), index=1 mul = f32[4,4] multiply(gte1, gte1) cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}} ROOT tuple = (s32[], f32[4,4], f32[4,4]) tuple(add, cp, cp) } cond { loop_var = (s32[], f32[4,4], f32[4,4]) parameter(0) gte.cond = s32[] get-tuple-element(loop_var), index=0 constant.3 = s32[] constant(5) ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT } ENTRY main { constant.2 = s32[] constant(0) param = f32[4,4] parameter(0) tuple.1 = (s32[], f32[4,4], f32[4,4]) tuple(constant.2, param, param) while = (s32[], f32[4,4], f32[4,4]) while(tuple.1), condition=cond, body=body ROOT result = s32[] get-tuple-element(while), index=0 } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); CollectivePermuteMotion pass; ASSERT_FALSE(pass.Run(&*module).value()); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/collective_permute_motion.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/collective_permute_motion_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ed71d17b-19eb-4298-88f0-ac7a9381d1f5
cpp
abseil/abseil-cpp
str_split
absl/strings/str_split.cc
absl/strings/str_split_test.cc
#include "absl/strings/str_split.h" #include <algorithm> #include <cstddef> #include <cstdlib> #include <cstring> #include "absl/base/config.h" #include "absl/base/internal/raw_logging.h" #include "absl/strings/string_view.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace { template <typename FindPolicy> absl::string_view GenericFind(absl::string_view text, absl::string_view delimiter, size_t pos, FindPolicy find_policy) { if (delimiter.empty() && text.length() > 0) { return absl::string_view(text.data() + pos + 1, 0); } size_t found_pos = absl::string_view::npos; absl::string_view found(text.data() + text.size(), 0); found_pos = find_policy.Find(text, delimiter, pos); if (found_pos != absl::string_view::npos) { found = absl::string_view(text.data() + found_pos, find_policy.Length(delimiter)); } return found; } struct LiteralPolicy { static size_t Find(absl::string_view text, absl::string_view delimiter, size_t pos) { return text.find(delimiter, pos); } static size_t Length(absl::string_view delimiter) { return delimiter.length(); } }; struct AnyOfPolicy { static size_t Find(absl::string_view text, absl::string_view delimiter, size_t pos) { return text.find_first_of(delimiter, pos); } static size_t Length(absl::string_view ) { return 1; } }; } ByString::ByString(absl::string_view sp) : delimiter_(sp) {} absl::string_view ByString::Find(absl::string_view text, size_t pos) const { if (delimiter_.length() == 1) { size_t found_pos = text.find(delimiter_[0], pos); if (found_pos == absl::string_view::npos) return absl::string_view(text.data() + text.size(), 0); return text.substr(found_pos, 1); } return GenericFind(text, delimiter_, pos, LiteralPolicy()); } absl::string_view ByAsciiWhitespace::Find(absl::string_view text, size_t pos) const { return GenericFind(text, " \t\v\f\r\n", pos, AnyOfPolicy()); } absl::string_view ByChar::Find(absl::string_view text, size_t pos) const { size_t found_pos = text.find(c_, pos); if (found_pos == absl::string_view::npos) return absl::string_view(text.data() + text.size(), 0); return text.substr(found_pos, 1); } ByAnyChar::ByAnyChar(absl::string_view sp) : delimiters_(sp) {} absl::string_view ByAnyChar::Find(absl::string_view text, size_t pos) const { return GenericFind(text, delimiters_, pos, AnyOfPolicy()); } ByLength::ByLength(ptrdiff_t length) : length_(length) { ABSL_RAW_CHECK(length > 0, ""); } absl::string_view ByLength::Find(absl::string_view text, size_t pos) const { pos = std::min(pos, text.size()); absl::string_view substr = text.substr(pos); if (substr.length() <= static_cast<size_t>(length_)) return absl::string_view(text.data() + text.size(), 0); return absl::string_view(substr.data() + length_, 0); } ABSL_NAMESPACE_END }
#include "absl/strings/str_split.h" #include <cstddef> #include <cstdint> #include <deque> #include <initializer_list> #include <list> #include <map> #include <memory> #include <set> #include <string> #include <unordered_map> #include <unordered_set> #include <utility> #include <vector> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/macros.h" #include "absl/container/btree_map.h" #include "absl/container/btree_set.h" #include "absl/container/flat_hash_map.h" #include "absl/container/node_hash_map.h" #include "absl/strings/string_view.h" namespace { using ::testing::ElementsAre; using ::testing::IsEmpty; using ::testing::Pair; using ::testing::UnorderedElementsAre; TEST(Split, TraitsTest) { static_assert(!absl::strings_internal::SplitterIsConvertibleTo<int>::value, ""); static_assert( !absl::strings_internal::SplitterIsConvertibleTo<std::string>::value, ""); static_assert(absl::strings_internal::SplitterIsConvertibleTo< std::vector<std::string>>::value, ""); static_assert( !absl::strings_internal::SplitterIsConvertibleTo<std::vector<int>>::value, ""); static_assert(absl::strings_internal::SplitterIsConvertibleTo< std::vector<absl::string_view>>::value, ""); static_assert(absl::strings_internal::SplitterIsConvertibleTo< std::map<std::string, std::string>>::value, ""); static_assert(absl::strings_internal::SplitterIsConvertibleTo< std::map<absl::string_view, absl::string_view>>::value, ""); static_assert(!absl::strings_internal::SplitterIsConvertibleTo< std::map<int, std::string>>::value, ""); static_assert(!absl::strings_internal::SplitterIsConvertibleTo< std::map<std::string, int>>::value, ""); } TEST(Split, APIExamples) { { std::vector<std::string> v = absl::StrSplit("a,b,c", ","); EXPECT_THAT(v, ElementsAre("a", "b", "c")); using absl::ByString; v = absl::StrSplit("a,b,c", ByString(",")); EXPECT_THAT(v, ElementsAre("a", "b", "c")); EXPECT_THAT(absl::StrSplit("a,b,c", ByString(",")), ElementsAre("a", "b", "c")); } { std::vector<std::string> v = absl::StrSplit("a,b,c", ','); EXPECT_THAT(v, ElementsAre("a", "b", "c")); using absl::ByChar; v = absl::StrSplit("a,b,c", ByChar(',')); EXPECT_THAT(v, ElementsAre("a", "b", "c")); } { const std::vector<std::string> v = absl::StrSplit("a=>b=>c", "=>"); EXPECT_THAT(v, ElementsAre("a", "b", "c")); } { std::vector<absl::string_view> v = absl::StrSplit("a,b,c", ','); EXPECT_THAT(v, ElementsAre("a", "b", "c")); } { std::vector<std::string> v = absl::StrSplit(",a,b,c,", ','); EXPECT_THAT(v, ElementsAre("", "a", "b", "c", "")); } { std::vector<std::string> v = absl::StrSplit("abc", ','); EXPECT_THAT(v, ElementsAre("abc")); } { std::vector<std::string> v = absl::StrSplit("abc", ""); EXPECT_THAT(v, ElementsAre("a", "b", "c")); } { std::string embedded_nulls("a\0b\0c", 5); std::string null_delim("\0", 1); std::vector<std::string> v = absl::StrSplit(embedded_nulls, null_delim); EXPECT_THAT(v, ElementsAre("a", "b", "c")); } { std::pair<std::string, std::string> p = absl::StrSplit("a,b,c", ','); EXPECT_EQ("a", p.first); EXPECT_EQ("b", p.second); } { std::set<std::string> v = absl::StrSplit("a,b,c,a,b,c,a,b,c", ','); EXPECT_THAT(v, ElementsAre("a", "b", "c")); } { char a[] = ","; char* d = a + 0; std::vector<std::string> v = absl::StrSplit("a,b,c", d); EXPECT_THAT(v, ElementsAre("a", "b", "c")); } { using absl::ByAnyChar; std::vector<std::string> v = absl::StrSplit("a,b;c", ByAnyChar(",;")); EXPECT_THAT(v, ElementsAre("a", "b", "c")); } { using absl::SkipWhitespace; std::vector<std::string> v = absl::StrSplit(" a , ,,b,", ',', 
SkipWhitespace()); EXPECT_THAT(v, ElementsAre(" a ", "b")); } { using absl::ByLength; std::vector<std::string> v = absl::StrSplit("abcdefg", ByLength(3)); EXPECT_THAT(v, ElementsAre("abc", "def", "g")); } { std::vector<std::string> v1 = absl::StrSplit("a,b,c", ','); EXPECT_THAT(v1, ElementsAre("a", "b", "c")); std::vector<std::string> v2(absl::StrSplit("a,b,c", ',')); EXPECT_THAT(v2, ElementsAre("a", "b", "c")); auto v3 = std::vector<std::string>(absl::StrSplit("a,b,c", ',')); EXPECT_THAT(v3, ElementsAre("a", "b", "c")); v3 = absl::StrSplit("a,b,c", ','); EXPECT_THAT(v3, ElementsAre("a", "b", "c")); } { std::map<std::string, std::string> m = absl::StrSplit("a,1,b,2,a,3", ','); EXPECT_EQ(2, m.size()); EXPECT_EQ("3", m["a"]); EXPECT_EQ("2", m["b"]); } { std::multimap<std::string, std::string> m = absl::StrSplit("a,1,b,2,a,3", ','); EXPECT_EQ(3, m.size()); auto it = m.find("a"); EXPECT_EQ("1", it->second); ++it; EXPECT_EQ("3", it->second); it = m.find("b"); EXPECT_EQ("2", it->second); } { std::string s = "x,x,x,x,x,x,x"; for (absl::string_view sp : absl::StrSplit(s, ',')) { EXPECT_EQ("x", sp); } } { using absl::SkipWhitespace; std::string s = " ,x,,x,,x,x,x,,"; for (absl::string_view sp : absl::StrSplit(s, ',', SkipWhitespace())) { EXPECT_EQ("x", sp); } } { std::map<std::string, std::string> m; for (absl::string_view sp : absl::StrSplit("a=b=c,d=e,f=,g", ',')) { m.insert(absl::StrSplit(sp, absl::MaxSplits('=', 1))); } EXPECT_EQ("b=c", m.find("a")->second); EXPECT_EQ("e", m.find("d")->second); EXPECT_EQ("", m.find("f")->second); EXPECT_EQ("", m.find("g")->second); } } TEST(SplitIterator, Basics) { auto splitter = absl::StrSplit("a,b", ','); auto it = splitter.begin(); auto end = splitter.end(); EXPECT_NE(it, end); EXPECT_EQ("a", *it); ++it; EXPECT_NE(it, end); EXPECT_EQ("b", std::string(it->data(), it->size())); it++; EXPECT_EQ(it, end); } class Skip { public: explicit Skip(const std::string& s) : s_(s) {} bool operator()(absl::string_view sp) { return sp != s_; } private: std::string s_; }; TEST(SplitIterator, Predicate) { auto splitter = absl::StrSplit("a,b,c", ',', Skip("b")); auto it = splitter.begin(); auto end = splitter.end(); EXPECT_NE(it, end); EXPECT_EQ("a", *it); ++it; EXPECT_NE(it, end); EXPECT_EQ("c", std::string(it->data(), it->size())); it++; EXPECT_EQ(it, end); } TEST(SplitIterator, EdgeCases) { struct { std::string in; std::vector<std::string> expect; } specs[] = { {"", {""}}, {"foo", {"foo"}}, {",", {"", ""}}, {",foo", {"", "foo"}}, {"foo,", {"foo", ""}}, {",foo,", {"", "foo", ""}}, {"foo,bar", {"foo", "bar"}}, }; for (const auto& spec : specs) { SCOPED_TRACE(spec.in); auto splitter = absl::StrSplit(spec.in, ','); auto it = splitter.begin(); auto end = splitter.end(); for (const auto& expected : spec.expect) { EXPECT_NE(it, end); EXPECT_EQ(expected, *it++); } EXPECT_EQ(it, end); } } TEST(Splitter, Const) { const auto splitter = absl::StrSplit("a,b,c", ','); EXPECT_THAT(splitter, ElementsAre("a", "b", "c")); } TEST(Split, EmptyAndNull) { EXPECT_THAT(absl::StrSplit(absl::string_view(""), '-'), ElementsAre("")); EXPECT_THAT(absl::StrSplit(absl::string_view(), '-'), ElementsAre()); } TEST(SplitIterator, EqualityAsEndCondition) { auto splitter = absl::StrSplit("a,b,c", ','); auto it = splitter.begin(); auto it2 = it; ++it2; ++it2; EXPECT_EQ("c", *it2); std::vector<absl::string_view> v; for (; it != it2; ++it) { v.push_back(*it); } EXPECT_THAT(v, ElementsAre("a", "b")); } TEST(Splitter, RangeIterators) { auto splitter = absl::StrSplit("a,b,c", ','); std::vector<absl::string_view> 
output; for (absl::string_view p : splitter) { output.push_back(p); } EXPECT_THAT(output, ElementsAre("a", "b", "c")); } template <typename ContainerType, typename Splitter> void TestConversionOperator(const Splitter& splitter) { ContainerType output = splitter; EXPECT_THAT(output, UnorderedElementsAre("a", "b", "c", "d")); } template <typename MapType, typename Splitter> void TestMapConversionOperator(const Splitter& splitter) { MapType m = splitter; EXPECT_THAT(m, UnorderedElementsAre(Pair("a", "b"), Pair("c", "d"))); } template <typename FirstType, typename SecondType, typename Splitter> void TestPairConversionOperator(const Splitter& splitter) { std::pair<FirstType, SecondType> p = splitter; EXPECT_EQ(p, (std::pair<FirstType, SecondType>("a", "b"))); } TEST(Splitter, ConversionOperator) { auto splitter = absl::StrSplit("a,b,c,d", ','); TestConversionOperator<std::vector<absl::string_view>>(splitter); TestConversionOperator<std::vector<std::string>>(splitter); TestConversionOperator<std::list<absl::string_view>>(splitter); TestConversionOperator<std::list<std::string>>(splitter); TestConversionOperator<std::deque<absl::string_view>>(splitter); TestConversionOperator<std::deque<std::string>>(splitter); TestConversionOperator<std::set<absl::string_view>>(splitter); TestConversionOperator<std::set<std::string>>(splitter); TestConversionOperator<std::multiset<absl::string_view>>(splitter); TestConversionOperator<std::multiset<std::string>>(splitter); TestConversionOperator<absl::btree_set<absl::string_view>>(splitter); TestConversionOperator<absl::btree_set<std::string>>(splitter); TestConversionOperator<absl::btree_multiset<absl::string_view>>(splitter); TestConversionOperator<absl::btree_multiset<std::string>>(splitter); TestConversionOperator<std::unordered_set<std::string>>(splitter); TestMapConversionOperator<std::map<absl::string_view, absl::string_view>>( splitter); TestMapConversionOperator<std::map<absl::string_view, std::string>>(splitter); TestMapConversionOperator<std::map<std::string, absl::string_view>>(splitter); TestMapConversionOperator<std::map<std::string, std::string>>(splitter); TestMapConversionOperator< std::multimap<absl::string_view, absl::string_view>>(splitter); TestMapConversionOperator<std::multimap<absl::string_view, std::string>>( splitter); TestMapConversionOperator<std::multimap<std::string, absl::string_view>>( splitter); TestMapConversionOperator<std::multimap<std::string, std::string>>(splitter); TestMapConversionOperator< absl::btree_map<absl::string_view, absl::string_view>>(splitter); TestMapConversionOperator<absl::btree_map<absl::string_view, std::string>>( splitter); TestMapConversionOperator<absl::btree_map<std::string, absl::string_view>>( splitter); TestMapConversionOperator<absl::btree_map<std::string, std::string>>( splitter); TestMapConversionOperator< absl::btree_multimap<absl::string_view, absl::string_view>>(splitter); TestMapConversionOperator< absl::btree_multimap<absl::string_view, std::string>>(splitter); TestMapConversionOperator< absl::btree_multimap<std::string, absl::string_view>>(splitter); TestMapConversionOperator<absl::btree_multimap<std::string, std::string>>( splitter); TestMapConversionOperator<std::unordered_map<std::string, std::string>>( splitter); TestMapConversionOperator< absl::node_hash_map<absl::string_view, absl::string_view>>(splitter); TestMapConversionOperator< absl::node_hash_map<absl::string_view, std::string>>(splitter); TestMapConversionOperator< absl::node_hash_map<std::string, absl::string_view>>(splitter); 
TestMapConversionOperator< absl::flat_hash_map<absl::string_view, absl::string_view>>(splitter); TestMapConversionOperator< absl::flat_hash_map<absl::string_view, std::string>>(splitter); TestMapConversionOperator< absl::flat_hash_map<std::string, absl::string_view>>(splitter); TestPairConversionOperator<absl::string_view, absl::string_view>(splitter); TestPairConversionOperator<absl::string_view, std::string>(splitter); TestPairConversionOperator<std::string, absl::string_view>(splitter); TestPairConversionOperator<std::string, std::string>(splitter); } TEST(Splitter, ToPair) { { std::pair<std::string, std::string> p = absl::StrSplit("", ','); EXPECT_EQ("", p.first); EXPECT_EQ("", p.second); } { std::pair<std::string, std::string> p = absl::StrSplit("a", ','); EXPECT_EQ("a", p.first); EXPECT_EQ("", p.second); } { std::pair<std::string, std::string> p = absl::StrSplit(",b", ','); EXPECT_EQ("", p.first); EXPECT_EQ("b", p.second); } { std::pair<std::string, std::string> p = absl::StrSplit("a,b", ','); EXPECT_EQ("a", p.first); EXPECT_EQ("b", p.second); } { std::pair<std::string, std::string> p = absl::StrSplit("a,b,c", ','); EXPECT_EQ("a", p.first); EXPECT_EQ("b", p.second); } } TEST(Splitter, Predicates) { static const char kTestChars[] = ",a, ,b,"; using absl::AllowEmpty; using absl::SkipEmpty; using absl::SkipWhitespace; { auto splitter = absl::StrSplit(kTestChars, ','); std::vector<std::string> v = splitter; EXPECT_THAT(v, ElementsAre("", "a", " ", "b", "")); } { auto splitter = absl::StrSplit(kTestChars, ',', AllowEmpty()); std::vector<std::string> v_allowempty = splitter; EXPECT_THAT(v_allowempty, ElementsAre("", "a", " ", "b", "")); auto splitter_nopredicate = absl::StrSplit(kTestChars, ','); std::vector<std::string> v_nopredicate = splitter_nopredicate; EXPECT_EQ(v_allowempty, v_nopredicate); } { auto splitter = absl::StrSplit(kTestChars, ',', SkipEmpty()); std::vector<std::string> v = splitter; EXPECT_THAT(v, ElementsAre("a", " ", "b")); } { auto splitter = absl::StrSplit(kTestChars, ',', SkipWhitespace()); std::vector<std::string> v = splitter; EXPECT_THAT(v, ElementsAre("a", "b")); } } TEST(Split, Basics) { { absl::StrSplit("a,b,c", ','); } { std::vector<absl::string_view> v = absl::StrSplit("a,b,c", ','); EXPECT_THAT(v, ElementsAre("a", "b", "c")); } { std::vector<std::string> v = absl::StrSplit("a,b,c", ','); EXPECT_THAT(v, ElementsAre("a", "b", "c")); } { std::vector<std::string> v; v = absl::StrSplit("a,b,c", ','); EXPECT_THAT(v, ElementsAre("a", "b", "c")); std::map<std::string, std::string> m; m = absl::StrSplit("a,b,c", ','); EXPECT_EQ(2, m.size()); std::unordered_map<std::string, std::string> hm; hm = absl::StrSplit("a,b,c", ','); EXPECT_EQ(2, hm.size()); } } absl::string_view ReturnStringView() { return "Hello World"; } const char* ReturnConstCharP() { return "Hello World"; } char* ReturnCharP() { return const_cast<char*>("Hello World"); } TEST(Split, AcceptsCertainTemporaries) { std::vector<std::string> v; v = absl::StrSplit(ReturnStringView(), ' '); EXPECT_THAT(v, ElementsAre("Hello", "World")); v = absl::StrSplit(ReturnConstCharP(), ' '); EXPECT_THAT(v, ElementsAre("Hello", "World")); v = absl::StrSplit(ReturnCharP(), ' '); EXPECT_THAT(v, ElementsAre("Hello", "World")); } TEST(Split, Temporary) { const char input[] = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u"; EXPECT_LT(sizeof(std::string), ABSL_ARRAYSIZE(input)) << "Input should be larger than fits on the stack."; auto splitter = absl::StrSplit(std::string(input), ','); std::string expected = "a"; for 
(absl::string_view letter : splitter) { EXPECT_EQ(expected, letter); ++expected[0]; } EXPECT_EQ("v", expected); auto std_splitter = absl::StrSplit(std::string(input), ','); expected = "a"; for (absl::string_view letter : std_splitter) { EXPECT_EQ(expected, letter); ++expected[0]; } EXPECT_EQ("v", expected); } template <typename T> static std::unique_ptr<T> CopyToHeap(const T& value) { return std::unique_ptr<T>(new T(value)); } TEST(Split, LvalueCaptureIsCopyable) { std::string input = "a,b"; auto heap_splitter = CopyToHeap(absl::StrSplit(input, ',')); auto stack_splitter = *heap_splitter; heap_splitter.reset(); std::vector<std::string> result = stack_splitter; EXPECT_THAT(result, testing::ElementsAre("a", "b")); } TEST(Split, TemporaryCaptureIsCopyable) { auto heap_splitter = CopyToHeap(absl::StrSplit(std::string("a,b"), ',')); auto stack_splitter = *heap_splitter; heap_splitter.reset(); std::vector<std::string> result = stack_splitter; EXPECT_THAT(result, testing::ElementsAre("a", "b")); } TEST(Split, SplitterIsCopyableAndMoveable) { auto a = absl::StrSplit("foo", '-'); auto b = a; auto c = std::move(a); b = c; c = std::move(b); EXPECT_THAT(c, ElementsAre("foo")); } TEST(Split, StringDelimiter) { { std::vector<absl::string_view> v = absl::StrSplit("a,b", ','); EXPECT_THAT(v, ElementsAre("a", "b")); } { std::vector<absl::string_view> v = absl::StrSplit("a,b", std::string(",")); EXPECT_THAT(v, ElementsAre("a", "b")); } { std::vector<absl::string_view> v = absl::StrSplit("a,b", absl::string_view(",")); EXPECT_THAT(v, ElementsAre("a", "b")); } }
#if !defined(__cpp_char8_t) #if defined(__clang__) #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++2a-compat" #endif TEST(Split, UTF8) { std::string utf8_string = u8"\u03BA\u1F79\u03C3\u03BC\u03B5"; { std::string to_split = "a," + utf8_string; std::vector<absl::string_view> v = absl::StrSplit(to_split, ','); EXPECT_THAT(v, ElementsAre("a", utf8_string)); } { std::string to_split = "a," + utf8_string + ",b"; std::string unicode_delimiter = "," + utf8_string + ","; std::vector<absl::string_view> v = absl::StrSplit(to_split, unicode_delimiter); EXPECT_THAT(v, ElementsAre("a", "b")); } { std::vector<absl::string_view> v = absl::StrSplit(u8"Foo h\u00E4llo th\u4E1Ere", absl::ByAnyChar(" \t")); EXPECT_THAT(v, ElementsAre("Foo", u8"h\u00E4llo", u8"th\u4E1Ere")); } } #if defined(__clang__) #pragma clang diagnostic pop #endif #endif
TEST(Split, EmptyStringDelimiter) { { std::vector<std::string> v = absl::StrSplit("", ""); EXPECT_THAT(v, ElementsAre("")); } { std::vector<std::string> v = absl::StrSplit("a", ""); EXPECT_THAT(v, ElementsAre("a")); } { std::vector<std::string> v = absl::StrSplit("ab", ""); EXPECT_THAT(v, ElementsAre("a", "b")); } { std::vector<std::string> v = absl::StrSplit("a b", ""); EXPECT_THAT(v, ElementsAre("a", " ", "b")); } }
TEST(Split, SubstrDelimiter) { std::vector<absl::string_view> results; absl::string_view delim("//"); results = absl::StrSplit("", delim); EXPECT_THAT(results, ElementsAre("")); results = absl::StrSplit("//", delim); EXPECT_THAT(results, ElementsAre("", "")); results = absl::StrSplit("ab", delim); EXPECT_THAT(results, ElementsAre("ab")); results = absl::StrSplit("ab//", delim); EXPECT_THAT(results, ElementsAre("ab", "")); results = absl::StrSplit("ab/", delim); EXPECT_THAT(results, ElementsAre("ab/")); results = absl::StrSplit("a/b", delim); EXPECT_THAT(results, ElementsAre("a/b")); results = absl::StrSplit("a//b", delim); EXPECT_THAT(results, ElementsAre("a", "b")); results = absl::StrSplit("a///b", delim); EXPECT_THAT(results, ElementsAre("a", "/b")); results = absl::StrSplit("a////b", delim); EXPECT_THAT(results, ElementsAre("a", "", "b")); }
TEST(Split, EmptyResults) { std::vector<absl::string_view> results; results = absl::StrSplit("", '#'); EXPECT_THAT(results, ElementsAre("")); results = absl::StrSplit("#", '#'); EXPECT_THAT(results, ElementsAre("", "")); results = absl::StrSplit("#cd", '#'); EXPECT_THAT(results, ElementsAre("", "cd")); results = absl::StrSplit("ab#cd#", '#'); EXPECT_THAT(results, ElementsAre("ab", "cd", "")); results = absl::StrSplit("ab##cd", '#'); EXPECT_THAT(results, ElementsAre("ab", "", "cd")); results = absl::StrSplit("ab##", '#'); EXPECT_THAT(results, ElementsAre("ab", "", "")); results = absl::StrSplit("ab#ab#", '#'); EXPECT_THAT(results, ElementsAre("ab", "ab", "")); results = absl::StrSplit("aaaa", 'a'); EXPECT_THAT(results, ElementsAre("", "", "", "", "")); results = absl::StrSplit("", '#', absl::SkipEmpty()); EXPECT_THAT(results, ElementsAre()); }
template <typename Delimiter> static bool IsFoundAtStartingPos(absl::string_view text, Delimiter d, size_t starting_pos, int expected_pos) { absl::string_view found = d.Find(text, starting_pos); return found.data() != text.data() + text.size() && expected_pos == found.data() - text.data(); } template <typename Delimiter> static bool IsFoundAt(absl::string_view text, Delimiter d, int expected_pos) { const std::string leading_text = ",x,y,z,"; return IsFoundAtStartingPos(text, d, 0, expected_pos) && IsFoundAtStartingPos(leading_text + std::string(text), d, leading_text.length(), expected_pos + leading_text.length()); } template <typename Delimiter> void TestComma(Delimiter d) { EXPECT_TRUE(IsFoundAt(",", d, 0)); EXPECT_TRUE(IsFoundAt("a,", d, 1)); EXPECT_TRUE(IsFoundAt(",b", d, 0)); EXPECT_TRUE(IsFoundAt("a,b", d, 1)); EXPECT_TRUE(IsFoundAt("a,b,", d, 1)); EXPECT_TRUE(IsFoundAt("a,b,c", d, 1)); EXPECT_FALSE(IsFoundAt("", d, -1)); EXPECT_FALSE(IsFoundAt(" ", d, -1)); EXPECT_FALSE(IsFoundAt("a", d, -1)); EXPECT_FALSE(IsFoundAt("a b c", d, -1)); EXPECT_FALSE(IsFoundAt("a;b;c", d, -1)); EXPECT_FALSE(IsFoundAt(";", d, -1)); }
TEST(Delimiter, ByString) { using absl::ByString; TestComma(ByString(",")); ByString comma_string(","); TestComma(comma_string); absl::string_view abc("abc"); EXPECT_EQ(0, abc.find("")); ByString empty(""); EXPECT_FALSE(IsFoundAt("", empty, 0)); EXPECT_FALSE(IsFoundAt("a", empty, 0)); EXPECT_TRUE(IsFoundAt("ab", empty, 1)); EXPECT_TRUE(IsFoundAt("abc", empty, 1)); } TEST(Split, ByChar) { using absl::ByChar; TestComma(ByChar(',')); ByChar comma_char(','); TestComma(comma_char); }
TEST(Delimiter, ByAnyChar) { using absl::ByAnyChar; ByAnyChar one_delim(","); EXPECT_TRUE(IsFoundAt(",", one_delim, 0)); EXPECT_TRUE(IsFoundAt("a,", one_delim, 1)); EXPECT_TRUE(IsFoundAt("a,b", one_delim, 1)); EXPECT_TRUE(IsFoundAt(",b", one_delim, 0)); EXPECT_FALSE(IsFoundAt("", one_delim, -1)); EXPECT_FALSE(IsFoundAt(" ", one_delim, -1)); EXPECT_FALSE(IsFoundAt("a", one_delim, -1)); EXPECT_FALSE(IsFoundAt("a;b;c", one_delim, -1)); EXPECT_FALSE(IsFoundAt(";", one_delim, -1)); ByAnyChar two_delims(",;"); EXPECT_TRUE(IsFoundAt(",", two_delims, 0)); EXPECT_TRUE(IsFoundAt(";", two_delims, 0)); EXPECT_TRUE(IsFoundAt(",;", two_delims, 0)); EXPECT_TRUE(IsFoundAt(";,", two_delims, 0)); EXPECT_TRUE(IsFoundAt(",;b", two_delims, 0)); EXPECT_TRUE(IsFoundAt(";,b", two_delims, 0)); EXPECT_TRUE(IsFoundAt("a;,", two_delims, 1)); EXPECT_TRUE(IsFoundAt("a,;", two_delims, 1)); EXPECT_TRUE(IsFoundAt("a;,b", two_delims, 1)); EXPECT_TRUE(IsFoundAt("a,;b", two_delims, 1)); EXPECT_FALSE(IsFoundAt("",
two_delims, -1)); EXPECT_FALSE(IsFoundAt(" ", two_delims, -1)); EXPECT_FALSE(IsFoundAt("a", two_delims, -1)); EXPECT_FALSE(IsFoundAt("a=b=c", two_delims, -1)); EXPECT_FALSE(IsFoundAt("=", two_delims, -1)); ByAnyChar empty(""); EXPECT_FALSE(IsFoundAt("", empty, 0)); EXPECT_FALSE(IsFoundAt("a", empty, 0)); EXPECT_TRUE(IsFoundAt("ab", empty, 1)); EXPECT_TRUE(IsFoundAt("abc", empty, 1)); } TEST(Split, ByAsciiWhitespace) { using absl::ByAsciiWhitespace; using absl::SkipEmpty; std::vector<absl::string_view> results; results = absl::StrSplit("aaaa\n", ByAsciiWhitespace()); EXPECT_THAT(results, ElementsAre("aaaa", "")); results = absl::StrSplit("aaaa\n", ByAsciiWhitespace(), SkipEmpty()); EXPECT_THAT(results, ElementsAre("aaaa")); results = absl::StrSplit(" ", ByAsciiWhitespace()); EXPECT_THAT(results, ElementsAre("", "")); results = absl::StrSplit(" ", ByAsciiWhitespace(), SkipEmpty()); EXPECT_THAT(results, IsEmpty()); results = absl::StrSplit("a", ByAsciiWhitespace()); EXPECT_THAT(results, ElementsAre("a")); results = absl::StrSplit("", ByAsciiWhitespace()); EXPECT_THAT(results, ElementsAre("")); results = absl::StrSplit("", ByAsciiWhitespace(), SkipEmpty()); EXPECT_THAT(results, IsEmpty()); results = absl::StrSplit("a b\tc\n d\n", ByAsciiWhitespace()); EXPECT_THAT(results, ElementsAre("a", "b", "c", "", "", "d", "")); results = absl::StrSplit("a b\tc\n d \n", ByAsciiWhitespace(), SkipEmpty()); EXPECT_THAT(results, ElementsAre("a", "b", "c", "d")); results = absl::StrSplit("a\t\n\v\f\r b", ByAsciiWhitespace(), SkipEmpty()); EXPECT_THAT(results, ElementsAre("a", "b")); } TEST(Delimiter, ByLength) { using absl::ByLength; ByLength four_char_delim(4); EXPECT_TRUE(IsFoundAt("abcde", four_char_delim, 4)); EXPECT_TRUE(IsFoundAt("abcdefghijklmnopqrstuvwxyz", four_char_delim, 4)); EXPECT_TRUE(IsFoundAt("a b,c\nd", four_char_delim, 4)); EXPECT_FALSE(IsFoundAt("", four_char_delim, 0)); EXPECT_FALSE(IsFoundAt("a", four_char_delim, 0)); EXPECT_FALSE(IsFoundAt("ab", four_char_delim, 0)); EXPECT_FALSE(IsFoundAt("abc", four_char_delim, 0)); EXPECT_FALSE(IsFoundAt("abcd", four_char_delim, 0)); } TEST(Split, WorksWithLargeStrings) { #if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \ defined(ABSL_HAVE_MEMORY_SANITIZER) || defined(ABSL_HAVE_THREAD_SANITIZER) constexpr size_t kSize = (uint32_t{1} << 26) + 1; #else constexpr size_t kSize = (uint32_t{1} << 31) + 1; #endif if (sizeof(size_t) > 4) { std::string s(kSize, 'x'); s.back() = '-'; std::vector<absl::string_view> v = absl::StrSplit(s, '-'); EXPECT_EQ(2, v.size()); EXPECT_EQ('x', v[0][0]); EXPECT_EQ('x', v[0][1]); EXPECT_EQ('x', v[0][3]); EXPECT_EQ("", v[1]); } } TEST(SplitInternalTest, TypeTraits) { EXPECT_FALSE(absl::strings_internal::HasMappedType<int>::value); EXPECT_TRUE( (absl::strings_internal::HasMappedType<std::map<int, int>>::value)); EXPECT_FALSE(absl::strings_internal::HasValueType<int>::value); EXPECT_TRUE( (absl::strings_internal::HasValueType<std::map<int, int>>::value)); EXPECT_FALSE(absl::strings_internal::HasConstIterator<int>::value); EXPECT_TRUE( (absl::strings_internal::HasConstIterator<std::map<int, int>>::value)); EXPECT_FALSE(absl::strings_internal::IsInitializerList<int>::value); EXPECT_TRUE((absl::strings_internal::IsInitializerList< std::initializer_list<int>>::value)); } }
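The conversion-operator, pair, and predicate tests above cover most of absl::StrSplit's public surface. As a minimal standalone sketch of the same API (assuming only that Abseil is linked; the file below is illustrative and not part of the repository):

#include <map>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_split.h"

int main() {
  // Split into a sequence container; SkipEmpty() drops the empty piece.
  std::vector<std::string> fields =
      absl::StrSplit("a,b,,c", ',', absl::SkipEmpty());
  // fields == {"a", "b", "c"}

  // Split into a map: adjacent pieces pair up as key, then value.
  std::map<std::string, std::string> kv = absl::StrSplit("k1,v1,k2,v2", ',');
  // kv == {{"k1", "v1"}, {"k2", "v2"}}

  // Split into exactly two pieces, as TEST(Splitter, ToPair) shows above:
  // pieces past the second are discarded.
  std::pair<std::string, std::string> p = absl::StrSplit("a,b,c", ',');
  // p == {"a", "b"}
  return 0;
}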
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/str_split.cc
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/str_split_test.cc
03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4
22062a52-3ab1-495e-b0d6-3265e90401e0
cpp
abseil/abseil-cpp
memutil
absl/strings/internal/memutil.cc
absl/strings/internal/memutil_test.cc
#include "absl/strings/internal/memutil.h" #include <cstdlib> #include "absl/strings/ascii.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace strings_internal { int memcasecmp(const char* s1, const char* s2, size_t len) { const unsigned char* us1 = reinterpret_cast<const unsigned char*>(s1); const unsigned char* us2 = reinterpret_cast<const unsigned char*>(s2); for (size_t i = 0; i < len; i++) { unsigned char c1 = us1[i]; unsigned char c2 = us2[i]; if (c1 != c2) { c1 = c1 >= 'A' && c1 <= 'Z' ? c1 - 'A' + 'a' : c1; c2 = c2 >= 'A' && c2 <= 'Z' ? c2 - 'A' + 'a' : c2; const int diff = int{c1} - int{c2}; if (diff != 0) return diff; } } return 0; } } ABSL_NAMESPACE_END }
#include "absl/strings/internal/memutil.h" #include <cstdlib> #include "gtest/gtest.h" namespace { TEST(MemUtil, memcasecmp) { const char a[] = "hello there"; EXPECT_EQ(absl::strings_internal::memcasecmp(a, "heLLO there", sizeof("hello there") - 1), 0); EXPECT_EQ(absl::strings_internal::memcasecmp(a, "heLLO therf", sizeof("hello there") - 1), -1); EXPECT_EQ(absl::strings_internal::memcasecmp(a, "heLLO therf", sizeof("hello there") - 2), 0); EXPECT_EQ(absl::strings_internal::memcasecmp(a, "whatever", 0), 0); } }
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/memutil.cc
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/memutil_test.cc
03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4
fcc9cb62-4002-4ced-aa88-1c3431b4b183
cpp
tensorflow/tensorflow
unbounded_thread_pool
tensorflow/core/data/unbounded_thread_pool.cc
tensorflow/core/data/unbounded_thread_pool_test.cc
#include "tensorflow/core/data/unbounded_thread_pool.h" #include <functional> #include <memory> #include <utility> #include "absl/memory/memory.h" #include "tensorflow/core/framework/dataset.h" #include "tensorflow/core/lib/core/notification.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/resource.h" #include "tensorflow/core/platform/unbounded_work_queue.h" namespace tensorflow { namespace data { class UnboundedThreadPool::LogicalThreadWrapper : public Thread { public: explicit LogicalThreadWrapper(std::shared_ptr<Notification> done) : done_(std::move(done)) {} ~LogicalThreadWrapper() override { done_->WaitForNotification(); } private: std::shared_ptr<Notification> done_; }; class UnboundedThreadPool::LogicalThreadFactory : public ThreadFactory { public: explicit LogicalThreadFactory(UnboundedThreadPool* pool) : pool_(pool) {} std::unique_ptr<Thread> StartThread(const string& name, std::function<void()> fn) override { auto done = std::make_shared<Notification>(); pool_->ScheduleOnWorkQueue(std::move(fn), done); return std::make_unique<LogicalThreadWrapper>(std::move(done)); } private: UnboundedThreadPool* const pool_; }; std::shared_ptr<ThreadFactory> UnboundedThreadPool::get_thread_factory() { return std::make_shared<LogicalThreadFactory>(this); } void UnboundedThreadPool::Schedule(std::function<void()> fn) { auto tagged_fn = [fn = std::move(fn)]() { tensorflow::ResourceTagger tag(kTFDataResourceTag, "ThreadPool"); fn(); }; ScheduleOnWorkQueue(std::move(tagged_fn), nullptr); } int UnboundedThreadPool::NumThreads() const { return -1; } int UnboundedThreadPool::CurrentThreadId() const { return -1; } namespace { void WorkQueueFunc(const std::function<void()>& fn, std::shared_ptr<Notification> done) { fn(); if (done) { done->Notify(); } } } void UnboundedThreadPool::ScheduleOnWorkQueue( std::function<void()> fn, std::shared_ptr<Notification> done) { unbounded_work_queue_.Schedule( std::bind(&WorkQueueFunc, std::move(fn), std::move(done))); } } }
#include "tensorflow/core/data/unbounded_thread_pool.h" #include <atomic> #include <memory> #include <vector> #include "tensorflow/core/lib/random/random.h" #include "tensorflow/core/platform/blocking_counter.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace data { namespace { TEST(UnboundedThreadPool, ConcurrentThreadCreation) { UnboundedThreadPool pool(Env::Default(), "test"); auto thread_factory = pool.get_thread_factory(); std::vector<std::unique_ptr<Thread>> threads; const int kNumThreadsToCreate = 10; std::atomic<int> i(0); for (int j = 0; j < kNumThreadsToCreate; ++j) { threads.push_back(thread_factory->StartThread("", [=, &i, &thread_factory]() { std::vector<std::unique_ptr<Thread>> nested_threads; for (int k = 0; k < kNumThreadsToCreate; ++k) { nested_threads.push_back( thread_factory->StartThread("", [&i]() { ++i; })); } nested_threads.clear(); })); } threads.clear(); EXPECT_EQ(i, kNumThreadsToCreate * kNumThreadsToCreate); } TEST(UnboundedThreadPool, MultipleBlockingThreads) { UnboundedThreadPool pool(Env::Default(), "test"); auto thread_factory = pool.get_thread_factory(); std::vector<std::unique_ptr<Thread>> threads; std::vector<int> round_sizes = {5, 10, 15, 20}; for (const int round_size : round_sizes) { Notification n; BlockingCounter bc(round_size); for (int j = 0; j < round_size; ++j) { threads.push_back(thread_factory->StartThread("", [&bc, &n]() { bc.DecrementCount(); n.WaitForNotification(); })); } bc.Wait(); n.Notify(); threads.clear(); } } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/unbounded_thread_pool.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/unbounded_thread_pool_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
3bb57b45-0742-480e-8a24-200b67ed49fa
cpp
tensorflow/tensorflow
hlo_element_type_converter
third_party/xla/xla/service/hlo_element_type_converter.cc
third_party/xla/xla/service/hlo_element_type_converter_test.cc
#include "xla/service/hlo_element_type_converter.h" #include <memory> #include <string> #include <utility> #include <vector> #include "xla/hlo/evaluator/hlo_evaluator.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/layout_util.h" #include "xla/literal.h" #include "xla/shape_util.h" #include "xla/types.h" #include "tsl/platform/errors.h" namespace xla { namespace { HloInstruction* ToElementType(HloInstruction* hlo, PrimitiveType type) { if (hlo->shape().element_type() != type) { Shape shape = ShapeUtil::ChangeElementType(hlo->shape(), type); hlo = hlo->parent()->AddInstruction( HloInstruction::CreateConvert(shape, hlo)); } CHECK_EQ(hlo->shape().element_type(), type); return hlo; } bool HasOperandType(HloInstruction* hlo, PrimitiveType type) { for (HloInstruction* operand : hlo->operands()) { if (operand->shape().element_type() == type) { return true; } } return false; } Shape GetConvertedTupleShape(const Shape& shape, PrimitiveType from_type, PrimitiveType to_type) { std::vector<Shape> new_tuple_subshapes; const int64_t n = ShapeUtil::TupleElementCount(shape); new_tuple_subshapes.reserve(n); for (int64_t i = 0; i < n; ++i) { Shape subshape = ShapeUtil::GetTupleElementShape(shape, i); CHECK(!subshape.IsTuple()); if (subshape.element_type() == from_type) { subshape = ShapeUtil::ChangeElementType(subshape, to_type); } new_tuple_subshapes.push_back(subshape); } return ShapeUtil::MakeTupleShape(new_tuple_subshapes); } HloInstruction* ConvertTupleElements(HloInstruction* hlo, const Shape& to_shape) { const Shape& shape = hlo->shape(); HloComputation* computation = hlo->parent(); std::vector<HloInstruction*> tuple_elements; for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) { const Shape& ele_shape = ShapeUtil::GetTupleElementShape(shape, i); HloInstruction* element = computation->AddInstruction( HloInstruction::CreateGetTupleElement(ele_shape, hlo, i)); const Shape& to_ele_shape = ShapeUtil::GetTupleElementShape(to_shape, i); CHECK(!ele_shape.IsTuple()); if (ele_shape.element_type() != to_ele_shape.element_type()) { element = computation->AddInstruction( HloInstruction::CreateConvert(to_ele_shape, element)); } tuple_elements.push_back(element); } return computation->AddInstruction( HloInstruction::CreateTuple(tuple_elements)); } } HloElementTypeConverter::HloElementTypeConverter( PrimitiveType eliminate_type, PrimitiveType replace_with_type) : eliminate_type_(eliminate_type), replace_with_type_(replace_with_type) {} absl::StatusOr<bool> HloElementTypeConverter::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { XLA_VLOG_LINES( 3, "HloElementTypeConverter::Run(), before:\n" + module->ToString()); if (eliminate_type_ == replace_with_type_) { return false; } HloCloneContext context(module); bool changed = false; for (auto* computation : module->computations(execution_threads)) { for (auto* hlo : computation->MakeInstructionPostOrder()) { const auto opcode = hlo->opcode(); if (opcode == HloOpcode::kParameter || opcode == HloOpcode::kConstant || opcode == HloOpcode::kTuple || opcode == HloOpcode::kConvert || opcode == HloOpcode::kBitcastConvert || opcode == HloOpcode::kGetTupleElement || opcode == HloOpcode::kInfeed || opcode == HloOpcode::kOutfeed) { continue; } if (opcode == HloOpcode::kCustomCall) { continue; } if (opcode == HloOpcode::kWhile || opcode == HloOpcode::kCall 
|| opcode == HloOpcode::kAllReduce || opcode == HloOpcode::kReduceScatter || opcode == HloOpcode::kAllReduceStart || opcode == HloOpcode::kFusion || opcode == HloOpcode::kMap || opcode == HloOpcode::kReduce || opcode == HloOpcode::kReduceWindow || opcode == HloOpcode::kScatter || opcode == HloOpcode::kSelectAndScatter || opcode == HloOpcode::kSort || opcode == HloOpcode::kConditional) { continue; } TF_RET_CHECK(hlo->called_computations().empty()) << hlo->ToString(); bool nullary = hlo->operands().empty(); bool wrong_element_type = hlo->shape().element_type() == eliminate_type_; bool should_eliminate_type = (nullary && wrong_element_type) || HasOperandType(hlo, eliminate_type_); if (!should_eliminate_type) { TF_RET_CHECK(hlo->shape().element_type() != eliminate_type_); continue; } std::vector<HloInstruction*> new_operands; const auto& operands = hlo->operands(); new_operands.reserve(operands.size()); for (HloInstruction* operand : operands) { if (operand->shape().element_type() == eliminate_type_) { operand = ToElementType(operand, replace_with_type_); } new_operands.push_back(operand); } HloInstruction* new_hlo; if (hlo->shape().element_type() == eliminate_type_) { Shape shape = ShapeUtil::ChangeElementType(hlo->shape(), replace_with_type_); new_hlo = computation->AddInstruction( hlo->CloneWithNewOperands(shape, new_operands, &context)); TF_RETURN_IF_ERROR(new_hlo->CopyAllControlDepsFrom(hlo)); new_hlo = ToElementType(new_hlo, eliminate_type_); } else if (hlo->shape().IsTuple()) { Shape old_shape = hlo->shape(); Shape new_shape = GetConvertedTupleShape(hlo->shape(), eliminate_type_, replace_with_type_); new_hlo = computation->AddInstruction( hlo->CloneWithNewOperands(new_shape, new_operands, &context)); TF_RETURN_IF_ERROR(new_hlo->CopyAllControlDepsFrom(hlo)); new_hlo = ConvertTupleElements(new_hlo, old_shape); } else { new_hlo = computation->AddInstruction( hlo->CloneWithNewOperands(hlo->shape(), new_operands, &context)); TF_RETURN_IF_ERROR(new_hlo->CopyAllControlDepsFrom(hlo)); } TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(new_hlo)); TF_RETURN_IF_ERROR(hlo->DropAllControlDeps()); TF_RETURN_IF_ERROR(computation->RemoveInstruction(hlo)); changed = true; } } XLA_VLOG_LINES( 2, "HloElementTypeConverter::Run(), after:\n" + module->ToString()); return changed; } }
#include "xla/service/hlo_element_type_converter.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace { namespace op = xla::testing::opcode_matchers; using ::testing::Contains; using ::testing::ElementsAre; using ::testing::Eq; using ::testing::Not; using ::testing::ResultOf; using HloElementTypeConverterTest = HloTestBase; TEST_F(HloElementTypeConverterTest, CustomCallsNotConverted) { const std::string& hlo_string = R"( HloModule custom_call ENTRY CustomCall { constant = bf16[1]{0} constant({12345}) ROOT custom-call = bf16[1,2,3]{0,2,1} custom-call(constant), custom_call_target="foo" } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); HloElementTypeConverter type_converter(BF16, F32); TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get())); EXPECT_FALSE(converted); } TEST_F(HloElementTypeConverterTest, InfeedsOutfeedsNotConverted) { const std::string& hlo_string = R"( HloModule InfeedOutfeed ENTRY RoundTrip16MiBR1.v2 { token0 = token[] after-all() infeed = (bf16[4]{0}, token[]) infeed(token0) ROOT infeed.data = bf16[4]{0} get-tuple-element(infeed), index=0 outfeed = token[] outfeed(infeed.data, token0) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); HloElementTypeConverter type_converter(BF16, F32); TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get())); EXPECT_FALSE(converted); } TEST_F(HloElementTypeConverterTest, OperationsInNestedTuplesConverted) { const std::string& hlo_string = R"( HloModule NestedTuples ENTRY NestedTuples.v5 { constant.2 = f32[2]{0} constant({1, 2}) constant.3 = bf16[2]{0} constant({42, 42}) add = bf16[2]{0} add(constant.2, constant.3) tuple = (f32[2]{0}, bf16[2]{0}) tuple(constant.2, add) constant.5 = bf16[2]{0} constant({22, 44}) ROOT tuple.1 = ((f32[2]{0}, bf16[2]{0}), bf16[2]{0}) tuple(tuple, constant.5) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); HloElementTypeConverter type_converter(BF16, F32); TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get())); EXPECT_TRUE(converted); const HloInstruction* bf16_op = module->entry_computation()->root_instruction()->operand(0)->operand(1); EXPECT_THAT(bf16_op, op::Convert(op::Add(op::Constant(), op::Convert()))); } TEST_F(HloElementTypeConverterTest, BatchNormGradBF16Converted) { const std::string& hlo_string = R"( HloModule BatchNormGrad ENTRY BatchNormGrad.v6 { constant.4 = bf16[2,2,2,1]{3,2,1,0} constant({ { { {0}, {0} }, { {0}, {0} } }, { { {0}, {0} }, { {0}, {0} } } }) constant.5 = bf16[2]{0} constant({1, 1}) constant.6 = bf16[2]{0} constant({0, 0}) constant.7 = bf16[2]{0} constant({1, 1}) constant.8 = bf16[2,2,2,1]{3,2,1,0} constant({ { { {1}, {2} }, { {3}, {4} } }, { { {5}, {6} }, { {7}, {8} } } }) ROOT batch-norm-grad = (bf16[2,2,2,1]{3,2,1,0}, bf16[2]{0}, bf16[2]{0}) batch-norm-grad(constant.4, constant.5, constant.6, constant.7, constant.8), epsilon=0, feature_index=2 } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); HloElementTypeConverter type_converter(BF16, F32); TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get())); EXPECT_TRUE(converted); const HloInstruction* tuple_instr = module->entry_computation()->root_instruction(); ::testing::Matcher<const ::xla::HloInstruction*> batch_norm = op::BatchNormGrad(); EXPECT_THAT(tuple_instr, op::Tuple(op::Convert(op::GetTupleElement(batch_norm, 0)), op::Convert(op::GetTupleElement(batch_norm, 1)), op::Convert(op::GetTupleElement(batch_norm, 2)))); } 
TEST_F(HloElementTypeConverterTest, RngIsRemoved) { const std::string& hlo_string = R"( HloModule RngIsRemoved ENTRY main { constant.3 = bf16[] constant(0) constant.4 = bf16[] constant(1) ROOT rng = bf16[1,1000,20]{2,1,0} rng(constant.3, constant.4), distribution=rng_uniform } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); HloElementTypeConverter type_converter(BF16, F32); TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get())); EXPECT_TRUE(converted); HloPredicate is_bf16_rng = [](const HloInstruction* inst) { return inst->shape().element_type() == BF16 && inst->opcode() == HloOpcode::kRng; }; EXPECT_THAT(module->entry_computation()->instructions(), Not(Contains(ResultOf(is_bf16_rng, Eq(true))))); } TEST_F(HloElementTypeConverterTest, RngCtrlDep) { const std::string& hlo_string = R"( HloModule RngIsRemoved ENTRY main { constant.3 = bf16[] constant(0) constant.4 = bf16[] constant(1) rng0 = bf16[1,2000,20]{2,1,0} rng(constant.3, constant.4), distribution=rng_uniform ROOT rng1 = bf16[1,1000,20]{2,1,0} rng(constant.3, constant.4), control-predecessors={%rng0}, distribution=rng_uniform } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); HloElementTypeConverter type_converter(BF16, F32); TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get())); EXPECT_TRUE(converted); HloInstruction *rng0, *rng1; for (auto* inst : module->entry_computation()->instructions()) { if (inst->opcode() == HloOpcode::kRng) { const Shape& shape = inst->shape(); ASSERT_EQ(shape.dimensions_size(), 3); ASSERT_TRUE(shape.dimensions(1) == 2000 || shape.dimensions(1) == 1000); if (shape.dimensions(1) == 2000) { rng0 = inst; } else { rng1 = inst; } } } EXPECT_THAT(rng0->control_successors(), ElementsAre(rng1)); EXPECT_THAT(rng1->control_predecessors(), ElementsAre(rng0)); } TEST_F(HloElementTypeConverterTest, BitcastConvertIsUnmodified) { const std::string& hlo_string = R"( HloModule test ENTRY test { p = bf16[] parameter(0) ROOT c = u16[] bitcast-convert(p) })"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); HloElementTypeConverter converter(BF16, F32); TF_ASSERT_OK_AND_ASSIGN(bool converted, RunHloPass(&converter, module.get())); EXPECT_FALSE(converted); } } }
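As a sketch of how the pass is typically driven (mirroring the tests above; the wrapper function name is illustrative, not XLA API):

#include "absl/status/statusor.h"
#include "xla/service/hlo_element_type_converter.h"

// Rewrites BF16-typed operations to F32, inserting converts at operand and
// result boundaries; returns true iff anything changed. As the pass body
// above shows, parameters, constants, tuples, converts, custom calls,
// infeed/outfeed, and ops with nested computations are left untouched.
absl::StatusOr<bool> DemoteBf16ToF32(xla::HloModule* module) {
  xla::HloElementTypeConverter converter(xla::BF16, xla::F32);
  return converter.Run(module);
}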
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_element_type_converter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_element_type_converter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
59953132-5105-4aa4-93e8-9e703673d57a
cpp
tensorflow/tensorflow
flat_map_dataset_op
tensorflow/core/kernels/data/flat_map_dataset_op.cc
tensorflow/core/kernels/data/flat_map_dataset_op_test.cc
#include "tensorflow/core/kernels/data/flat_map_dataset_op.h" #include <algorithm> #include <cstdint> #include <cstdlib> #include <string> #include <utility> #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/common_runtime/graph_runner.h" #include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h" #include "tensorflow/core/data/captured_function.h" #include "tensorflow/core/data/dataset_utils.h" #include "tensorflow/core/data/flat_map_utils.h" #include "tensorflow/core/data/name_utils.h" #include "tensorflow/core/data/serialization_utils.h" #include "tensorflow/core/framework/dataset.h" #include "tensorflow/core/framework/dataset_options.pb.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/partial_tensor_shape.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/lib/random/random.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" #include "tsl/platform/thread_annotations.h" namespace tensorflow { namespace data { constexpr const char* const FlatMapDatasetOp::kDatasetType; constexpr const char* const FlatMapDatasetOp::kInputDataset; constexpr const char* const FlatMapDatasetOp::kOtherArguments; constexpr const char* const FlatMapDatasetOp::kFunc; constexpr const char* const FlatMapDatasetOp::kTarguments; constexpr const char* const FlatMapDatasetOp::kOutputTypes; constexpr const char* const FlatMapDatasetOp::kOutputShapes; constexpr int64_t kMaxRandomIndexingCardinality = 100; constexpr char kCycleLength[] = "cycle_length"; constexpr char kElementIndex[] = "element_index"; constexpr char kInputsSize[] = "inputs_size"; constexpr char kInputs[] = "inputs"; constexpr char kCurrentElementIteratorUninitialized[] = "current_element_iterator_uninitialized"; constexpr char kExhausted[] = "exhausted"; class FlatMapDatasetOp::Dataset : public DatasetBase { public: Dataset(OpKernelContext* ctx, const DatasetBase* input, std::unique_ptr<CapturedFunction> captured_func, const DataTypeVector& output_types, const std::vector<PartialTensorShape>& output_shapes) : DatasetBase(DatasetContext(ctx)), input_(input), captured_func_(std::move(captured_func)), output_types_(output_types), output_shapes_(output_shapes), random_access_handler_(ctx, input, *captured_func_) { input_->Ref(); random_indexing_compatible_ = input_->RandomIndexingCompatible(); if (random_indexing_compatible_.ok() && input_->Cardinality() > kMaxRandomIndexingCardinality) { random_indexing_compatible_ = absl::FailedPreconditionError( absl::StrCat("The cardinality of the input to ", type_string(), " is too large to support global shuffling. 
It is ", input_->Cardinality(), ", which is greater than ", kMaxRandomIndexingCardinality)); } } ~Dataset() override { input_->Unref(); } std::unique_ptr<IteratorBase> MakeIteratorInternal( const string& prefix) const override { return std::make_unique<Iterator>(Iterator::Params{ this, name_utils::IteratorPrefix(kDatasetType, prefix)}); } const DataTypeVector& output_dtypes() const override { return output_types_; } const std::vector<PartialTensorShape>& output_shapes() const override { return output_shapes_; } string DebugString() const override { return name_utils::DatasetDebugString(kDatasetType); } int64_t CardinalityInternal(CardinalityOptions options) const override { if (options.compute_level() < CardinalityOptions::CARDINALITY_COMPUTE_MODERATE) { return kUnknownCardinality; } absl::StatusOr<int64_t> cardinality = random_access_handler_.Cardinality(); if (!cardinality.ok()) { LOG(ERROR) << "Unable to compute cardinality for dataset " << DebugString() << " due to error: " << cardinality.status(); return kUnknownCardinality; } return *cardinality; } Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override { inputs->push_back(input_); return absl::OkStatus(); } Status CheckExternalState() const override { TF_RETURN_IF_ERROR(captured_func_->CheckExternalState()); return input_->CheckExternalState(); } absl::Status RandomIndexingCompatible() const override { return absl::UnimplementedError( "Please consider applying maps on each dataset, concatenating them " "into " "one dataset and apply global shuffle dataset op onto the " "dataset to achieve the same result as flat map with global " "shuffling."); } protected: Status AsGraphDefInternal(SerializationContext* ctx, DatasetGraphDefBuilder* b, Node** output) const override { Node* input_graph_node = nullptr; TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node)); std::vector<Node*> other_arguments; DataTypeVector other_arguments_types; TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments, &other_arguments_types)); AttrValue f; b->BuildAttrValue(captured_func_->func(), &f); AttrValue other_arguments_types_attr; b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr); TF_RETURN_IF_ERROR(b->AddDataset( this, {std::make_pair(0, input_graph_node)}, {std::make_pair(1, other_arguments)}, {std::make_pair(kFunc, f), std::make_pair(kTarguments, other_arguments_types_attr)}, output)); return absl::OkStatus(); } private: class Iterator : public DatasetIterator<Dataset> { public: explicit Iterator(const Params& params) : DatasetIterator<Dataset>(params) {} bool SymbolicCheckpointCompatible() const override { return true; } Status Initialize(IteratorContext* ctx) override { mutex_lock l(mu_); input_ckpt_ = std::make_unique<MemoryCheckpoint>(ctx->id_registry()); TF_RETURN_IF_ERROR( dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_)); return dataset()->captured_func_->Instantiate( ctx, &instantiated_captured_func_); } Status GetNextInternal(IteratorContext* ctx, std::vector<Tensor>* out_tensors, bool* end_of_sequence) override { if (ctx->index_mapper()) { return Get(ctx, out_tensors, end_of_sequence); } mutex_lock l(mu_); do { if (!input_impl_) { *end_of_sequence = true; return absl::OkStatus(); } if (current_element_iterator_) { bool end_of_element; auto nested_ctx = MakeNestedIteratorContext(ctx); TF_RETURN_IF_ERROR(current_element_iterator_->GetNext( &nested_ctx, out_tensors, &end_of_element)); ctx->MergeCheckpoint(nested_ctx.checkpoint()); if (!end_of_element) { 
*end_of_sequence = false; return absl::OkStatus(); } ctx->MergeCheckpoint(input_ckpt_.get()); ctx->PurgeCheckpoint(current_element_iterator_->prefix()); current_element_iterator_.reset(); } inputs_.clear(); auto input_ctx = std::make_unique<IteratorContext>(*ctx); TF_RETURN_IF_ERROR( input_impl_->GetNext(input_ctx.get(), &inputs_, end_of_sequence)); input_ckpt_->Merge(input_ctx->checkpoint()); if (*end_of_sequence) { input_impl_.reset(); return absl::OkStatus(); } TF_RETURN_IF_ERROR( BuildCurrentElementIteratorLocked(ctx, true)); } while (true); } Status SkipInternal(IteratorContext* ctx, int num_to_skip, bool* end_of_sequence, int* num_skipped) override { mutex_lock l(mu_); *num_skipped = 0; while (*num_skipped < num_to_skip) { if (!input_impl_) { *end_of_sequence = true; return absl::OkStatus(); } if (current_element_iterator_) { bool end_of_element; auto nested_ctx = MakeNestedIteratorContext(ctx); int last_num_skipped; TF_RETURN_IF_ERROR(current_element_iterator_->Skip( &nested_ctx, num_to_skip - *num_skipped, &end_of_element, &last_num_skipped)); *num_skipped += last_num_skipped; ctx->MergeCheckpoint(nested_ctx.checkpoint()); if (!end_of_element) { if (*num_skipped != num_to_skip) { return absl::InternalError(absl::StrFormat( "Expected `num_skipped` and `num_to_skip` to be the same. Got" " %d(num_skipped) and %d(num_to_skip)", *num_skipped, num_to_skip)); } continue; } ctx->MergeCheckpoint(input_ckpt_.get()); ctx->PurgeCheckpoint(current_element_iterator_->prefix()); current_element_iterator_.reset(); } inputs_.clear(); auto input_ctx = std::make_unique<IteratorContext>(*ctx); TF_RETURN_IF_ERROR( input_impl_->GetNext(input_ctx.get(), &inputs_, end_of_sequence)); input_ckpt_->Merge(input_ctx->checkpoint()); if (*end_of_sequence) { input_impl_.reset(); *end_of_sequence = true; return absl::OkStatus(); } TF_RETURN_IF_ERROR( BuildCurrentElementIteratorLocked(ctx, false)); } *end_of_sequence = false; return absl::OkStatus(); } absl::Status Get(IteratorContext* ctx, std::vector<Tensor>* out_tensors, bool* end_of_sequence) TF_LOCKS_EXCLUDED(mu_) { mutex_lock l(mu_); TF_ASSIGN_OR_RETURN(size_t parent_index, ctx->index_mapper()(element_count_)); FlatMapRandomAccessHandler& random_access = dataset()->random_access_handler_; absl::StatusOr<int64_t> dataset_index = random_access.GetDatasetIndex(parent_index); if (absl::IsOutOfRange(dataset_index.status())) { *end_of_sequence = true; return absl::OkStatus(); } TF_RETURN_IF_ERROR(dataset_index.status()); if (dataset_iterators_.empty()) { TF_ASSIGN_OR_RETURN( dataset_iterators_, random_access.MakeInputIterators(ctx, this, prefix())); next_positions_.resize(dataset_iterators_.size(), 0); input_element_counts_.resize(dataset_iterators_.size(), 0); } IteratorContext::Params params(ctx); params.index_mapper = GetFlatMapIndexMapper(ctx->index_mapper(), *dataset_index); IteratorContext global_shuffle_ctx(std::move(params)); TF_RETURN_IF_ERROR(dataset_iterators_[*dataset_index]->GetNext( &global_shuffle_ctx, out_tensors, end_of_sequence)); ctx->MergeCheckpoint(global_shuffle_ctx.checkpoint()); ++element_count_; ++input_element_counts_[*dataset_index]; return absl::OkStatus(); } IndexMapperFn GetFlatMapIndexMapper(IndexMapperFn parent_index_mapper, size_t input_dataset_index) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { absl::StatusOr<int64_t> cardinality = dataset()->random_access_handler_.Cardinality(); return [this, parent_index_mapper = std::move(parent_index_mapper), input_dataset_index, cardinality = std::move(cardinality)]( size_t element_position) -> 
absl::StatusOr<size_t> { if (!cardinality.ok() || *cardinality < 0) { return absl::FailedPreconditionError( "Global shuffling requires finite cardinalities."); } FlatMapRandomAccessHandler& random_access = dataset()->random_access_handler_; while (next_positions_[input_dataset_index] < *cardinality) { size_t index = next_positions_[input_dataset_index]; if (parent_index_mapper != nullptr) { TF_ASSIGN_OR_RETURN(index, parent_index_mapper(index)); } ++next_positions_[input_dataset_index]; TF_ASSIGN_OR_RETURN(int64_t shuffled_dataset_index, random_access.GetDatasetIndex(index)); if (input_dataset_index == shuffled_dataset_index) { if (input_dataset_index > 0) { TF_ASSIGN_OR_RETURN( int64_t cumulative_cardinality, random_access.CumulativeCardinality(input_dataset_index - 1)); index -= cumulative_cardinality; } return index; } } return *cardinality; }; } protected: std::shared_ptr<model::Node> CreateNode( IteratorContext* ctx, model::Node::Args args) const override { return model::MakeInterleaveManyNode( std::move(args), {model::MakeNonTunableParameter(kCycleLength, 1)}); } Status SaveInternal(SerializationContext* ctx, IteratorStateWriter* writer) override TF_LOCKS_EXCLUDED(mu_) { TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus( dataset()->captured_func_->CheckExternalState())); mutex_lock l(mu_); TF_RETURN_IF_ERROR(writer->WriteScalar( prefix(), kExhausted, static_cast<int64_t>(!input_impl_))); if (input_impl_) { TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_)); TF_RETURN_IF_ERROR( writer->WriteScalar(prefix(), kElementIndex, element_index_)); TF_RETURN_IF_ERROR(writer->WriteScalar( prefix(), kCurrentElementIteratorUninitialized, static_cast<int64_t>(!current_element_iterator_))); if (current_element_iterator_ && !ctx->symbolic_checkpoint()) { TF_RETURN_IF_ERROR( writer->WriteScalar(prefix(), kInputsSize, inputs_.size())); for (int i = 0; i < inputs_.size(); i++) { TF_RETURN_IF_ERROR(writer->WriteTensor( prefix(), strings::StrCat(kInputs, "[", i, "]"), inputs_[i])); } TF_RETURN_IF_ERROR(SaveInput(ctx, writer, current_element_iterator_)); } } return absl::OkStatus(); } Status RestoreInternal(IteratorContext* ctx, IteratorStateReader* reader) override TF_LOCKS_EXCLUDED(mu_) { if (ctx->restored_element_count().has_value()) { return RestoreForGlobalShuffle(ctx, reader); } mutex_lock l(mu_); input_impl_.reset(); element_index_ = 0; current_element_iterator_.reset(); inputs_.clear(); int64_t input_exhausted; TF_RETURN_IF_ERROR( reader->ReadScalar(prefix(), kExhausted, &input_exhausted)); if (!static_cast<bool>(input_exhausted)) { TF_RETURN_IF_ERROR( dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_)); TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_)); { int64_t temp; TF_RETURN_IF_ERROR( reader->ReadScalar(prefix(), kElementIndex, &temp)); element_index_ = temp; } int64_t current_element_iterator_uninitialized; TF_RETURN_IF_ERROR( reader->ReadScalar(prefix(), kCurrentElementIteratorUninitialized, &current_element_iterator_uninitialized)); if (!static_cast<bool>(current_element_iterator_uninitialized)) { TF_RETURN_IF_ERROR(RestoreCurrentElementIterator(ctx, reader)); } } return absl::OkStatus(); } Status RestoreForGlobalShuffle(IteratorContext* ctx, IteratorStateReader* reader) TF_LOCKS_EXCLUDED(mu_) { mutex_lock l(mu_); element_count_ = *ctx->restored_element_count(); FlatMapRandomAccessHandler& random_access = dataset()->random_access_handler_; TF_ASSIGN_OR_RETURN(int64_t cardinality, random_access.Cardinality()); if (dataset_iterators_.empty()) { 
TF_ASSIGN_OR_RETURN( dataset_iterators_, random_access.MakeInputIterators(ctx, this, prefix())); } input_element_counts_.resize(dataset_iterators_.size(), 0); next_positions_.resize(dataset_iterators_.size(), 0); std::fill(input_element_counts_.begin(), input_element_counts_.end(), 0); std::fill(next_positions_.begin(), next_positions_.end(), 0); for (size_t count = 0; count < element_count_ && count < cardinality; ++count) { TF_ASSIGN_OR_RETURN(size_t parent_index, ctx->index_mapper()(count)); absl::StatusOr<size_t> dataset_index = random_access.GetDatasetIndex(parent_index); if (absl::IsOutOfRange(dataset_index.status())) { break; } TF_RETURN_IF_ERROR(dataset_index.status()); ++input_element_counts_[*dataset_index]; next_positions_[*dataset_index] = count + 1; } for (size_t i = 0; i < dataset_iterators_.size(); ++i) { IteratorContext::Params params(ctx); params.restored_element_count = input_element_counts_[i]; IteratorContext ctx_copy(std::move(params)); TF_RETURN_IF_ERROR( RestoreInput(&ctx_copy, reader, dataset_iterators_[i])); ctx->MergeCheckpoint(ctx_copy.checkpoint()); } return absl::OkStatus(); } private: Status BuildCurrentElementIteratorLocked(IteratorContext* ctx, bool is_get_next) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { std::shared_ptr<model::Node> node = is_get_next ? model_node() : nullptr; return MakeIteratorFromInputElement( ctx, this, inputs_, element_index_++, *instantiated_captured_func_, prefix(), &current_element_iterator_, node); } Status RestoreCurrentElementIterator(IteratorContext* ctx, IteratorStateReader* reader) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (ctx->symbolic_checkpoint()) { return RestoreCurrentElementIteratorSymbolic(ctx, reader); } size_t inputs_size; { int64_t temp; TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kInputsSize, &temp)); inputs_size = static_cast<size_t>(temp); } inputs_.reserve(inputs_size); for (int i = 0; i < inputs_size; i++) { inputs_.emplace_back(); TF_RETURN_IF_ERROR(reader->ReadTensor( ctx->flr(), prefix(), strings::StrCat(kInputs, "[", i, "]"), &inputs_.back())); } element_index_--; TF_RETURN_IF_ERROR( BuildCurrentElementIteratorLocked(ctx, false)); TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, current_element_iterator_)); return absl::OkStatus(); }
Status RestoreCurrentElementIteratorSymbolic(IteratorContext* ctx, IteratorStateReader* reader) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { bool end_of_sequence; auto input_ctx = std::make_unique<IteratorContext>(*ctx); TF_RETURN_IF_ERROR( input_impl_->GetNext(input_ctx.get(), &inputs_, &end_of_sequence)); if (end_of_sequence) { return absl::FailedPreconditionError( "Unexpected end of sequence while symbolically restoring " "FlatMapDataset. Please verify that the input produces data " "deterministically."); } input_ckpt_->Merge(input_ctx->checkpoint()); element_index_--; TF_RETURN_IF_ERROR( BuildCurrentElementIteratorLocked(ctx, false)); TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, current_element_iterator_)); return absl::OkStatus(); }
mutex mu_; size_t element_index_ TF_GUARDED_BY(mu_) = 0; std::unique_ptr<MemoryCheckpoint> input_ckpt_ TF_GUARDED_BY(mu_); std::vector<Tensor> inputs_ TF_GUARDED_BY(mu_); std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_; size_t element_count_ TF_GUARDED_BY(mu_) = 0; std::vector<int64_t> input_element_counts_ TF_GUARDED_BY(mu_); std::vector<size_t> next_positions_; std::vector<std::unique_ptr<IteratorBase>> dataset_iterators_ TF_GUARDED_BY(mu_); std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_); std::unique_ptr<IteratorBase> current_element_iterator_ TF_GUARDED_BY(mu_); }; const DatasetBase* const input_; const std::unique_ptr<CapturedFunction> captured_func_; const DataTypeVector output_types_; const std::vector<PartialTensorShape> output_shapes_; absl::Status random_indexing_compatible_ = absl::OkStatus(); mutable FlatMapRandomAccessHandler random_access_handler_; }; FlatMapDatasetOp::FlatMapDatasetOp(OpKernelConstruction* ctx) : UnaryDatasetOpKernel(ctx), graph_def_version_(ctx->graph_def_version()) { OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kFunc, {}, &func_metadata_)); OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_)); OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_)); } void FlatMapDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input, DatasetBase** output) { std::unique_ptr<CapturedFunction> captured_func; OP_REQUIRES_OK(ctx, CapturedFunction::Create(ctx, func_metadata_, kOtherArguments, &captured_func)); *output = new Dataset(ctx, input, std::move(captured_func), output_types_, output_shapes_); } namespace { REGISTER_KERNEL_BUILDER(Name("FlatMapDataset").Device(DEVICE_CPU), FlatMapDatasetOp); REGISTER_INPUT_COLOCATION_EXEMPTION("FlatMapDataset"); } } }
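The kernel above interleaves its core loop with checkpointing, cardinality handling, and global-shuffle index mapping. Stripped of all that, GetNextInternal reduces to the classic flat-map iteration below, a hedged self-contained sketch (FlatMapIter is illustrative, not a TensorFlow type):

#include <cstddef>
#include <functional>
#include <optional>
#include <utility>
#include <vector>

template <typename In, typename Out>
class FlatMapIter {
 public:
  FlatMapIter(std::function<std::optional<In>()> input,
              std::function<std::vector<Out>(const In&)> fn)
      : input_(std::move(input)), fn_(std::move(fn)) {}

  // Drain the current sub-sequence; when it is exhausted, pull the next
  // input element and map it to a fresh sub-sequence, until the input ends.
  std::optional<Out> GetNext() {
    while (true) {
      if (pos_ < current_.size()) return current_[pos_++];
      std::optional<In> next = input_();           // input_impl_->GetNext
      if (!next.has_value()) return std::nullopt;  // end of sequence
      current_ = fn_(*next);  // plays the role of current_element_iterator_
      pos_ = 0;
    }
  }

 private:
  std::function<std::optional<In>()> input_;
  std::function<std::vector<Out>(const In&)> fn_;
  std::vector<Out> current_;
  std::size_t pos_ = 0;
};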
#include "tensorflow/core/kernels/data/flat_map_dataset_op.h" #include "tensorflow/core/data/dataset_test_base.h" namespace tensorflow { namespace data { namespace { constexpr char kNodeName[] = "flat_map_dataset"; class FlatMapDatasetParams : public DatasetParams { public: template <typename T> FlatMapDatasetParams(T input_dataset_params, std::vector<Tensor> other_arguments, FunctionDefHelper::AttrValueWrapper func, std::vector<FunctionDef> func_lib, DataTypeVector type_arguments, DataTypeVector output_dtypes, std::vector<PartialTensorShape> output_shapes, string node_name) : DatasetParams(std::move(output_dtypes), std::move(output_shapes), std::move(node_name)), other_arguments_(std::move(other_arguments)), func_(std::move(func)), func_lib_(std::move(func_lib)), type_arguments_(std::move(type_arguments)) { input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params)); iterator_prefix_ = name_utils::IteratorPrefix(input_dataset_params.dataset_type(), input_dataset_params.iterator_prefix()); } std::vector<Tensor> GetInputTensors() const override { return other_arguments_; } Status GetInputNames(std::vector<string>* input_names) const override { input_names->emplace_back(FlatMapDatasetOp::kInputDataset); for (int i = 0; i < other_arguments_.size(); ++i) { input_names->emplace_back( absl::StrCat(FlatMapDatasetOp::kOtherArguments, "_", i)); } return absl::OkStatus(); } Status GetAttributes(AttributeVector* attr_vector) const override { *attr_vector = {{"f", func_}, {"Targuments", type_arguments_}, {"output_shapes", output_shapes_}, {"output_types", output_dtypes_}, {"metadata", ""}}; return absl::OkStatus(); } string dataset_type() const override { return FlatMapDatasetOp::kDatasetType; } std::vector<FunctionDef> func_lib() const override { return func_lib_; } private: std::vector<Tensor> other_arguments_; FunctionDefHelper::AttrValueWrapper func_; std::vector<FunctionDef> func_lib_; DataTypeVector type_arguments_; }; class FlatMapDatasetOpTest : public DatasetOpsTestBase {}; FlatMapDatasetParams FlatMapDatasetParams1() { auto tensor_slice_dataset_params = TensorSliceDatasetParams( {CreateTensor<int64_t>(TensorShape{3, 3, 1}, {0, 1, 2, 3, 4, 5, 6, 7, 8})}, "tensor_slice"); auto func = FunctionDefHelper::FunctionRef( "MakeTensorSliceDataset", {{"Toutput_types", DataTypeVector({DT_INT64})}, {"output_shapes", std::vector<PartialTensorShape>({PartialTensorShape({1})})}}); return FlatMapDatasetParams( std::move(tensor_slice_dataset_params), {}, func, {test::function::MakeTensorSliceDataset()}, {}, {DT_INT64}, {PartialTensorShape({1})}, kNodeName); } FlatMapDatasetParams InvalidFlatMapDatasetParams() { auto tensor_slice_dataset_params = TensorSliceDatasetParams( {CreateTensor<int64_t>(TensorShape{3, 3, 1}, {0, 1, 2, 3, 4, 5, 6, 7, 8})}, "tensor_slice"); auto func = FunctionDefHelper::FunctionRef( "NonZero", {{"T", DT_INT64}}); return FlatMapDatasetParams(std::move(tensor_slice_dataset_params), {}, func, {test::function::NonZero()}, {}, {DT_INT64}, {PartialTensorShape({1})}, kNodeName); } std::vector<GetNextTestCase<FlatMapDatasetParams>> GetNextTestCases() { return { {FlatMapDatasetParams1(), CreateTensors<int64_t>(TensorShape({1}), {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}})}}; } ITERATOR_GET_NEXT_TEST_P(FlatMapDatasetOpTest, FlatMapDatasetParams, GetNextTestCases()) std::vector<SkipTestCase<FlatMapDatasetParams>> SkipTestCases() { return {{FlatMapDatasetParams1(), 2, 2, true, CreateTensors<int64_t>(TensorShape({1}), {{2}})}, {FlatMapDatasetParams1(), 4, 4, true, 
CreateTensors<int64_t>(TensorShape({1}), {{4}})}, {FlatMapDatasetParams1(), 9, 9, false}, {FlatMapDatasetParams1(), 10, 9, false}}; } ITERATOR_SKIP_TEST_P(FlatMapDatasetOpTest, FlatMapDatasetParams, SkipTestCases()) TEST_F(FlatMapDatasetOpTest, DatasetNodeName) { auto dataset_params = FlatMapDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name())); } TEST_F(FlatMapDatasetOpTest, DatasetTypeString) { auto dataset_params = FlatMapDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetTypeString( name_utils::OpName(FlatMapDatasetOp::kDatasetType))); } TEST_F(FlatMapDatasetOpTest, DatasetOutputDtypes) { auto dataset_params = FlatMapDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetOutputDtypes(dataset_params.output_dtypes())); } TEST_F(FlatMapDatasetOpTest, DatasetOutputShapes) { auto dataset_params = FlatMapDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetOutputShapes(dataset_params.output_shapes())); } TEST_F(FlatMapDatasetOpTest, Cardinality) { auto dataset_params = FlatMapDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetCardinality(kUnknownCardinality)); } TEST_F(FlatMapDatasetOpTest, IteratorOutputDtypes) { auto dataset_params = FlatMapDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckIteratorOutputDtypes(dataset_params.output_dtypes())); } TEST_F(FlatMapDatasetOpTest, IteratorOutputShapes) { auto dataset_params = FlatMapDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckIteratorOutputShapes(dataset_params.output_shapes())); } TEST_F(FlatMapDatasetOpTest, IteratorPrefix) { auto dataset_params = FlatMapDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix( FlatMapDatasetOp::kDatasetType, dataset_params.iterator_prefix()))); } std::vector<IteratorSaveAndRestoreTestCase<FlatMapDatasetParams>> IteratorSaveAndRestoreTestCases() { return { {FlatMapDatasetParams1(), {0, 4, 11}, CreateTensors<int64_t>(TensorShape({1}), {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}})}}; } ITERATOR_SAVE_AND_RESTORE_TEST_P(FlatMapDatasetOpTest, FlatMapDatasetParams, IteratorSaveAndRestoreTestCases()) TEST_F(FlatMapDatasetOpTest, InvalidMapFunc) { auto dataset_params = InvalidFlatMapDatasetParams(); TF_ASSERT_OK(Initialize(dataset_params)); bool end_of_sequence = false; std::vector<Tensor> out_tensors; EXPECT_EQ( iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence) .code(), absl::StatusCode::kInvalidArgument); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/flat_map_dataset_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/flat_map_dataset_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
2f0470b6-f9db-44fe-a6aa-ed8d231c295a
cpp
tensorflow/tensorflow
validate
tensorflow/core/graph/validate.cc
tensorflow/core/graph/validate_test.cc
#include "tensorflow/core/graph/validate.h" #include "absl/container/flat_hash_set.h" #include "absl/strings/string_view.h" #include "tensorflow/core/framework/graph_def_util.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/op_def_util.h" #include "tensorflow/core/framework/versions.pb.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace graph { Status ValidateGraphDef(const GraphDef& graph_def, const OpRegistryInterface& op_registry) { Status s; const int version = graph_def.versions().producer(); for (const NodeDef& node_def : graph_def.node()) { const OpDef* op_def; TF_RETURN_IF_ERROR(op_registry.LookUpOpDef(node_def.op(), &op_def)); TF_RETURN_IF_ERROR(ValidateNodeDef(node_def, *op_def)); TF_RETURN_IF_ERROR(CheckOpDeprecation(*op_def, version)); } return s; } Status ValidateGraphDefAgainstOpRegistry( const GraphDef& graph_def, const OpRegistryInterface& op_registry) { GraphDef copy(graph_def); TF_RETURN_IF_ERROR(AddDefaultAttrsToGraphDef(&copy, op_registry, 0)); return ValidateGraphDef(copy, op_registry); } Status ValidateGraphDefAgainstOpList(const GraphDef& graph_def, const OpList& op_list) { OpListOpRegistry registry(&op_list); return ValidateGraphDefAgainstOpRegistry(graph_def, registry); } void GetOpListForValidation(OpList* op_list, const OpRegistry& op_registry) { op_registry.Export(false, op_list); RemoveDescriptionsFromOpList(op_list); } Status ValidateGraphHasNoCycle(const Graph& graph) { std::vector<const Node*> ready; std::vector<int> pending_count(graph.num_node_ids(), 0); for (int i = 0; i < graph.num_node_ids(); ++i) { const Node* n = graph.FindNodeId(i); if (n == nullptr) continue; pending_count[i] = n->in_edges().size(); if (n->IsMerge()) { for (const Edge* e : n->in_edges()) { if (!e->IsControlEdge() && e->src()->IsNextIteration()) { pending_count[i]--; } } } if (pending_count[i] == 0) { ready.push_back(n); } } int processed = 0; while (!ready.empty()) { const Node* node = ready.back(); ready.pop_back(); ++processed; for (const Edge* out : node->out_edges()) { const int output_id = out->dst()->id(); pending_count[output_id]--; if (pending_count[output_id] == 0) { ready.push_back(out->dst()); } } } if (processed < graph.num_nodes()) { std::vector<string> nodes_in_cycle; for (int i = 0; i < pending_count.size() && nodes_in_cycle.size() < 3; ++i) { if (pending_count[i] != 0) { nodes_in_cycle.push_back(graph.FindNodeId(i)->name()); } } return errors::InvalidArgument( "Graph is invalid, contains a cycle with ", graph.num_nodes() - processed, " nodes, including: ", absl::StrJoin(nodes_in_cycle, ", ")); } return absl::OkStatus(); } Status VerifyNoDuplicateNodeNames(const GraphDef& graph) { absl::flat_hash_set<absl::string_view> nodes; for (const auto& node : graph.node()) { if (nodes.contains(node.name())) { return errors::AlreadyExists("Node already exists: ", node.name()); } nodes.insert(node.name()); } return absl::OkStatus(); } } }
#include "tensorflow/core/graph/validate.h" #include <string> #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/graph_def_util.h" #include "tensorflow/core/framework/op_def_builder.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/graph/graph_def_builder.h" #include "tensorflow/core/graph/subgraph.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/status_matchers.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { REGISTER_OP("FloatInput").Output("o: float"); REGISTER_OP("Int32Input").Output("o: int32"); TEST(ValidateGraphDefTest, TestValidGraph) { const string graph_def_str = "node { name: 'A' op: 'FloatInput' }" "node { name: 'B' op: 'FloatInput' }" "node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }" " input: ['A', 'B'] }"; GraphDef graph_def; auto parser = protobuf::TextFormat::Parser(); CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str; TF_ASSERT_OK(graph::ValidateGraphDef(graph_def, *OpRegistry::Global())); } TEST(ValidateGraphDefTest, GraphWithUnspecifiedDefaultAttr) { const string graph_def_str = "node { name: 'A' op: 'FloatInput' }" "node { name: 'B' op: 'Int32Input' }" "node { " " name: 'C' op: 'Sum' " " attr { key: 'T' value { type: DT_FLOAT } }" " input: ['A', 'B'] " "}"; GraphDef graph_def; auto parser = protobuf::TextFormat::Parser(); CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str; Status s = graph::ValidateGraphDef(graph_def, *OpRegistry::Global()); EXPECT_FALSE(s.ok()); EXPECT_TRUE(absl::StrContains(s.ToString(), "NodeDef missing attr")); TF_ASSERT_OK(AddDefaultAttrsToGraphDef(&graph_def, *OpRegistry::Global(), 0)); TF_ASSERT_OK(graph::ValidateGraphDef(graph_def, *OpRegistry::Global())); } TEST(ValidateGraphDefTest, GraphWithUnspecifiedRequiredAttr) { const string graph_def_str = "node { name: 'A' op: 'FloatInput' }" "node { " " name: 'B' op: 'Cast' " " attr { key: 'SrcT' value { type: DT_FLOAT } }" " input: ['A'] " "}"; GraphDef graph_def; auto parser = protobuf::TextFormat::Parser(); CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str; Status s = graph::ValidateGraphDef(graph_def, *OpRegistry::Global()); EXPECT_FALSE(s.ok()); EXPECT_TRUE(absl::StrContains(s.ToString(), "NodeDef missing attr")); TF_ASSERT_OK(AddDefaultAttrsToGraphDef(&graph_def, *OpRegistry::Global(), 0)); s = graph::ValidateGraphDef(graph_def, *OpRegistry::Global()); EXPECT_FALSE(s.ok()); EXPECT_TRUE(absl::StrContains(s.ToString(), "NodeDef missing attr")); } TEST(ValidateGraphDefAgainstOpListTest, GraphWithOpOnlyInOpList) { OpRegistrationData op_reg_data; TF_ASSERT_OK(OpDefBuilder("UniqueSnowflake").Finalize(&op_reg_data)); OpList op_list; *op_list.add_op() = op_reg_data.op_def; const string graph_def_str = "node { name: 'A' op: 'UniqueSnowflake' }"; GraphDef graph_def; auto parser = protobuf::TextFormat::Parser(); CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str; TF_ASSERT_OK(graph::ValidateGraphDefAgainstOpList(graph_def, op_list)); } TEST(ValidateGraphDefAgainstOpListTest, GraphWithGlobalOpNotInOpList) { OpRegistrationData op_reg_data; TF_ASSERT_OK(OpDefBuilder("NotAnywhere").Finalize(&op_reg_data)); OpList op_list; *op_list.add_op() = op_reg_data.op_def; const 
string graph_def_str = "node { name: 'A' op: 'FloatInput' }"; GraphDef graph_def; auto parser = protobuf::TextFormat::Parser(); CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str; ASSERT_FALSE(graph::ValidateGraphDefAgainstOpList(graph_def, op_list).ok()); } REGISTER_OP("HasDocs").Doc("This is in the summary."); TEST(GetOpListForValidationTest, ShouldStripDocs) { bool found_float = false; bool found_int32 = false; bool found_has_docs = false; OpList op_list; graph::GetOpListForValidation(&op_list); for (const OpDef& op_def : op_list.op()) { if (op_def.name() == "FloatInput") { EXPECT_FALSE(found_float); found_float = true; } if (op_def.name() == "Int32Input") { EXPECT_FALSE(found_int32); found_int32 = true; } if (op_def.name() == "HasDocs") { EXPECT_FALSE(found_has_docs); found_has_docs = true; EXPECT_TRUE(op_def.summary().empty()); } } EXPECT_TRUE(found_float); EXPECT_TRUE(found_int32); EXPECT_TRUE(found_has_docs); } TEST(VerifyNoDuplicateNodeNames, NoDuplicateNodeNames) { const string graph_def_str = "node { name: 'A' op: 'FloatInput' }" "node { name: 'B' op: 'Int32Input' }" "node { " " name: 'C' op: 'Sum' " " attr { key: 'T' value { type: DT_FLOAT } }" " input: ['A', 'B'] " "}"; GraphDef graph_def; auto parser = protobuf::TextFormat::Parser(); CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str; TF_ASSERT_OK(graph::VerifyNoDuplicateNodeNames(graph_def)); } TEST(VerifyNoDuplicateNodeNames, DuplicateNodeNames) { const string graph_def_str = "node { name: 'A' op: 'FloatInput' }" "node { name: 'A' op: 'Int32Input' }" "node { " " name: 'C' op: 'Sum' " " attr { key: 'T' value { type: DT_FLOAT } }" " input: ['A', 'A'] " "}"; GraphDef graph_def; auto parser = protobuf::TextFormat::Parser(); CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str; EXPECT_EQ(graph::VerifyNoDuplicateNodeNames(graph_def).code(), tensorflow::error::ALREADY_EXISTS); } TEST(ValidateGraphHasNoCycleTest, NoCyclePasses) { const string graph_def_str = "node { name: 'A' op: 'FloatInput' }" "node { name: 'B' op: 'FloatInput' }" "node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }" " input: ['A', 'B'] }"; GraphDef graph_def; auto parser = protobuf::TextFormat::Parser(); CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str; Graph graph(OpRegistry::Global()); GraphConstructorOptions opts; TF_ASSERT_OK(ConvertGraphDefToGraph(opts, graph_def, &graph)); TF_EXPECT_OK(graph::ValidateGraphHasNoCycle(graph)); } TEST(ValidateGraphHasNoCycleTest, NoCycleWithMergePasses) { const string graph_def_str = R"EOF( node { name: 'A' op: 'FloatInput' } node { name: 'merge' op: 'Merge' input: [ 'A:0', 'next:0' ] attr { key: "N" value: { i: 2 } } attr { key: "T" value: { type: DT_FLOAT } } } node { name: 'B' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } } input: [ 'merge:0', 'merge:0' ] } node { name: 'next' op: 'NextIteration' input: ['B:0'] attr { key: "T" value: { type: DT_FLOAT } } } )EOF"; GraphDef graph_def; auto parser = protobuf::TextFormat::Parser(); CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str; Graph graph(OpRegistry::Global()); GraphConstructorOptions opts; TF_ASSERT_OK(ConvertGraphDefToGraph(opts, graph_def, &graph)); TF_EXPECT_OK(graph::ValidateGraphHasNoCycle(graph)); } Node* AddNodeFromNodeDef(Graph& graph, const string& name, const string& node_type, int num_inputs) { auto builder = NodeDefBuilder(name, node_type); for (int i = 0; i < num_inputs; ++i) { builder = 
builder.Input(strings::StrCat("node_", i), i, DT_FLOAT); } NodeDef node_def; TF_CHECK_OK(builder.Finalize(&node_def)); Status s; Node* node = graph.AddNode(node_def, &s); TF_CHECK_OK(s); return node; } TEST(ValidateGraphHasNoCycleTest, CycleFails) { Graph graph(OpRegistry::Global()); Node* a = AddNodeFromNodeDef(graph, "A", "FloatInput", 0); Node* c = AddNodeFromNodeDef(graph, "B", "Mul", 2); graph.AddEdge(a, 0, c, 0); graph.AddEdge(c, 0, c, 1); EXPECT_THAT( graph::ValidateGraphHasNoCycle(graph), tsl::testing::StatusIs( tsl::error::Code::INVALID_ARGUMENT, ::testing::ContainsRegex("Graph is invalid, contains a cycle"))); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/validate.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/validate_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
4df1949d-aa72-4d9a-8547-771218cf3f33
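A minimal sketch, not from the repository, of how the validation entry points above compose; CheckGraph is a hypothetical wrapper name:

#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/validate.h"
#include "tensorflow/core/platform/errors.h"

// Hypothetical driver: reject duplicate node names first (ValidateGraphDef
// assumes they are unique), then validate each NodeDef against the
// registered OpDefs after filling in default attrs on a copy.
tensorflow::Status CheckGraph(const tensorflow::GraphDef& graph_def) {
  TF_RETURN_IF_ERROR(tensorflow::graph::VerifyNoDuplicateNodeNames(graph_def));
  return tensorflow::graph::ValidateGraphDefAgainstOpRegistry(
      graph_def, *tensorflow::OpRegistry::Global());
}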
cpp
google/cel-cpp
resolver
eval/compiler/resolver.cc
eval/compiler/resolver_test.cc
#include "eval/compiler/resolver.h" #include <cstdint> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/base/nullability.h" #include "absl/container/flat_hash_map.h" #include "absl/status/statusor.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_split.h" #include "absl/strings/string_view.h" #include "absl/strings/strip.h" #include "absl/types/optional.h" #include "base/kind.h" #include "common/memory.h" #include "common/type.h" #include "common/value.h" #include "common/value_manager.h" #include "internal/status_macros.h" #include "runtime/function_overload_reference.h" #include "runtime/function_registry.h" #include "runtime/type_registry.h" namespace google::api::expr::runtime { using ::cel::Value; Resolver::Resolver( absl::string_view container, const cel::FunctionRegistry& function_registry, const cel::TypeRegistry&, cel::ValueManager& value_factory, const absl::flat_hash_map<std::string, cel::TypeRegistry::Enumeration>& resolveable_enums, bool resolve_qualified_type_identifiers) : namespace_prefixes_(), enum_value_map_(), function_registry_(function_registry), value_factory_(value_factory), resolveable_enums_(resolveable_enums), resolve_qualified_type_identifiers_(resolve_qualified_type_identifiers) { auto container_elements = absl::StrSplit(container, '.'); std::string prefix = ""; namespace_prefixes_.push_back(prefix); for (const auto& elem : container_elements) { if (elem.empty()) { continue; } absl::StrAppend(&prefix, elem, "."); namespace_prefixes_.insert(namespace_prefixes_.begin(), prefix); } for (const auto& prefix : namespace_prefixes_) { for (auto iter = resolveable_enums_.begin(); iter != resolveable_enums_.end(); ++iter) { absl::string_view enum_name = iter->first; if (!absl::StartsWith(enum_name, prefix)) { continue; } auto remainder = absl::StripPrefix(enum_name, prefix); const auto& enum_type = iter->second; for (const auto& enumerator : enum_type.enumerators) { auto key = absl::StrCat(remainder, !remainder.empty() ? "." 
: "", enumerator.name); enum_value_map_[key] = value_factory.CreateIntValue(enumerator.number); } } } } std::vector<std::string> Resolver::FullyQualifiedNames(absl::string_view name, int64_t expr_id) const { std::vector<std::string> names; if (absl::StartsWith(name, ".")) { std::string fully_qualified_name = std::string(name.substr(1)); names.push_back(fully_qualified_name); return names; } for (const auto& prefix : namespace_prefixes_) { std::string fully_qualified_name = absl::StrCat(prefix, name); names.push_back(fully_qualified_name); } return names; } absl::optional<cel::Value> Resolver::FindConstant(absl::string_view name, int64_t expr_id) const { auto names = FullyQualifiedNames(name, expr_id); for (const auto& name : names) { auto enum_entry = enum_value_map_.find(name); if (enum_entry != enum_value_map_.end()) { return enum_entry->second; } if (resolve_qualified_type_identifiers_ || !absl::StrContains(name, ".")) { auto type_value = value_factory_.FindType(name); if (type_value.ok() && type_value->has_value()) { return value_factory_.CreateTypeValue(**type_value); } } } return absl::nullopt; } std::vector<cel::FunctionOverloadReference> Resolver::FindOverloads( absl::string_view name, bool receiver_style, const std::vector<cel::Kind>& types, int64_t expr_id) const { std::vector<cel::FunctionOverloadReference> funcs; auto names = FullyQualifiedNames(name, expr_id); for (auto it = names.begin(); it != names.end(); it++) { funcs = function_registry_.FindStaticOverloads(*it, receiver_style, types); if (!funcs.empty()) { return funcs; } } return funcs; } std::vector<cel::FunctionRegistry::LazyOverload> Resolver::FindLazyOverloads( absl::string_view name, bool receiver_style, const std::vector<cel::Kind>& types, int64_t expr_id) const { std::vector<cel::FunctionRegistry::LazyOverload> funcs; auto names = FullyQualifiedNames(name, expr_id); for (const auto& name : names) { funcs = function_registry_.FindLazyOverloads(name, receiver_style, types); if (!funcs.empty()) { return funcs; } } return funcs; } absl::StatusOr<absl::optional<std::pair<std::string, cel::Type>>> Resolver::FindType(absl::string_view name, int64_t expr_id) const { auto qualified_names = FullyQualifiedNames(name, expr_id); for (auto& qualified_name : qualified_names) { CEL_ASSIGN_OR_RETURN(auto maybe_type, value_factory_.FindType(qualified_name)); if (maybe_type.has_value()) { return std::make_pair(std::move(qualified_name), std::move(*maybe_type)); } } return absl::nullopt; } }
#include "eval/compiler/resolver.h" #include <memory> #include <string> #include <vector> #include "absl/status/status.h" #include "absl/types/optional.h" #include "base/type_provider.h" #include "common/memory.h" #include "common/type_factory.h" #include "common/type_manager.h" #include "common/value.h" #include "common/value_manager.h" #include "common/values/legacy_value_manager.h" #include "eval/public/cel_function.h" #include "eval/public/cel_function_registry.h" #include "eval/public/cel_type_registry.h" #include "eval/public/cel_value.h" #include "eval/public/structs/protobuf_descriptor_type_provider.h" #include "eval/testutil/test_message.pb.h" #include "internal/testing.h" namespace google::api::expr::runtime { namespace { using ::cel::IntValue; using ::cel::TypeFactory; using ::cel::TypeManager; using ::cel::TypeValue; using ::cel::ValueManager; using ::testing::Eq; class FakeFunction : public CelFunction { public: explicit FakeFunction(const std::string& name) : CelFunction(CelFunctionDescriptor{name, false, {}}) {} absl::Status Evaluate(absl::Span<const CelValue> args, CelValue* result, google::protobuf::Arena* arena) const override { return absl::OkStatus(); } }; class ResolverTest : public testing::Test { public: ResolverTest() : value_factory_(cel::MemoryManagerRef::ReferenceCounting(), type_registry_.GetTypeProvider()) {} protected: CelTypeRegistry type_registry_; cel::common_internal::LegacyValueManager value_factory_; }; TEST_F(ResolverTest, TestFullyQualifiedNames) { CelFunctionRegistry func_registry; Resolver resolver("google.api.expr", func_registry.InternalGetRegistry(), type_registry_.InternalGetModernRegistry(), value_factory_, type_registry_.resolveable_enums()); auto names = resolver.FullyQualifiedNames("simple_name"); std::vector<std::string> expected_names( {"google.api.expr.simple_name", "google.api.simple_name", "google.simple_name", "simple_name"}); EXPECT_THAT(names, Eq(expected_names)); } TEST_F(ResolverTest, TestFullyQualifiedNamesPartiallyQualifiedName) { CelFunctionRegistry func_registry; Resolver resolver("google.api.expr", func_registry.InternalGetRegistry(), type_registry_.InternalGetModernRegistry(), value_factory_, type_registry_.resolveable_enums()); auto names = resolver.FullyQualifiedNames("expr.simple_name"); std::vector<std::string> expected_names( {"google.api.expr.expr.simple_name", "google.api.expr.simple_name", "google.expr.simple_name", "expr.simple_name"}); EXPECT_THAT(names, Eq(expected_names)); } TEST_F(ResolverTest, TestFullyQualifiedNamesAbsoluteName) { CelFunctionRegistry func_registry; Resolver resolver("google.api.expr", func_registry.InternalGetRegistry(), type_registry_.InternalGetModernRegistry(), value_factory_, type_registry_.resolveable_enums()); auto names = resolver.FullyQualifiedNames(".google.api.expr.absolute_name"); EXPECT_THAT(names.size(), Eq(1)); EXPECT_THAT(names[0], Eq("google.api.expr.absolute_name")); } TEST_F(ResolverTest, TestFindConstantEnum) { CelFunctionRegistry func_registry; type_registry_.Register(TestMessage::TestEnum_descriptor()); Resolver resolver("google.api.expr.runtime.TestMessage", func_registry.InternalGetRegistry(), type_registry_.InternalGetModernRegistry(), value_factory_, type_registry_.resolveable_enums()); auto enum_value = resolver.FindConstant("TestEnum.TEST_ENUM_1", -1); ASSERT_TRUE(enum_value); ASSERT_TRUE(enum_value->Is<IntValue>()); EXPECT_THAT(enum_value->GetInt().NativeValue(), Eq(1L)); enum_value = resolver.FindConstant( ".google.api.expr.runtime.TestMessage.TestEnum.TEST_ENUM_2", 
-1); ASSERT_TRUE(enum_value); ASSERT_TRUE(enum_value->Is<IntValue>()); EXPECT_THAT(enum_value->GetInt().NativeValue(), Eq(2L)); } TEST_F(ResolverTest, TestFindConstantUnqualifiedType) { CelFunctionRegistry func_registry; Resolver resolver("cel", func_registry.InternalGetRegistry(), type_registry_.InternalGetModernRegistry(), value_factory_, type_registry_.resolveable_enums()); auto type_value = resolver.FindConstant("int", -1); EXPECT_TRUE(type_value); EXPECT_TRUE(type_value->Is<TypeValue>()); EXPECT_THAT(type_value->GetType().name(), Eq("int")); } TEST_F(ResolverTest, TestFindConstantFullyQualifiedType) { google::protobuf::LinkMessageReflection<TestMessage>(); CelFunctionRegistry func_registry; type_registry_.RegisterTypeProvider( std::make_unique<ProtobufDescriptorProvider>( google::protobuf::DescriptorPool::generated_pool(), google::protobuf::MessageFactory::generated_factory())); Resolver resolver("cel", func_registry.InternalGetRegistry(), type_registry_.InternalGetModernRegistry(), value_factory_, type_registry_.resolveable_enums()); auto type_value = resolver.FindConstant(".google.api.expr.runtime.TestMessage", -1); ASSERT_TRUE(type_value); ASSERT_TRUE(type_value->Is<TypeValue>()); EXPECT_THAT(type_value->GetType().name(), Eq("google.api.expr.runtime.TestMessage")); } TEST_F(ResolverTest, TestFindConstantQualifiedTypeDisabled) { CelFunctionRegistry func_registry; type_registry_.RegisterTypeProvider( std::make_unique<ProtobufDescriptorProvider>( google::protobuf::DescriptorPool::generated_pool(), google::protobuf::MessageFactory::generated_factory())); Resolver resolver("", func_registry.InternalGetRegistry(), type_registry_.InternalGetModernRegistry(), value_factory_, type_registry_.resolveable_enums(), false); auto type_value = resolver.FindConstant(".google.api.expr.runtime.TestMessage", -1); EXPECT_FALSE(type_value); } TEST_F(ResolverTest, FindTypeBySimpleName) { CelFunctionRegistry func_registry; Resolver resolver("google.api.expr.runtime", func_registry.InternalGetRegistry(), type_registry_.InternalGetModernRegistry(), value_factory_, type_registry_.resolveable_enums()); type_registry_.RegisterTypeProvider( std::make_unique<ProtobufDescriptorProvider>( google::protobuf::DescriptorPool::generated_pool(), google::protobuf::MessageFactory::generated_factory())); ASSERT_OK_AND_ASSIGN(auto type, resolver.FindType("TestMessage", -1)); EXPECT_TRUE(type.has_value()); EXPECT_EQ(type->second.name(), "google.api.expr.runtime.TestMessage"); } TEST_F(ResolverTest, FindTypeByQualifiedName) { CelFunctionRegistry func_registry; type_registry_.RegisterTypeProvider( std::make_unique<ProtobufDescriptorProvider>( google::protobuf::DescriptorPool::generated_pool(), google::protobuf::MessageFactory::generated_factory())); Resolver resolver("google.api.expr.runtime", func_registry.InternalGetRegistry(), type_registry_.InternalGetModernRegistry(), value_factory_, type_registry_.resolveable_enums()); ASSERT_OK_AND_ASSIGN( auto type, resolver.FindType(".google.api.expr.runtime.TestMessage", -1)); ASSERT_TRUE(type.has_value()); EXPECT_EQ(type->second.name(), "google.api.expr.runtime.TestMessage"); } TEST_F(ResolverTest, TestFindDescriptorNotFound) { CelFunctionRegistry func_registry; type_registry_.RegisterTypeProvider( std::make_unique<ProtobufDescriptorProvider>( google::protobuf::DescriptorPool::generated_pool(), google::protobuf::MessageFactory::generated_factory())); Resolver resolver("google.api.expr.runtime", func_registry.InternalGetRegistry(), type_registry_.InternalGetModernRegistry(), 
value_factory_, type_registry_.resolveable_enums()); ASSERT_OK_AND_ASSIGN(auto type, resolver.FindType("UndefinedMessage", -1)); EXPECT_FALSE(type.has_value()) << type->second; } TEST_F(ResolverTest, TestFindOverloads) { CelFunctionRegistry func_registry; auto status = func_registry.Register(std::make_unique<FakeFunction>("fake_func")); ASSERT_OK(status); status = func_registry.Register( std::make_unique<FakeFunction>("cel.fake_ns_func")); ASSERT_OK(status); Resolver resolver("cel", func_registry.InternalGetRegistry(), type_registry_.InternalGetModernRegistry(), value_factory_, type_registry_.resolveable_enums()); auto overloads = resolver.FindOverloads("fake_func", false, ArgumentsMatcher(0)); EXPECT_THAT(overloads.size(), Eq(1)); EXPECT_THAT(overloads[0].descriptor.name(), Eq("fake_func")); overloads = resolver.FindOverloads("fake_ns_func", false, ArgumentsMatcher(0)); EXPECT_THAT(overloads.size(), Eq(1)); EXPECT_THAT(overloads[0].descriptor.name(), Eq("cel.fake_ns_func")); } TEST_F(ResolverTest, TestFindLazyOverloads) { CelFunctionRegistry func_registry; auto status = func_registry.RegisterLazyFunction( CelFunctionDescriptor{"fake_lazy_func", false, {}}); ASSERT_OK(status); status = func_registry.RegisterLazyFunction( CelFunctionDescriptor{"cel.fake_lazy_ns_func", false, {}}); ASSERT_OK(status); Resolver resolver("cel", func_registry.InternalGetRegistry(), type_registry_.InternalGetModernRegistry(), value_factory_, type_registry_.resolveable_enums()); auto overloads = resolver.FindLazyOverloads("fake_lazy_func", false, ArgumentsMatcher(0)); EXPECT_THAT(overloads.size(), Eq(1)); overloads = resolver.FindLazyOverloads("fake_lazy_ns_func", false, ArgumentsMatcher(0)); EXPECT_THAT(overloads.size(), Eq(1)); } } }
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/compiler/resolver.cc
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/compiler/resolver_test.cc
4552db5798fb0853b131b783d8875794334fae7f
9f47e9a6-3306-4f59-a171-c8655f57841a
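The candidate order asserted by the resolver tests above (longest container prefix first, bare name last, a leading '.' meaning the name is already absolute) can be reproduced in isolation. A standalone sketch of the same expansion logic, not the library's API:

#include <iostream>
#include <string>
#include <vector>

// Re-implements Resolver::FullyQualifiedNames' candidate ordering for
// illustration: "google.api.expr" + "simple_name" yields
// google.api.expr.simple_name, google.api.simple_name,
// google.simple_name, simple_name.
std::vector<std::string> Candidates(const std::string& container,
                                    const std::string& name) {
  if (!name.empty() && name[0] == '.') return {name.substr(1)};
  std::vector<std::string> prefixes{""};
  std::string prefix;
  size_t start = 0;
  while (start <= container.size()) {
    size_t dot = container.find('.', start);
    if (dot == std::string::npos) dot = container.size();
    if (dot > start) {
      prefix += container.substr(start, dot - start) + ".";
      prefixes.insert(prefixes.begin(), prefix);  // longer prefixes go first
    }
    start = dot + 1;
  }
  std::vector<std::string> names;
  names.reserve(prefixes.size());
  for (const auto& p : prefixes) names.push_back(p + name);
  return names;
}

int main() {
  for (const auto& n : Candidates("google.api.expr", "simple_name")) {
    std::cout << n << "\n";
  }
}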
cpp
tensorflow/tensorflow
exponential_minus_one
tensorflow/lite/experimental/shlo/ops/exponential_minus_one.cc
tensorflow/lite/experimental/shlo/ops/exponential_minus_one_test.cc
#include "tensorflow/lite/experimental/shlo/ops/exponential_minus_one.h" #include <cmath> #include "absl/status/status.h" #include "tensorflow/lite/experimental/shlo/bf16.h" #include "tensorflow/lite/experimental/shlo/dispatch.h" #include "tensorflow/lite/experimental/shlo/f16.h" #include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h" #include "tensorflow/lite/experimental/shlo/ops/util.h" #include "tensorflow/lite/experimental/shlo/tensor.h" namespace shlo_ref { struct ExponentialMinusOne { template <class T> T operator()(T v) const { return std::expm1(v); } }; template <> F16 ExponentialMinusOne::operator()(F16 v) const { return F16(operator()(static_cast<float>(v))); } template <> BF16 ExponentialMinusOne::operator()(BF16 v) const { return BF16(operator()(static_cast<float>(v))); } ExponentialMinusOneOp Create(ExponentialMinusOneOp::Attributes) { return {}; } absl::Status Prepare(ExponentialMinusOneOp& op, const Tensor& input, Tensor& output) { SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape())); SHLO_REF_RETURN_ON_ERROR( CheckSupportedTypes(CheckCtx("exponential_minus_one"), input, IsFloatTensor, IsQuantizedPerTensorTensor)); SHLO_REF_RETURN_ON_ERROR( CheckSameBaselineType(CheckCtx("exponential_minus_one"), input, output)); return absl::OkStatus(); } absl::Status Evaluate(ExponentialMinusOneOp& op, const Tensor& input, Tensor& output) { ExponentialMinusOne exponential_minus_one; if (input.IsPerTensorQuantized()) { DISPATCH_QUANTIZED( detail::DequantizeOpQuantizePerTensor, input.quantized_per_tensor_element_type().StorageType(), input.quantized_per_tensor_element_type().ExpressedType(), exponential_minus_one, input, output) } else if (IsFloatTensor(input)) { DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(), exponential_minus_one, input, output); } return absl::FailedPreconditionError( "stablehlo.exponential_minus_one: Unsupported tensor type."); } };
#include "tensorflow/lite/experimental/shlo/ops/exponential_minus_one.h" #include <cmath> #include <string> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/experimental/shlo/bf16.h" #include "tensorflow/lite/experimental/shlo/f16.h" #include "tensorflow/lite/experimental/shlo/ops/test_util.h" #include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h" #include "tensorflow/lite/experimental/shlo/quantize.h" #include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h" #include "tensorflow/lite/experimental/shlo/shape.h" #include "tensorflow/lite/experimental/shlo/status_matcher.h" #include "tensorflow/lite/experimental/shlo/tensor.h" using testing::ElementsAreArray; using testing::NanSensitiveFloatEq; using testing::Pointwise; namespace shlo_ref { template <> struct ParamName<ExponentialMinusOneOp> { static std::string Get() { return "ExponentialMinusOne"; } }; namespace { struct ExponentialMinusOne { template <class T> T operator()(T v) const { return std::expm1(v); } } exponential_minus_one_ref; template <> F16 ExponentialMinusOne::operator()(F16 v) const { return F16(operator()(static_cast<float>(v))); } template <> BF16 ExponentialMinusOne::operator()(BF16 v) const { return BF16(operator()(static_cast<float>(v))); } INSTANTIATE_TYPED_TEST_SUITE_P(ExponentialMinusOne, UnaryElementwiseOpShapePropagationTest, ExponentialMinusOneOp, TestParamNames); INSTANTIATE_TYPED_TEST_SUITE_P( ExponentialMinusOne, UnaryElementwiseSameBaselineElementTypeConstraintTest, UnaryElementwiseConstraint1Types<ExponentialMinusOneOp>, TestParamNames); using UnsupportedTypes = WithOpTypes<ExponentialMinusOneOp, ConcatTypes<BoolTestType, IntTestTypes, PerAxisQuantizedTestTypes>>; INSTANTIATE_TYPED_TEST_SUITE_P(ExponentialMinusOneOp, UnaryElementwiseUnsupportedTypeTest, UnsupportedTypes, TestParamNames); template <class T> struct ExponentialMinusOneTest : ::testing::Test {}; TYPED_TEST_SUITE(ExponentialMinusOneTest, FloatTestTypes, TestParamNames); TYPED_TEST(ExponentialMinusOneTest, FloatTensorsWork) { using StorageT = typename TypeParam::StorageT; const Shape shape({2, 3, 4}); Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape); Vector<StorageT> output_data(shape.NumElements()); Tensor input_tensor{ .type = TensorType{.shape = shape, .element_type = TypeParam::kStorage}, .data = input_data.data()}; Tensor output_tensor{ .type = TensorType{.shape = shape, .element_type = TypeParam::kStorage}, .data = output_data.data()}; Vector<StorageT> expected_data(shape.NumElements()); absl::c_transform(input_data, expected_data.begin(), exponential_minus_one_ref); auto op = Create(ExponentialMinusOneOp::Attributes{}); ASSERT_OK(Prepare(op, input_tensor, output_tensor)); ASSERT_OK(Evaluate(op, input_tensor, output_tensor)); EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data)); } template <class T> struct QuantizedExponentialMinusOneTest : ::testing::Test {}; TYPED_TEST_SUITE(QuantizedExponentialMinusOneTest, QuantizedTestTypes, TestParamNames); TYPED_TEST(QuantizedExponentialMinusOneTest, PerTensorWorks) { using StorageT = typename TypeParam::StorageT; using ExpressedT = typename TypeParam::ExpressedT; const Shape shape({2, 3, 4}); Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape); Vector<StorageT> output_data(shape.NumElements()); const ExpressedT scale = static_cast<ExpressedT>(1.5); const StorageT zero_point = static_cast<StorageT>(5); const QuantizedElementTypePerTensor tensor_type = 
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point, TypeParam::kExpressed, scale); Tensor input_tensor{ .type = QuantizedPerTensorTensorType{.shape = shape, .element_type = tensor_type}, .data = input_data.data()}; Tensor output_tensor{ .type = QuantizedPerTensorTensorType{.shape = shape, .element_type = tensor_type}, .data = output_data.data()}; Vector<StorageT> expected_data(shape.NumElements()); absl::c_transform( input_data, expected_data.begin(), [zero_point, scale](auto v) { const ExpressedT dequantized_input = Dequantize(v, zero_point, scale); const ExpressedT dequantized_res = exponential_minus_one_ref(dequantized_input); return Quantize<TypeParam::kStorage, TypeParam::kExpressed>( dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale); }); auto op = Create(ExponentialMinusOneOp::Attributes{}); ASSERT_OK(Prepare(op, input_tensor, output_tensor)); ASSERT_OK(Evaluate(op, input_tensor, output_tensor)); EXPECT_THAT(output_data, ElementsAreArray(expected_data)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/exponential_minus_one.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/exponential_minus_one_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
535afe38-1169-49f7-af7c-0d9f4af30529
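Why the op above dispatches to std::expm1 rather than computing exp(x) - 1 directly: for |x| near zero the subtraction cancels almost all significant bits. A standalone check in plain hosted C++, with no SHLO dependencies:

#include <cmath>
#include <cstdio>

int main() {
  const double x = 1e-12;
  // exp(x) rounds to a double within one ulp of 1 + 1e-12; subtracting 1
  // leaves only ~4 correct digits, while expm1 keeps full precision.
  std::printf("exp(x)-1 = %.17g\n", std::exp(x) - 1.0);  // only ~4 digits correct
  std::printf("expm1(x) = %.17g\n", std::expm1(x));      // ~1.0000000000005e-12
}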
cpp
google/quiche
chacha20_poly1305_decrypter
quiche/quic/core/crypto/chacha20_poly1305_decrypter.cc
quiche/quic/core/crypto/chacha20_poly1305_decrypter_test.cc
#include "quiche/quic/core/crypto/chacha20_poly1305_decrypter.h" #include "openssl/aead.h" #include "openssl/tls1.h" namespace quic { namespace { const size_t kKeySize = 32; const size_t kNonceSize = 12; } ChaCha20Poly1305Decrypter::ChaCha20Poly1305Decrypter() : ChaChaBaseDecrypter(EVP_aead_chacha20_poly1305, kKeySize, kAuthTagSize, kNonceSize, false) { static_assert(kKeySize <= kMaxKeySize, "key size too big"); static_assert(kNonceSize <= kMaxNonceSize, "nonce size too big"); } ChaCha20Poly1305Decrypter::~ChaCha20Poly1305Decrypter() {} uint32_t ChaCha20Poly1305Decrypter::cipher_id() const { return TLS1_CK_CHACHA20_POLY1305_SHA256; } QuicPacketCount ChaCha20Poly1305Decrypter::GetIntegrityLimit() const { static_assert(kMaxIncomingPacketSize < 16384, "This key limit requires limits on decryption payload sizes"); return 68719476736U; } }
#include "quiche/quic/core/crypto/chacha20_poly1305_decrypter.h" #include <memory> #include <string> #include "absl/strings/escaping.h" #include "absl/strings/string_view.h" #include "quiche/quic/core/quic_utils.h" #include "quiche/quic/platform/api/quic_test.h" #include "quiche/quic/test_tools/quic_test_utils.h" #include "quiche/common/test_tools/quiche_test_utils.h" namespace { struct TestVector { const char* key; const char* iv; const char* fixed; const char* aad; const char* ct; const char* pt; }; const TestVector test_vectors[] = { {"808182838485868788898a8b8c8d8e8f" "909192939495969798999a9b9c9d9e9f", "4041424344454647", "07000000", "50515253c0c1c2c3c4c5c6c7", "d31a8d34648e60db7b86afbc53ef7ec2" "a4aded51296e08fea9e2b5a736ee62d6" "3dbea45e8ca9671282fafb69da92728b" "1a71de0a9e060b2905d6a5b67ecd3b36" "92ddbd7f2d778b8c9803aee328091b58" "fab324e4fad675945585808b4831d7bc" "3ff4def08e4b7a9de576d26586cec64b" "6116" "1ae10b594f09e26a7e902ecb", "4c616469657320616e642047656e746c" "656d656e206f662074686520636c6173" "73206f66202739393a20496620492063" "6f756c64206f6666657220796f75206f" "6e6c79206f6e652074697020666f7220" "746865206675747572652c2073756e73" "637265656e20776f756c642062652069" "742e"}, {"808182838485868788898a8b8c8d8e8f" "909192939495969798999a9b9c9d9e9f", "4041424344454647", "07000000", "50515253c0c1c2c3c4c5c6c7", "d31a8d34648e60db7b86afbc53ef7ec2" "a4aded51296e08fea9e2b5a736ee62d6" "3dbea45e8ca9671282fafb69da92728b" "1a71de0a9e060b2905d6a5b67ecd3b36" "92ddbd7f2d778b8c9803aee328091b58" "fab324e4fad675945585808b4831d7bc" "3ff4def08e4b7a9de576d26586cec64b" "6116" "1ae10b594f09e26a7e902ecc", nullptr}, {"808182838485868788898a8b8c8d8e8f" "909192939495969798999a9b9c9d9e9f", "4041424344454647", "07000000", "60515253c0c1c2c3c4c5c6c7", "d31a8d34648e60db7b86afbc53ef7ec2" "a4aded51296e08fea9e2b5a736ee62d6" "3dbea45e8ca9671282fafb69da92728b" "1a71de0a9e060b2905d6a5b67ecd3b36" "92ddbd7f2d778b8c9803aee328091b58" "fab324e4fad675945585808b4831d7bc" "3ff4def08e4b7a9de576d26586cec64b" "6116" "1ae10b594f09e26a7e902ecb", nullptr}, {nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}}; } namespace quic { namespace test { QuicData* DecryptWithNonce(ChaCha20Poly1305Decrypter* decrypter, absl::string_view nonce, absl::string_view associated_data, absl::string_view ciphertext) { uint64_t packet_number; absl::string_view nonce_prefix(nonce.data(), nonce.size() - sizeof(packet_number)); decrypter->SetNoncePrefix(nonce_prefix); memcpy(&packet_number, nonce.data() + nonce_prefix.size(), sizeof(packet_number)); std::unique_ptr<char[]> output(new char[ciphertext.length()]); size_t output_length = 0; const bool success = decrypter->DecryptPacket( packet_number, associated_data, ciphertext, output.get(), &output_length, ciphertext.length()); if (!success) { return nullptr; } return new QuicData(output.release(), output_length, true); } class ChaCha20Poly1305DecrypterTest : public QuicTest {}; TEST_F(ChaCha20Poly1305DecrypterTest, Decrypt) { for (size_t i = 0; test_vectors[i].key != nullptr; i++) { bool has_pt = test_vectors[i].pt; std::string key; std::string iv; std::string fixed; std::string aad; std::string ct; std::string pt; ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].key, &key)); ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].iv, &iv)); ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].fixed, &fixed)); ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].aad, &aad)); ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].ct, &ct)); if (has_pt) { ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].pt, 
&pt)); } ChaCha20Poly1305Decrypter decrypter; ASSERT_TRUE(decrypter.SetKey(key)); std::unique_ptr<QuicData> decrypted(DecryptWithNonce( &decrypter, fixed + iv, absl::string_view(aad.length() ? aad.data() : nullptr, aad.length()), ct)); if (!decrypted) { EXPECT_FALSE(has_pt); continue; } EXPECT_TRUE(has_pt); EXPECT_EQ(12u, ct.size() - decrypted->length()); ASSERT_EQ(pt.length(), decrypted->length()); quiche::test::CompareCharArraysWithHexError( "plaintext", decrypted->data(), pt.length(), pt.data(), pt.length()); } } } }
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/chacha20_poly1305_decrypter.cc
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/chacha20_poly1305_decrypter_test.cc
6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
13bd3235-e3de-4413-be34-c4c9024651b4
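DecryptWithNonce in the test above rebuilds the 12-byte ChaCha20-Poly1305 nonce as a 4-byte fixed prefix plus an 8-byte packet-number tail. A standalone sketch of that layout; MakeNonce is a hypothetical helper matching the test's host-order memcpy:

#include <cstdint>
#include <cstring>
#include <string>

// 12-byte nonce = 4-byte fixed prefix || 8-byte packet number. The decrypter
// receives the prefix once via SetNoncePrefix() and the packet number on
// every DecryptPacket() call.
std::string MakeNonce(const std::string& fixed4, uint64_t packet_number) {
  std::string nonce = fixed4;  // must be exactly 4 bytes
  char tail[sizeof(packet_number)];
  std::memcpy(tail, &packet_number, sizeof(packet_number));  // host order
  nonce.append(tail, sizeof(tail));
  return nonce;  // 12 bytes total
}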
cpp
google/libaddressinput
address_problem
cpp/src/address_problem.cc
cpp/test/address_problem_test.cc
#include <libaddressinput/address_problem.h>

#include <cstddef>
#include <ostream>

#include "util/size.h"

using i18n::addressinput::AddressProblem;
using i18n::addressinput::size;
using i18n::addressinput::UNEXPECTED_FIELD;
using i18n::addressinput::UNSUPPORTED_FIELD;

std::ostream& operator<<(std::ostream& o, AddressProblem problem) {
  static const char* const kProblemNames[] = {
      "UNEXPECTED_FIELD", "MISSING_REQUIRED_FIELD", "UNKNOWN_VALUE",
      "INVALID_FORMAT",   "MISMATCHING_VALUE",      "USES_P_O_BOX",
      "UNSUPPORTED_FIELD",
  };
  static_assert(UNEXPECTED_FIELD == 0, "bad_base");
  static_assert(UNSUPPORTED_FIELD == size(kProblemNames) - 1, "bad_length");
  if (problem < 0 || static_cast<size_t>(problem) >= size(kProblemNames)) {
    o << "[INVALID ENUM VALUE " << static_cast<int>(problem) << "]";
  } else {
    o << kProblemNames[problem];
  }
  return o;
}
#include <libaddressinput/address_problem.h>

#include <sstream>

#include <gtest/gtest.h>

namespace {

using i18n::addressinput::UNKNOWN_VALUE;

TEST(AddressProblemTest, ValidEnumValue) {
  std::ostringstream oss;
  oss << UNKNOWN_VALUE;
  EXPECT_EQ("UNKNOWN_VALUE", oss.str());
}

}  // namespace
https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/address_problem.cc
https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/address_problem_test.cc
2610f7b1043d6784ada41392fc9392d1ea09ea07
54ac5196-c078-4979-b2bd-62baf3bb4e06
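A minimal sketch of the operator<< contract exercised above; it assumes only that the libaddressinput header is on the include path:

#include <libaddressinput/address_problem.h>

#include <iostream>

int main() {
  using i18n::addressinput::AddressProblem;
  using i18n::addressinput::MISSING_REQUIRED_FIELD;
  // In-range values print their name.
  std::cout << MISSING_REQUIRED_FIELD << "\n";  // MISSING_REQUIRED_FIELD
  // Out-of-range values take the fallback branch.
  std::cout << static_cast<AddressProblem>(99) << "\n";  // [INVALID ENUM VALUE 99]
}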
cpp
tensorflow/tensorflow
fake_clock_env
tensorflow/core/util/fake_clock_env.cc
tensorflow/core/util/fake_clock_env_test.cc
#include "tensorflow/core/util/fake_clock_env.h" #include <string> namespace tensorflow { FakeClockEnv::FakeClockEnv(Env* wrapped) : EnvWrapper(wrapped) {} void FakeClockEnv::AdvanceByMicroseconds(int64_t micros) { { mutex_lock l(mu_); current_time_ += micros; } } uint64 FakeClockEnv::NowMicros() const { { mutex_lock l(mu_); return current_time_; } } }
#include "tensorflow/core/util/fake_clock_env.h" #include <memory> #include <gtest/gtest.h> #include "tensorflow/core/platform/env.h" namespace tensorflow { namespace { class FakeClockEnvTest : public ::testing::Test { protected: void SetUp() override { fake_clock_env_ = std::make_unique<FakeClockEnv>(Env::Default()); } void TearDown() override { fake_clock_env_.reset(); } std::unique_ptr<FakeClockEnv> fake_clock_env_; }; TEST_F(FakeClockEnvTest, TimeInitializedToZero) { EXPECT_EQ(0, fake_clock_env_->NowMicros()); } TEST_F(FakeClockEnvTest, AdvanceTimeByMicroseconds) { int current_time = fake_clock_env_->NowMicros(); int64_t duration = 100; current_time += duration; fake_clock_env_->AdvanceByMicroseconds(duration); EXPECT_EQ(current_time, fake_clock_env_->NowMicros()); for (int i = 0; i < 5; ++i) { fake_clock_env_->AdvanceByMicroseconds(100); current_time += 100; } EXPECT_EQ(current_time, fake_clock_env_->NowMicros()); current_time += duration; duration = 200; fake_clock_env_->AdvanceByMicroseconds(duration); EXPECT_NE(current_time, fake_clock_env_->NowMicros()); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/fake_clock_env.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/fake_clock_env_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
eef8dca3-7df6-482a-8512-d6ab41154110
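A sketch of the intended use: code that reads time through an injected Env* can be stepped deterministically in tests. Poller is a hypothetical component standing in for real code under test:

#include <cstdint>

#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/util/fake_clock_env.h"

// Hypothetical component that timestamps its work via the injected Env.
struct Poller {
  explicit Poller(tensorflow::Env* env) : env(env) {}
  uint64_t PollOnce() { return env->NowMicros(); }
  tensorflow::Env* env;
};

void Demo() {
  tensorflow::FakeClockEnv clock(tensorflow::Env::Default());
  Poller poller(&clock);
  uint64_t t0 = poller.PollOnce();  // 0: fake time starts at zero
  clock.AdvanceByMicroseconds(250);
  uint64_t t1 = poller.PollOnce();  // 250: time moves only when advanced
  (void)t0;
  (void)t1;
}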
cpp
tensorflow/tensorflow
tensor_testutil
tensorflow/core/framework/tensor_testutil.cc
tensorflow/core/framework/tensor_testutil_test.cc
#include "tensorflow/core/framework/tensor_testutil.h" #include <cmath> #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace test { ::testing::AssertionResult IsSameType(const Tensor& x, const Tensor& y) { if (x.dtype() != y.dtype()) { return ::testing::AssertionFailure() << "Tensors have different dtypes (" << x.dtype() << " vs " << y.dtype() << ")"; } return ::testing::AssertionSuccess(); } ::testing::AssertionResult IsSameShape(const Tensor& x, const Tensor& y) { if (!x.IsSameSize(y)) { return ::testing::AssertionFailure() << "Tensors have different shapes (" << x.shape().DebugString() << " vs " << y.shape().DebugString() << ")"; } return ::testing::AssertionSuccess(); } template <typename T> static ::testing::AssertionResult EqualFailure(const T& x, const T& y) { return ::testing::AssertionFailure() << std::setprecision(std::numeric_limits<T>::digits10 + 2) << x << " not equal to " << y; } template <> ::testing::AssertionResult EqualFailure<int8>(const int8& x, const int8& y) { return EqualFailure(static_cast<int>(x), static_cast<int>(y)); } static ::testing::AssertionResult IsEqual(float x, float y, Tolerance t) { if (Eigen::numext::isnan(x) && Eigen::numext::isnan(y)) return ::testing::AssertionSuccess(); if (t == Tolerance::kNone) { if (x == y) return ::testing::AssertionSuccess(); } else { if (::testing::internal::CmpHelperFloatingPointEQ<float>("", "", x, y)) return ::testing::AssertionSuccess(); } return EqualFailure(x, y); } static ::testing::AssertionResult IsEqual(double x, double y, Tolerance t) { if (Eigen::numext::isnan(x) && Eigen::numext::isnan(y)) return ::testing::AssertionSuccess(); if (t == Tolerance::kNone) { if (x == y) return ::testing::AssertionSuccess(); } else { if (::testing::internal::CmpHelperFloatingPointEQ<double>("", "", x, y)) return ::testing::AssertionSuccess(); } return EqualFailure(x, y); } static ::testing::AssertionResult IsEqual(Eigen::half x, Eigen::half y, Tolerance t) { if (Eigen::numext::isnan(x) && Eigen::numext::isnan(y)) return ::testing::AssertionSuccess(); if (Eigen::numext::isnan(x) || Eigen::numext::isnan(y)) return EqualFailure(x, y); auto sign_and_magnitude_to_biased = [](uint16_t sam) { const uint16_t kSignBitMask = 0x8000; if (kSignBitMask & sam) return ~sam + 1; return kSignBitMask | sam; }; auto xb = sign_and_magnitude_to_biased(Eigen::numext::bit_cast<uint16_t>(x)); auto yb = sign_and_magnitude_to_biased(Eigen::numext::bit_cast<uint16_t>(y)); if (t == Tolerance::kNone) { if (xb == yb) return ::testing::AssertionSuccess(); } else { auto distance = xb >= yb ? xb - yb : yb - xb; const uint16_t kMaxUlps = 4; if (distance <= kMaxUlps) return ::testing::AssertionSuccess(); } return EqualFailure(x, y); } static ::testing::AssertionResult IsEqual(tsl::bfloat16 x, tsl::bfloat16 y, Tolerance t) { if (Eigen::numext::isnan(x) && Eigen::numext::isnan(y)) return ::testing::AssertionSuccess(); if (Eigen::numext::isnan(x) || Eigen::numext::isnan(y)) return EqualFailure(x, y); auto sign_and_magnitude_to_biased = [](uint16_t sam) { const uint16_t kSignBitMask = 0x8000; if (kSignBitMask & sam) return ~sam + 1; return kSignBitMask | sam; }; auto xb = sign_and_magnitude_to_biased(Eigen::numext::bit_cast<uint16_t>(x)); auto yb = sign_and_magnitude_to_biased(Eigen::numext::bit_cast<uint16_t>(y)); if (t == Tolerance::kNone) { if (xb == yb) return ::testing::AssertionSuccess(); } else { auto distance = xb >= yb ? 
xb - yb : yb - xb; const uint16_t kMaxUlps = 4; if (distance <= kMaxUlps) return ::testing::AssertionSuccess(); } return EqualFailure(x, y); } template <typename T> static ::testing::AssertionResult IsEqual(const T& x, const T& y, Tolerance t) { if (::testing::internal::CmpHelperEQ<T>("", "", x, y)) return ::testing::AssertionSuccess(); return EqualFailure(x, y); } template <typename T> static ::testing::AssertionResult IsEqual(const std::complex<T>& x, const std::complex<T>& y, Tolerance t) { if (IsEqual(x.real(), y.real(), t) && IsEqual(x.imag(), y.imag(), t)) return ::testing::AssertionSuccess(); return EqualFailure(x, y); } template <typename T> static void ExpectEqual(const Tensor& x, const Tensor& y, Tolerance t = Tolerance::kDefault) { const T* Tx = x.unaligned_flat<T>().data(); const T* Ty = y.unaligned_flat<T>().data(); auto size = x.NumElements(); int max_failures = 10; int num_failures = 0; for (decltype(size) i = 0; i < size; ++i) { EXPECT_TRUE(IsEqual(Tx[i], Ty[i], t)) << "i = " << (++num_failures, i); ASSERT_LT(num_failures, max_failures) << "Too many mismatches, giving up."; } } template <typename T> static ::testing::AssertionResult IsClose(const T& x, const T& y, const T& atol, const T& rtol) { if (Eigen::numext::isnan(x) && Eigen::numext::isnan(y)) return ::testing::AssertionSuccess(); if (x == y) return ::testing::AssertionSuccess(); auto tolerance = atol + rtol * Eigen::numext::abs(x); if (Eigen::numext::abs(x - y) <= tolerance) return ::testing::AssertionSuccess(); return ::testing::AssertionFailure() << x << " not close to " << y; } template <typename T> static ::testing::AssertionResult IsClose(const std::complex<T>& x, const std::complex<T>& y, const T& atol, const T& rtol) { if (IsClose(x.real(), y.real(), atol, rtol) && IsClose(x.imag(), y.imag(), atol, rtol)) return ::testing::AssertionSuccess(); return ::testing::AssertionFailure() << x << " not close to " << y; } template <typename T> static auto GetTolerance(double tolerance) { using Real = typename Eigen::NumTraits<T>::Real; auto default_tol = static_cast<Real>(5.0) * Eigen::NumTraits<T>::epsilon(); auto result = tolerance < 0.0 ? 
default_tol : static_cast<Real>(tolerance); EXPECT_GE(result, static_cast<Real>(0)); return result; } template <typename T> static void ExpectClose(const Tensor& x, const Tensor& y, double atol, double rtol) { auto typed_atol = GetTolerance<T>(atol); auto typed_rtol = GetTolerance<T>(rtol); const T* Tx = x.unaligned_flat<T>().data(); const T* Ty = y.unaligned_flat<T>().data(); auto size = x.NumElements(); int max_failures = 10; int num_failures = 0; for (decltype(size) i = 0; i < size; ++i) { EXPECT_TRUE(IsClose(Tx[i], Ty[i], typed_atol, typed_rtol)) << "i = " << (++num_failures, i) << " Tx[i] = " << Tx[i] << " Ty[i] = " << Ty[i]; ASSERT_LT(num_failures, max_failures) << "Too many mismatches (atol = " << atol << " rtol = " << rtol << "), giving up."; } EXPECT_EQ(num_failures, 0) << "Mismatches detected (atol = " << atol << " rtol = " << rtol << ")."; } void ExpectEqual(const Tensor& x, const Tensor& y, Tolerance t) { ASSERT_TRUE(IsSameType(x, y)); ASSERT_TRUE(IsSameShape(x, y)); switch (x.dtype()) { case DT_FLOAT: return ExpectEqual<float>(x, y, t); case DT_DOUBLE: return ExpectEqual<double>(x, y, t); case DT_INT32: return ExpectEqual<int32>(x, y); case DT_UINT32: return ExpectEqual<uint32>(x, y); case DT_UINT16: return ExpectEqual<uint16>(x, y); case DT_UINT8: return ExpectEqual<uint8>(x, y); case DT_INT16: return ExpectEqual<int16>(x, y); case DT_INT8: return ExpectEqual<int8>(x, y); case DT_STRING: return ExpectEqual<tstring>(x, y); case DT_COMPLEX64: return ExpectEqual<complex64>(x, y, t); case DT_COMPLEX128: return ExpectEqual<complex128>(x, y, t); case DT_INT64: return ExpectEqual<int64_t>(x, y); case DT_UINT64: return ExpectEqual<uint64>(x, y); case DT_BOOL: return ExpectEqual<bool>(x, y); case DT_QINT8: return ExpectEqual<qint8>(x, y); case DT_QUINT8: return ExpectEqual<quint8>(x, y); case DT_QINT16: return ExpectEqual<qint16>(x, y); case DT_QUINT16: return ExpectEqual<quint16>(x, y); case DT_QINT32: return ExpectEqual<qint32>(x, y); case DT_BFLOAT16: return ExpectEqual<bfloat16>(x, y, t); case DT_HALF: return ExpectEqual<Eigen::half>(x, y, t); case DT_FLOAT8_E5M2: return ExpectEqual<float8_e5m2>(x, y, t); case DT_FLOAT8_E4M3FN: return ExpectEqual<float8_e4m3fn>(x, y, t); case DT_INT4: return ExpectEqual<int4>(x, y, t); case DT_UINT4: return ExpectEqual<uint4>(x, y, t); default: EXPECT_TRUE(false) << "Unsupported type : " << DataTypeString(x.dtype()); } } void ExpectClose(const Tensor& x, const Tensor& y, double atol, double rtol) { ASSERT_TRUE(IsSameType(x, y)); ASSERT_TRUE(IsSameShape(x, y)); switch (x.dtype()) { case DT_HALF: return ExpectClose<Eigen::half>(x, y, atol, rtol); case DT_BFLOAT16: return ExpectClose<Eigen::bfloat16>(x, y, atol, rtol); case DT_FLOAT: return ExpectClose<float>(x, y, atol, rtol); case DT_DOUBLE: return ExpectClose<double>(x, y, atol, rtol); case DT_COMPLEX64: return ExpectClose<complex64>(x, y, atol, rtol); case DT_COMPLEX128: return ExpectClose<complex128>(x, y, atol, rtol); default: EXPECT_TRUE(false) << "Unsupported type : " << DataTypeString(x.dtype()); } } ::testing::AssertionResult internal_test::IsClose(Eigen::half x, Eigen::half y, double atol, double rtol) { return test::IsClose(x, y, GetTolerance<Eigen::half>(atol), GetTolerance<Eigen::half>(rtol)); } ::testing::AssertionResult internal_test::IsClose(float x, float y, double atol, double rtol) { return test::IsClose(x, y, GetTolerance<float>(atol), GetTolerance<float>(rtol)); } ::testing::AssertionResult internal_test::IsClose(double x, double y, double atol, double rtol) { return 
test::IsClose(x, y, GetTolerance<double>(atol), GetTolerance<double>(rtol)); } } }
#include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace test { namespace { using internal_test::IsClose; template <typename T> void TestEdgeCasesNear() { EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::infinity(), Eigen::NumTraits<T>::infinity(), 0.0, 0.0)); EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::lowest(), Eigen::NumTraits<T>::highest(), Eigen::NumTraits<double>::infinity(), 0.0)); EXPECT_FALSE( IsClose(Eigen::NumTraits<T>::lowest(), Eigen::NumTraits<T>::highest(), static_cast<double>(Eigen::NumTraits<T>::highest()), 0.0)); EXPECT_FALSE(IsClose(Eigen::NumTraits<T>::quiet_NaN(), T(0.0), 0.0, 0.0)); EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::quiet_NaN(), Eigen::NumTraits<T>::quiet_NaN(), 0.0, 0.0)); EXPECT_FALSE(IsClose(Eigen::NumTraits<T>::quiet_NaN(), T(0.0), Eigen::NumTraits<double>::infinity(), 0.0)); EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::quiet_NaN(), Eigen::NumTraits<T>::quiet_NaN(), Eigen::NumTraits<double>::infinity(), 0.0)); } template <typename T, typename U> void dumpFloatingPointStorage(T value) { U* integral = reinterpret_cast<U*>(&value); int shift_amount = (sizeof(U) << 3) - 1; int exponent_bits = 2 + (log2(sizeof(U)) * 3); U mask = static_cast<U>(1) << shift_amount; for (int bits = 0; bits <= shift_amount; ++bits) { std::cout << ((*integral & mask) > 0); if (bits == 0 || bits == exponent_bits) std::cout << " "; mask >>= 1; } std::cout << std::endl; printf("%.20lf\n", static_cast<double>(value)); } TEST(TensorTestUtilTest, ExpectTensorNearHalf) { typedef Eigen::half T; EXPECT_TRUE(IsClose(static_cast<T>(1.0f), static_cast<T>(1.0f), 0.0, 0.0)); EXPECT_TRUE(IsClose(static_cast<T>(0.0f), static_cast<T>(-0.0f), 0.0, 0.0)); EXPECT_TRUE( IsClose(static_cast<T>(3.141592f), static_cast<T>(3.141592f), 0.0, 0.0)); EXPECT_TRUE( IsClose(static_cast<T>(8.9875f), static_cast<T>(8.99f), 0.0078125, 0.0)); EXPECT_FALSE( IsClose(static_cast<T>(8.9875f), static_cast<T>(8.99f), 0.007, 0.0)); EXPECT_TRUE( IsClose(static_cast<T>(720.2f), static_cast<T>(720.3f), 0.5, 0.0)); EXPECT_FALSE( IsClose(static_cast<T>(720.2f), static_cast<T>(720.3f), 0.4, 0.0)); EXPECT_TRUE( IsClose(static_cast<T>(1234.f), static_cast<T>(1235.f), 1.0, 0.0)); EXPECT_FALSE( IsClose(static_cast<T>(1234.5f), static_cast<T>(1235.f), 0.5, 0.0)); EXPECT_TRUE( IsClose(static_cast<T>(1234.5f), static_cast<T>(1235.f), 1.0, 0.0)); EXPECT_TRUE( IsClose(static_cast<T>(-2.71f), static_cast<T>(-2.72f), 0.01, 0.0)); TestEdgeCasesNear<T>(); } TEST(TensorTestUtilTest, ExpectTensorNearFloat) { typedef float T; EXPECT_TRUE(IsClose(1.0f, 1.0f, 0.0f, 0.0f)); EXPECT_TRUE(IsClose(0.0f, -0.0f, 0.0f, 0.0f)); EXPECT_TRUE(IsClose(3.14159265359f, 3.14159265359f, 0.0f, 0.0f)); EXPECT_TRUE(IsClose(8.9875f, 8.9876f, 0.0001002f, 0.0f)); EXPECT_FALSE(IsClose(8.9875f, 8.9876f, 0.0001f, 0.0f)); EXPECT_TRUE(IsClose(720.2017f, 720.2018f, 0.0001f, 0.0f)); EXPECT_FALSE(IsClose(720.20175f, 720.20185f, 0.0001f, 0.0f)); EXPECT_TRUE(IsClose(720.20175f, 720.20185f, 0.00013f, 0.0f)); EXPECT_FALSE(IsClose(123456788.f, 123456789.f, 4.0f, 0.0f)); EXPECT_TRUE(IsClose(123456788.f, 123456789.f, 8.0f, 0.0f)); EXPECT_TRUE(IsClose(-2.718281f, -2.718282f, 0.1f, 0.0f)); TestEdgeCasesNear<T>(); } TEST(TensorTestUtilTest, ExpectTensorNearDouble) { typedef double T; EXPECT_TRUE(IsClose(1.0, 1.0, 0.0, 0.0)); EXPECT_TRUE(IsClose(0.0, -0.0, 0.0, 0.0)); EXPECT_TRUE(IsClose(3.14159265359, 3.14159265359, 0.0, 0.0)); EXPECT_TRUE(IsClose(8.9875, 8.9876, 0.0001, 0.0)); EXPECT_FALSE(IsClose(100720.2018, 100720.2019, 0.0001, 
0.0)); EXPECT_TRUE(IsClose(100720.2018, 100720.2019, 1.00000005e-4, 0.0)); EXPECT_FALSE(IsClose(12345678901234567., 12345678901234566., 1.0, 0.0)); EXPECT_TRUE(IsClose(12345678901234567., 12345678901234566., 2.0, 0.0)); EXPECT_FALSE(IsClose(-2.71828182846, -2.71828182847, 1.0e-11, 0.0)); EXPECT_TRUE(IsClose(-2.71828182846, -2.71828182847, 1.00000009e-11, 0.0)); TestEdgeCasesNear<T>(); } TEST(TensorTestUtilTest, ExpectTensorNearSlice) { Tensor x(DT_FLOAT, TensorShape({7, 3})); test::FillFn<float>(&x, [](int i) { return 1.0f; }); test::ExpectTensorNear<float>( x.SubSlice(3), test::AsTensor<float>({1.0, 1.0, 1.0}, TensorShape({3})), 1e-10); } template <typename T> void TestEdgeCasesClose() { EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::infinity(), Eigen::NumTraits<T>::infinity(), 0.0, 0.0)); EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::lowest(), Eigen::NumTraits<T>::highest(), Eigen::NumTraits<double>::infinity(), Eigen::NumTraits<double>::infinity())); EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::lowest(), Eigen::NumTraits<T>::highest(), static_cast<double>(Eigen::NumTraits<T>::highest()), static_cast<double>(Eigen::NumTraits<T>::highest()))); EXPECT_FALSE(IsClose(Eigen::NumTraits<T>::quiet_NaN(), T(0.0), 0.0, 0.0)); EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::quiet_NaN(), Eigen::NumTraits<T>::quiet_NaN(), 0.0, 0.0)); EXPECT_FALSE(IsClose(Eigen::NumTraits<T>::quiet_NaN(), T(0.0), Eigen::NumTraits<double>::infinity(), 0.0)); EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::quiet_NaN(), Eigen::NumTraits<T>::quiet_NaN(), Eigen::NumTraits<double>::infinity(), 0.0)); } TEST(TensorTestUtilTest, ExpectTensorCloseHalf) { typedef Eigen::half T; EXPECT_TRUE(IsClose(static_cast<T>(1.0f), static_cast<T>(1.1f), 0.1, 0.1)); EXPECT_TRUE(IsClose(static_cast<T>(1.0f), static_cast<T>(1.0f), 0.0, 0.0)); EXPECT_FALSE(IsClose(static_cast<T>(1.0f), static_cast<T>(1.1f), 0.0, 0.0)); EXPECT_TRUE(IsClose(static_cast<T>(1.234f), static_cast<T>(1.234f))); EXPECT_TRUE(IsClose(static_cast<T>(1.234f), static_cast<T>(1.233f))); EXPECT_TRUE(IsClose(static_cast<T>(1.234f), static_cast<T>(1.235f))); EXPECT_FALSE(IsClose(static_cast<T>(1.234f), static_cast<T>(1.232f))); EXPECT_FALSE(IsClose(static_cast<T>(1.234f), static_cast<T>(1.236f))); EXPECT_TRUE( IsClose(static_cast<T>(1.234f), static_cast<T>(1.232f), 8e-4f, 1e-3f)); EXPECT_TRUE( IsClose(static_cast<T>(1.234f), static_cast<T>(1.236f), 1.4e-3f, 5e-4f)); EXPECT_TRUE( IsClose(static_cast<T>(3.141592f), static_cast<T>(3.141593f), 0.0, 0.0)); EXPECT_FALSE(IsClose(static_cast<T>(1e4f), static_cast<T>(1e-4f))); TestEdgeCasesClose<T>(); } TEST(TensorTestUtilTest, ExpectTensorCloseFloat) { typedef float T; EXPECT_TRUE(IsClose(1.0f, 1.1f, 0.1f, 0.1f)); EXPECT_TRUE(IsClose(1.0f, 1.0f, 0.0f, 0.0f)); EXPECT_FALSE(IsClose(1.0f, 1.1f, 0.0f, 0.0f)); EXPECT_TRUE(IsClose(1.234567f, 1.234567f)); EXPECT_TRUE(IsClose(1.234567f, 1.234568f)); EXPECT_TRUE(IsClose(1.234567f, 1.234566f)); EXPECT_FALSE(IsClose(1.234567f, 1.234569f)); EXPECT_FALSE(IsClose(1.234567f, 1.234565f)); EXPECT_TRUE(IsClose(1.234567f, 1.234569f, 8e-7f, 1e-6f)); EXPECT_TRUE(IsClose(1.234567f, 1.234565f, 3e-7f, 1.5e-6f)); EXPECT_TRUE(IsClose(3.14159265f, 3.14159266f, 0.0f, 0.0f)); EXPECT_FALSE(IsClose(1e8f, 1e-8f)); EXPECT_FALSE(IsClose(1e15f, 1e-15f)); TestEdgeCasesClose<T>(); } TEST(TensorTestUtilTest, ExpectTensorCloseDouble) { typedef double T; EXPECT_TRUE(IsClose(1.0, 1.1, 0.1, 0.1)); EXPECT_TRUE(IsClose(1.0, 1.0, 0.0, 0.0)); EXPECT_FALSE(IsClose(1.0, 1.1, 0.0, 0.0)); EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123456)); 
EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123457)); EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123455)); EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123458)); EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123454)); EXPECT_FALSE(IsClose(1.234567890123456, 1.234567890123459)); EXPECT_FALSE(IsClose(1.234567890123456, 1.234567890123453)); EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123459, 9.5e-16, 1.6e-15)); EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123453, 7e-16, 2e-15)); EXPECT_TRUE(IsClose(3.141592653589793238, 3.141592653589793239, 0.0, 0.0)); EXPECT_FALSE(IsClose(1e15, 1e-15)); EXPECT_FALSE(IsClose(1e30, 1e-30)); TestEdgeCasesClose<T>(); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/tensor_testutil.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/tensor_testutil_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
5c396182-72ac-466f-b33e-1ce61dd04742
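The Eigen::half/bfloat16 equality path above compares units in the last place: reinterpret the bits, map the sign-magnitude pattern onto a monotonic unsigned scale, and accept a distance of at most 4 ULPs. The same trick in standalone form for float, assuming IEEE-754 binary32:

#include <cmath>
#include <cstdint>
#include <cstring>

// Maps sign-magnitude float bits onto an unsigned scale where adjacent
// floats differ by exactly 1, so a subtraction counts ULPs.
static uint32_t Biased(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  const uint32_t kSignBit = 0x80000000u;
  return (bits & kSignBit) ? ~bits + 1 : kSignBit | bits;
}

bool AlmostEqualUlps(float x, float y, uint32_t max_ulps = 4) {
  if (std::isnan(x) && std::isnan(y)) return true;  // mirrors the source: NaN == NaN
  if (std::isnan(x) || std::isnan(y)) return false;
  const uint32_t xb = Biased(x);
  const uint32_t yb = Biased(y);
  return (xb >= yb ? xb - yb : yb - xb) <= max_ulps;
}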
cpp
google/quiche
nghttp2
quiche/http2/adapter/nghttp2.h
quiche/http2/adapter/nghttp2_test.cc
#ifndef QUICHE_HTTP2_ADAPTER_NGHTTP2_H_
#define QUICHE_HTTP2_ADAPTER_NGHTTP2_H_

#include <cstddef>

// nghttp2's public API uses ssize_t, which some toolchains (notably MSVC)
// do not define; alias it before pulling in the vendored header.
using ssize_t = ptrdiff_t;

#include "nghttp2/nghttp2.h"

#endif  // QUICHE_HTTP2_ADAPTER_NGHTTP2_H_
#include "quiche/http2/adapter/nghttp2.h" #include <string> #include <utility> #include <vector> #include "absl/strings/str_cat.h" #include "quiche/http2/adapter/mock_nghttp2_callbacks.h" #include "quiche/http2/adapter/nghttp2_test_utils.h" #include "quiche/http2/adapter/nghttp2_util.h" #include "quiche/http2/adapter/test_frame_sequence.h" #include "quiche/http2/adapter/test_utils.h" #include "quiche/common/platform/api/quiche_test.h" namespace http2 { namespace adapter { namespace test { namespace { using testing::_; enum FrameType { DATA, HEADERS, PRIORITY, RST_STREAM, SETTINGS, PUSH_PROMISE, PING, GOAWAY, WINDOW_UPDATE, }; nghttp2_option* GetOptions() { nghttp2_option* options; nghttp2_option_new(&options); nghttp2_option_set_no_closed_streams(options, 1); nghttp2_option_set_no_auto_window_update(options, 1); nghttp2_option_set_max_send_header_block_length(options, 0x2000000); nghttp2_option_set_max_outbound_ack(options, 10000); return options; } class Nghttp2Test : public quiche::test::QuicheTest { public: Nghttp2Test() : session_(MakeSessionPtr(nullptr)) {} void SetUp() override { InitializeSession(); } virtual Perspective GetPerspective() = 0; void InitializeSession() { auto nghttp2_callbacks = MockNghttp2Callbacks::GetCallbacks(); nghttp2_option* options = GetOptions(); nghttp2_session* ptr; if (GetPerspective() == Perspective::kClient) { nghttp2_session_client_new2(&ptr, nghttp2_callbacks.get(), &mock_callbacks_, options); } else { nghttp2_session_server_new2(&ptr, nghttp2_callbacks.get(), &mock_callbacks_, options); } nghttp2_option_del(options); EXPECT_CALL(mock_callbacks_, Send(_, _, _)) .WillRepeatedly( [this](const uint8_t* data, size_t length, int ) { absl::StrAppend(&serialized_, ToStringView(data, length)); return length; }); EXPECT_CALL(mock_callbacks_, SendData(_, _, _, _)) .WillRepeatedly([this](nghttp2_frame* , const uint8_t* framehd, size_t length, nghttp2_data_source* source) { QUICHE_LOG(INFO) << "Appending frame header and " << length << " bytes of data"; auto* s = static_cast<TestDataSource*>(source->ptr); absl::StrAppend(&serialized_, ToStringView(framehd, 9), s->ReadNext(length)); return 0; }); session_ = MakeSessionPtr(ptr); } testing::StrictMock<MockNghttp2Callbacks> mock_callbacks_; nghttp2_session_unique_ptr session_; std::string serialized_; }; class Nghttp2ClientTest : public Nghttp2Test { public: Perspective GetPerspective() override { return Perspective::kClient; } }; TEST_F(Nghttp2ClientTest, ClientReceivesUnexpectedHeaders) { const std::string initial_frames = TestFrameSequence() .ServerPreface() .Ping(42) .WindowUpdate(0, 1000) .Serialize(); testing::InSequence seq; EXPECT_CALL(mock_callbacks_, OnBeginFrame(HasFrameHeader(0, SETTINGS, 0))); EXPECT_CALL(mock_callbacks_, OnFrameRecv(IsSettings(testing::IsEmpty()))); EXPECT_CALL(mock_callbacks_, OnBeginFrame(HasFrameHeader(0, PING, 0))); EXPECT_CALL(mock_callbacks_, OnFrameRecv(IsPing(42))); EXPECT_CALL(mock_callbacks_, OnBeginFrame(HasFrameHeader(0, WINDOW_UPDATE, 0))); EXPECT_CALL(mock_callbacks_, OnFrameRecv(IsWindowUpdate(1000))); ssize_t result = nghttp2_session_mem_recv( session_.get(), ToUint8Ptr(initial_frames.data()), initial_frames.size()); ASSERT_EQ(result, initial_frames.size()); const std::string unexpected_stream_frames = TestFrameSequence() .Headers(1, {{":status", "200"}, {"server", "my-fake-server"}, {"date", "Tue, 6 Apr 2021 12:54:01 GMT"}}, false) .Data(1, "This is the response body.") .RstStream(3, Http2ErrorCode::INTERNAL_ERROR) .GoAway(5, Http2ErrorCode::ENHANCE_YOUR_CALM, "calm 
down!!") .Serialize(); EXPECT_CALL(mock_callbacks_, OnBeginFrame(HasFrameHeader(1, HEADERS, _))); EXPECT_CALL(mock_callbacks_, OnInvalidFrameRecv(IsHeaders(1, _, _), _)); nghttp2_session_mem_recv(session_.get(), ToUint8Ptr(unexpected_stream_frames.data()), unexpected_stream_frames.size()); } TEST_F(Nghttp2ClientTest, ClientSendsRequest) { int result = nghttp2_session_send(session_.get()); ASSERT_EQ(result, 0); EXPECT_THAT(serialized_, testing::StrEq(spdy::kHttp2ConnectionHeaderPrefix)); serialized_.clear(); const std::string initial_frames = TestFrameSequence().ServerPreface().Serialize(); testing::InSequence s; EXPECT_CALL(mock_callbacks_, OnBeginFrame(HasFrameHeader(0, SETTINGS, 0))); EXPECT_CALL(mock_callbacks_, OnFrameRecv(IsSettings(testing::IsEmpty()))); ssize_t recv_result = nghttp2_session_mem_recv( session_.get(), ToUint8Ptr(initial_frames.data()), initial_frames.size()); EXPECT_EQ(initial_frames.size(), recv_result); EXPECT_CALL(mock_callbacks_, BeforeFrameSend(IsSettings(testing::IsEmpty()))); EXPECT_CALL(mock_callbacks_, OnFrameSend(IsSettings(testing::IsEmpty()))); EXPECT_TRUE(nghttp2_session_want_write(session_.get())); result = nghttp2_session_send(session_.get()); EXPECT_THAT(serialized_, EqualsFrames({spdy::SpdyFrameType::SETTINGS})); serialized_.clear(); EXPECT_FALSE(nghttp2_session_want_write(session_.get())); std::vector<std::pair<absl::string_view, absl::string_view>> headers = { {":method", "POST"}, {":scheme", "http"}, {":authority", "example.com"}, {":path", "/this/is/request/one"}}; std::vector<nghttp2_nv> nvs; for (const auto& h : headers) { nvs.push_back({.name = ToUint8Ptr(h.first.data()), .value = ToUint8Ptr(h.second.data()), .namelen = h.first.size(), .valuelen = h.second.size(), .flags = NGHTTP2_NV_FLAG_NONE}); } const absl::string_view kBody = "This is an example request body."; TestDataSource source{kBody}; nghttp2_data_provider provider = source.MakeDataProvider(); int stream_id = nghttp2_submit_request(session_.get(), nullptr , nvs.data(), nvs.size(), &provider, nullptr ); EXPECT_GT(stream_id, 0); EXPECT_TRUE(nghttp2_session_want_write(session_.get())); EXPECT_CALL(mock_callbacks_, BeforeFrameSend(IsHeaders(stream_id, _, _))); EXPECT_CALL(mock_callbacks_, OnFrameSend(IsHeaders(stream_id, _, _))); EXPECT_CALL(mock_callbacks_, OnFrameSend(IsData(stream_id, kBody.size(), _))); nghttp2_session_send(session_.get()); EXPECT_THAT(serialized_, EqualsFrames({spdy::SpdyFrameType::HEADERS, spdy::SpdyFrameType::DATA})); EXPECT_THAT(serialized_, testing::HasSubstr(kBody)); EXPECT_FALSE(nghttp2_session_want_write(session_.get())); } class Nghttp2ServerTest : public Nghttp2Test { public: Perspective GetPerspective() override { return Perspective::kServer; } }; TEST_F(Nghttp2ServerTest, MismatchedContentLength) { const std::string initial_frames = TestFrameSequence() .ClientPreface() .Headers(1, {{":method", "POST"}, {":scheme", "https"}, {":authority", "example.com"}, {":path", "/"}, {"content-length", "50"}}, false) .Data(1, "Less than 50 bytes.", true) .Serialize(); testing::InSequence seq; EXPECT_CALL(mock_callbacks_, OnBeginFrame(HasFrameHeader(0, SETTINGS, _))); EXPECT_CALL(mock_callbacks_, OnFrameRecv(IsSettings(testing::IsEmpty()))); EXPECT_CALL(mock_callbacks_, OnBeginFrame(HasFrameHeader( 1, HEADERS, NGHTTP2_FLAG_END_HEADERS))); EXPECT_CALL(mock_callbacks_, OnBeginHeaders(IsHeaders(1, NGHTTP2_FLAG_END_HEADERS, NGHTTP2_HCAT_REQUEST))); EXPECT_CALL(mock_callbacks_, OnHeader(_, ":method", "POST", _)); EXPECT_CALL(mock_callbacks_, OnHeader(_, ":scheme", "https", 
_)); EXPECT_CALL(mock_callbacks_, OnHeader(_, ":authority", "example.com", _)); EXPECT_CALL(mock_callbacks_, OnHeader(_, ":path", "/", _)); EXPECT_CALL(mock_callbacks_, OnHeader(_, "content-length", "50", _)); EXPECT_CALL(mock_callbacks_, OnFrameRecv(IsHeaders(1, NGHTTP2_FLAG_END_HEADERS, NGHTTP2_HCAT_REQUEST))); EXPECT_CALL(mock_callbacks_, OnBeginFrame(HasFrameHeader(1, DATA, NGHTTP2_FLAG_END_STREAM))); EXPECT_CALL(mock_callbacks_, OnDataChunkRecv(NGHTTP2_FLAG_END_STREAM, 1, "Less than 50 bytes.")); ssize_t result = nghttp2_session_mem_recv( session_.get(), ToUint8Ptr(initial_frames.data()), initial_frames.size()); ASSERT_EQ(result, initial_frames.size()); } } } } }
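The tests above feed the request body through TestDataSource, a QUICHE test helper; the contract it satisfies is nghttp2's standard data-provider read callback. A minimal standalone sketch of such a provider, assuming only the public <nghttp2/nghttp2.h> API (StringSource and ReadStringCallback are hypothetical names, not part of the library or of this test):

#include <nghttp2/nghttp2.h>

#include <algorithm>
#include <cstring>
#include <string>

// Hypothetical string-backed body source; TestDataSource above plays the
// same role but is a QUICHE test utility, not part of nghttp2 itself.
struct StringSource {
  std::string body;
  size_t offset = 0;
};

// Minimal sketch of a body read callback, matching the standard
// nghttp2_data_source_read_callback signature from <nghttp2/nghttp2.h>.
static ssize_t ReadStringCallback(nghttp2_session* /*session*/,
                                  int32_t /*stream_id*/, uint8_t* buf,
                                  size_t length, uint32_t* data_flags,
                                  nghttp2_data_source* source,
                                  void* /*user_data*/) {
  auto* s = static_cast<StringSource*>(source->ptr);
  const size_t n = std::min(length, s->body.size() - s->offset);
  std::memcpy(buf, s->body.data() + s->offset, n);
  s->offset += n;
  if (s->offset == s->body.size()) {
    *data_flags |= NGHTTP2_DATA_FLAG_EOF;  // body fully consumed
  }
  return static_cast<ssize_t>(n);
}

// Usage, mirroring the ClientSendsRequest test above:
//   StringSource src{"This is an example request body."};
//   nghttp2_data_provider provider;
//   provider.source.ptr = &src;
//   provider.read_callback = ReadStringCallback;
//   nghttp2_submit_request(session, nullptr, nvs, nvslen, &provider, nullptr);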
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/nghttp2.h
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/nghttp2_test.cc
6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
17e0abbc-0854-4ec0-a080-901466566b64
cpp
tensorflow/tensorflow
snapshot_manager
tensorflow/core/data/service/snapshot/snapshot_manager.cc
tensorflow/core/data/service/snapshot/snapshot_manager_test.cc
#include "tensorflow/core/data/service/snapshot/snapshot_manager.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/btree_map.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/time/time.h" #include "xla/tsl/lib/io/compression.h" #include "xla/tsl/protobuf/status.pb.h" #include "tensorflow/core/data/service/common.pb.h" #include "tensorflow/core/data/service/dispatcher.pb.h" #include "tensorflow/core/data/service/snapshot/file_utils.h" #include "tensorflow/core/data/service/snapshot/path_utils.h" #include "tensorflow/core/data/service/snapshot/prefetched_split_provider.h" #include "tensorflow/core/data/service/split_provider.h" #include "tensorflow/core/framework/dataset.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/platform/status.h" #include "tsl/platform/env.h" #include "tsl/platform/errors.h" #include "tsl/platform/mutex.h" #include "tsl/platform/path.h" #include "tsl/platform/status_to_from_proto.h" #include "tsl/platform/thread_annotations.h" #include "tsl/platform/threadpool.h" #include "tsl/protobuf/error_codes.pb.h" namespace tensorflow { namespace data { namespace { const absl::Duration kProgressLoggingInterval = absl::Minutes(1); absl::StatusOr<int64_t> CountSplits(SplitProvider& split_provider) { if (split_provider.Cardinality() != kUnknownCardinality) { return split_provider.Cardinality(); } int64_t num_splits = 0; Tensor tensor; for (bool end_of_splits = false; !end_of_splits; ++num_splits) { TF_RETURN_IF_ERROR(split_provider.GetNext(&tensor, &end_of_splits)); } --num_splits; TF_RETURN_IF_ERROR(split_provider.Reset()); return num_splits; } absl::Status SkipSplit(SplitProvider& split_provider, int64_t& repetition_index) { Tensor tensor; bool end_of_splits = false; TF_RETURN_IF_ERROR(split_provider.GetNext(&tensor, &end_of_splits)); while (end_of_splits) { ++repetition_index; TF_RETURN_IF_ERROR(split_provider.Reset()); TF_RETURN_IF_ERROR(split_provider.GetNext(&tensor, &end_of_splits)); } return absl::OkStatus(); } std::string PrefetchedSplitDir(const std::string& snapshot_path, int64_t source_index) { return tsl::io::JoinPath(snapshot_path, "prefetched_splits", absl::StrCat("source_", source_index)); } } absl::StatusOr<bool> SnapshotAssignmentManager::TryAddAssignment( absl::string_view snapshot_path, absl::string_view worker_address, int64_t stream_index) TF_LOCKS_EXCLUDED(mu_) { tsl::mutex_lock l(mu_); if (assignments_[worker_address].size() >= worker_max_concurrent_snapshots()) { return false; } Assignment assignment{std::string(snapshot_path), stream_index}; auto [unused, success] = assignments_[worker_address].insert(assignment); if (!success) { return absl::InternalError(absl::StrCat("Worker ", worker_address, " already had an assignment for ", assignment.DebugString())); } ++snapshot_assignment_counts_[snapshot_path]; return true; } void SnapshotAssignmentManager::RemoveAssignment( absl::string_view snapshot_path, absl::string_view worker_address, int64_t stream_index) TF_LOCKS_EXCLUDED(mu_) { tsl::mutex_lock l(mu_); auto num_erased = assignments_[worker_address].erase( {std::string(snapshot_path), stream_index}); if ((snapshot_assignment_counts_[snapshot_path] -= 
num_erased) <= 0) { snapshot_assignment_counts_.erase(snapshot_path); } } void SnapshotAssignmentManager::AddSnapshot(absl::string_view snapshot_path) TF_LOCKS_EXCLUDED(mu_) { tsl::mutex_lock l(mu_); if (!snapshot_assignment_counts_.contains(snapshot_path)) { snapshot_assignment_counts_[snapshot_path] = 0; } } std::vector<std::string> SnapshotAssignmentManager::LoadBalanceSnapshots( absl::string_view worker_address) TF_LOCKS_EXCLUDED(mu_) { std::vector<std::string> result; tsl::mutex_lock l(mu_); result.reserve(snapshot_assignment_counts_.size()); const auto it = assignments_.find(worker_address); if (it != assignments_.end()) { for (const Assignment& assignment : it->second) { result.push_back(assignment.snapshot_path); } } if (result.size() >= worker_max_concurrent_snapshots()) { return result; } absl::btree_multimap<size_t, std::string> snapshots_by_count; for (const auto& [snapshot, count] : snapshot_assignment_counts_) { snapshots_by_count.emplace(count, snapshot); } for (const auto& [_, snapshot] : snapshots_by_count) { if (absl::c_find(result, snapshot) == result.end()) { result.push_back(snapshot); return result; } } return result; } absl::StatusOr<std::unique_ptr<SnapshotManager>> SnapshotManager::Start( const SnapshotRequest& request, SnapshotAssignmentManager& assignment_manager, Env* env) { std::unique_ptr<SnapshotManager> snapshot_manager{ new SnapshotManager{request.path(), assignment_manager, env}}; TF_RETURN_IF_ERROR(snapshot_manager->Start(request)); return snapshot_manager; } absl::Status SnapshotManager::Start(const SnapshotRequest& request) TF_LOCKS_EXCLUDED(mu_) { LOG(INFO) << "Starting to write tf.data snapshot at " << request.path(); if (env_->FileExists(request.path()).ok()) { return errors::AlreadyExists("tf.data snapshot at ", request.path(), " already exists."); } tsl::mutex_lock l(mu_); TF_RETURN_IF_ERROR(WriteOnDiskSkeleton()); TF_RETURN_IF_ERROR(WriteOnDiskMetadata(request)); TF_ASSIGN_OR_RETURN(sources_, CreateSources(request.dataset())); TF_ASSIGN_OR_RETURN(num_total_splits_, GetSplitsCardinality()); metadata_ = request.metadata(); LOG(INFO) << "Started writing tf.data distributed snapshot at " << path_; return absl::OkStatus(); } absl::StatusOr<std::vector<SnapshotManager::Source>> SnapshotManager::CreateSources(const DatasetDef& dataset_def) const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { std::vector<std::unique_ptr<SplitProvider>> split_providers; TF_RETURN_IF_ERROR(CreateSplitProviders(dataset_def, split_providers)); std::vector<SnapshotManager::Source> sources; sources.reserve(split_providers.size()); for (size_t i = 0; i < split_providers.size(); ++i) { TF_ASSIGN_OR_RETURN(size_t cardinality, CountSplits(*split_providers[i])); sources.emplace_back( std::make_unique<PrefetchedSplitProvider>( std::move(split_providers[i]), PrefetchedSplitDir(path_, i), env_), 0, cardinality); } return sources; } absl::StatusOr<int64_t> SnapshotManager::GetSplitsCardinality() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { return absl::c_accumulate(sources_, 0, [](size_t cardinality, const Source& source) { return cardinality + source.cardinality; }); } absl::Status SnapshotManager::WriteOnDiskSkeleton() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { TF_RETURN_IF_ERROR( env_->RecursivelyCreateDir(CommittedChunksDirectory(path_))); TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(StreamsDirectory(path_))); return absl::OkStatus(); } absl::Status SnapshotManager::WriteOnDiskMetadata( const SnapshotRequest& request) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { 
TF_RETURN_IF_ERROR(AtomicallyWriteTextProto(SnapshotMetadataFilePath(path_), request.metadata(), env_)); TF_RETURN_IF_ERROR(AtomicallyWriteStringToFile( DatasetSpecFilePath(path_), request.metadata().element_spec(), env_)); TF_RETURN_IF_ERROR(AtomicallyWriteBinaryProto(DatasetDefFilePath(path_), request.dataset(), env_)); return absl::OkStatus(); } absl::StatusOr<std::unique_ptr<SnapshotManager>> SnapshotManager::Resume( absl::string_view path, SnapshotAssignmentManager& assignment_manager, Env* env) { SnapshotManager* snapshot_manager = new SnapshotManager(path, assignment_manager, env); TF_RETURN_IF_ERROR(snapshot_manager->Resume()); return absl::WrapUnique(snapshot_manager); } absl::Status SnapshotManager::Resume() TF_LOCKS_EXCLUDED(mu_) { tsl::mutex_lock l(mu_); if (!env_->FileExists(path_).ok()) { return absl::InternalError( absl::StrCat("Failed to recover tf.data snapshot at ", path_, ": the snapshot path doesn't exist.")); } if (env_->FileExists(SnapshotDoneFilePath(path_)).ok()) { mode_ = Mode::kDone; LOG(INFO) << "Recovered finished tf.data snapshot at " << path_; return absl::OkStatus(); } if (env_->FileExists(SnapshotErrorFilePath(path_)).ok()) { mode_ = Mode::kError; StatusProto status_proto; TF_RETURN_IF_ERROR( ReadTextProto(env_, SnapshotErrorFilePath(path_), &status_proto)); status_ = tsl::StatusFromProto(status_proto); return absl::OkStatus(); } TF_RETURN_IF_ERROR(ReadOnDiskMetadata()); TF_RETURN_IF_ERROR(ReadOnDiskStreams()); LOG(INFO) << "Resumed writing tf.data distributed snapshot at " << path_; return absl::OkStatus(); } absl::Status SnapshotManager::ReadOnDiskMetadata() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (!env_->FileExists(SnapshotMetadataFilePath(path_)).ok()) { return absl::InternalError( absl::StrCat("Failed to recover snapshot at ", path_, ": snapshot has no snapshot.metadata")); } TF_RETURN_IF_ERROR( ReadTextProto(env_, SnapshotMetadataFilePath(path_), &metadata_)); if (!env_->FileExists(DatasetDefFilePath(path_)).ok()) { return absl::InternalError( absl::StrCat("Failed to recover snapshot at ", path_, ": snapshot has no dataset_def.proto")); } return absl::OkStatus(); } absl::Status SnapshotManager::ReadOnDiskStreams() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { std::string streams_path = StreamsDirectory(path_); TF_ASSIGN_OR_RETURN(const std::vector<std::string> stream_directories, GetChildren(streams_path, env_)); DatasetDef dataset_def; TF_RETURN_IF_ERROR( tsl::ReadBinaryProto(env_, DatasetDefFilePath(path_), &dataset_def)); std::vector<std::unique_ptr<SplitProvider>> split_providers; TF_RETURN_IF_ERROR(CreateSplitProviders(dataset_def, split_providers)); std::vector<int64_t> repetition_indices(split_providers.size(), 0); std::vector<int64_t> cardinalities; for (size_t i = 0; i < split_providers.size(); ++i) { TF_ASSIGN_OR_RETURN(int64_t cardinality, CountSplits(*split_providers[i])); cardinalities.push_back(cardinality); } tsl::mutex mu; absl::Status resume_status; absl::flat_hash_set<int64_t> global_split_indices; auto thread_pool = std::make_unique<tsl::thread::ThreadPool>( env_, tsl::ThreadOptions{}, "restore_snapshot_stream_thread", std::max(size_t{1}, stream_directories.size())); for (const auto& stream_directory : stream_directories) { std::string stream_path = tsl::io::JoinPath(streams_path, stream_directory); std::vector<std::string> tokens = absl::StrSplit(stream_directory, '_'); int64_t stream_index; if (tokens.size() != 2 || !absl::SimpleAtoi(tokens[1], &stream_index) || stream_index < 0) { return absl::InternalError(absl::StrCat( "Can't parse tf.data 
snapshot stream directory ", stream_path, ": filename must have the format stream_<stream_index>.")); } thread_pool->Schedule([this, &stream_directories, stream_index, &split_providers, &repetition_indices, &global_split_indices, &resume_status, &mu]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { StreamRestorer stream_restorer(env_, path_, stream_index, split_providers.size(), assignment_manager_); absl::Status s = stream_restorer.ReadOnDiskStream(); tsl::mutex_lock l(mu); resume_status.Update(s); resume_status.Update(RestoreFrom(stream_restorer, stream_directories, split_providers, repetition_indices, global_split_indices)); }); } thread_pool.reset(); TF_RETURN_IF_ERROR(resume_status); for (int64_t i = 0; i < split_providers.size(); ++i) { sources_.emplace_back( std::make_unique<PrefetchedSplitProvider>( std::move(split_providers[i]), PrefetchedSplitDir(path_, i), env_), repetition_indices[i], cardinalities[i]); } TF_ASSIGN_OR_RETURN(num_total_splits_, GetSplitsCardinality()); for (int64_t i = 0; i < global_split_indices.size(); ++i) { if (!global_split_indices.contains(i)) { return absl::InternalError( absl::StrCat("Failed to restore tf.data snapshot at ", path_, ": Found missing global split index ", i, ".")); } } num_assigned_splits_ = global_split_indices.size(); if (!streams_.empty() && absl::c_all_of(streams_, [](const auto& stream) { return stream.second.state == Stream::State::kDone; })) { mode_ = Mode::kDone; TF_RETURN_IF_ERROR(AtomicallyWriteStringToFile(SnapshotDoneFilePath(path_), std::string(), env_)); LOG(INFO) << "Finished writing tf.data distributed snapshot at " << path_; } return absl::OkStatus(); } absl::StatusOr<std::string> SnapshotManager::StreamRestorer::OwnerWorkerAddress() const { std::string worker_address; TF_RETURN_IF_ERROR( env_->FileExists(StreamWorkerFilePath(path_, stream_index_))); TF_RETURN_IF_ERROR(tsl::ReadFileToString( env_, StreamWorkerFilePath(path_, stream_index_), &worker_address)); return worker_address; } absl::Status SnapshotManager::StreamRestorer::ReadOnDiskStream() { absl::StatusOr<std::string> worker_address = OwnerWorkerAddress(); if (!worker_address.ok()) { return absl::OkStatus(); } worker_address_ = *worker_address; restored_stream_.emplace(num_sources_); std::string splits_path = SplitsDirectory(path_, stream_index_); TF_ASSIGN_OR_RETURN(std::vector<std::string> source_directories, GetChildren(splits_path, env_)); for (const auto& source_directory : source_directories) { std::string source_path = tsl::io::JoinPath(splits_path, source_directory); std::vector<std::string> tokens = absl::StrSplit(source_directory, '_'); int64_t source_index = 0; if (tokens.size() != 2 || !absl::SimpleAtoi(tokens[1], &source_index) || source_index < 0) { return absl::InternalError(absl::StrCat( "Can't parse tf.data snapshot source directory ", source_path, ": filename must have the format source_<source_index>.")); } if (source_index >= num_sources_) { return absl::InternalError( absl::StrCat("Found conflict between the number of sources, ", num_sources_, ", and the filename of ", source_path)); } TF_RETURN_IF_ERROR(ReadOnDiskSource(source_index)); } if (env_->FileExists(StreamDoneFilePath(path_, stream_index_)).ok()) { restored_stream_->state = Stream::State::kDone; return absl::OkStatus(); } TF_ASSIGN_OR_RETURN(bool assignment_added, assignment_manager_.TryAddAssignment( path_, *worker_address, stream_index_)); if (!assignment_added) { return absl::InternalError(absl::StrCat( "Failed to recover tf.data snapshot dispatcher: Worker ", *worker_address, " was assigned 
too many streams. At most ", assignment_manager_.worker_max_concurrent_snapshots(), " streams are allowed.")); } return absl::OkStatus(); } absl::Status SnapshotManager::StreamRestorer::ReadOnDiskSource( int64_t source_index) { std::string source_directory = SourceDirectory(path_, stream_index_, source_index); TF_ASSIGN_OR_RETURN(std::vector<std::string> repetition_directories, GetChildren(source_directory, env_)); for (const std::string& repetition : repetition_directories) { std::string repetition_dir = tsl::io::JoinPath(source_directory, repetition); TF_ASSIGN_OR_RETURN(std::vector<std::string> split_files, GetChildren(repetition_dir, env_)); for (const std::string& split_file : split_files) { std::string split_path = tsl::io::JoinPath(repetition_dir, split_file); TF_RETURN_IF_ERROR( ReadOnDiskSplit(source_index, split_files, split_path)); } restored_stream_->num_assigned_splits_per_source[source_index] += split_files.size(); } return absl::OkStatus(); } absl::Status SnapshotManager::StreamRestorer::ReadOnDiskSplit( int64_t source_index, const std::vector<std::string>& split_files, const std::string& split_file) { TF_ASSIGN_OR_RETURN(auto split_indices, ParseSplitFilename(split_file)); auto [local_split_index, global_split_index] = split_indices; if (global_split_indices_.contains(global_split_index)) { return absl::InternalError(absl::StrCat( "Failed to restore tf.data snapshot at ", path_, ": Found duplicate global split index in split ", split_file, ".")); } global_split_indices_.insert(global_split_index); return absl::OkStatus(); } absl::Status SnapshotManager::RestoreFrom( const StreamRestorer& stream_restorer, const std::vector<std::string>& stream_directories, std::vector<std::unique_ptr<SplitProvider>>& split_providers, std::vector<std::int64_t>& repetition_indices, absl::flat_hash_set<int64_t>& global_split_indices) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (!stream_restorer.GetStream().has_value()) { return absl::OkStatus(); } streams_.insert( {stream_restorer.StreamIndex(), *stream_restorer.GetStream()}); auto [it, success] = assignments_.insert( {stream_restorer.WorkerAddress(), stream_restorer.StreamIndex()}); if (!success) { return absl::InternalError(absl::StrCat( "tf.data dispatcher failed to assign stream ", stream_restorer.StreamIndex(), " to snapshot worker ", stream_restorer.WorkerAddress(), ": The worker is already assigned stream ", it->second, ".")); } for (int64_t source_index = 0; source_index < repetition_indices.size(); ++source_index) { int64_t skip_splits = GetStream(stream_restorer.StreamIndex()) .num_assigned_splits_per_source[source_index]; for (int64_t i = 0; i < skip_splits; ++i) { TF_RETURN_IF_ERROR(SkipSplit(*split_providers[source_index], repetition_indices[source_index])); } } for (int64_t global_split_index : stream_restorer.GlobalSplitIndices()) { if (global_split_indices.contains(global_split_index)) { return absl::InternalError( absl::StrCat("Failed to restore tf.data snapshot at ", path_, ": Found ", "duplicate global split index in stream ", stream_restorer.StreamIndex(), ".")); } global_split_indices.insert(global_split_index); } return absl::OkStatus(); } SnapshotManager::Stream& SnapshotManager::GetStream(int64_t stream_index) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { auto [it, _] = streams_.try_emplace(stream_index, num_sources()); return it->second; } absl::Status SnapshotManager::HandleStreamCompletion( int64_t stream_index, absl::string_view worker_address) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { GetStream(stream_index).state = Stream::State::kDone; 
assignment_manager_.RemoveAssignment(path_, worker_address, stream_index); ++num_completed_streams_; if (absl::c_all_of(streams_, [](const auto& stream) { return stream.second.state == Stream::State::kDone; })) { mode_ = Mode::kDone; TF_RETURN_IF_ERROR(AtomicallyWriteStringToFile(SnapshotDoneFilePath(path_), std::string(), env_)); LOG(INFO) << "Finished writing tf.data distributed snapshot at " << path_; } return absl::OkStatus(); } absl::Status SnapshotManager::HandleStreamError( absl::string_view worker_address, const StatusProto& status_proto) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (!status_.ok()) { return absl::OkStatus(); } mode_ = Mode::kError; status_ = tsl::StatusFromProto(status_proto); TF_RETURN_IF_ERROR(AtomicallyWriteTextProto(SnapshotErrorFilePath(path_), status_proto, env_)); LOG(ERROR) << "Failed to write tf.data distributed snapshot at " << path_ << ". Worker " << worker_address << " reported error: " << status_; return absl::OkStatus(); } absl::StatusOr<std::optional<int64_t>> SnapshotManager::MaybeCreateAndAssignNewStream(absl::string_view worker_address) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { int64_t new_stream_index = streams_.empty() ? 0 : streams_.rbegin()->first + 1; TF_ASSIGN_OR_RETURN(bool assignment_added, assignment_manager_.TryAddAssignment( path_, worker_address, new_stream_index)); if (!assignment_added) { return std::optional<int64_t>(); } streams_.insert({new_stream_index, Stream(num_sources())}); assignments_[worker_address] = new_stream_index; return new_stream_index; } absl::StatusOr<std::optional<std::pair<int64_t, bool>>> SnapshotManager::MaybeGetOrCreateStreamAssignment( absl::string_view worker_address, const SnapshotTaskProgress* snapshot_progress) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { std::optional<int64_t> assigned_stream_index; if (auto it = assignments_.find(worker_address); it != assignments_.end()) { assigned_stream_index = it->second; } if (snapshot_progress) { if (assigned_stream_index.has_value() && *assigned_stream_index != snapshot_progress->snapshot_task().stream_index()) { return absl::InternalError(absl::StrCat( "tf.data snapshot worker ", worker_address, " was assigned stream ", snapshot_progress->snapshot_task().stream_index(), ", but is now assigned a different stream ", *assigned_stream_index)); } if (assigned_stream_index.has_value() && snapshot_progress->completed()) { TF_RETURN_IF_ERROR(HandleStreamCompletion( snapshot_progress->snapshot_task().stream_index(), worker_address)); return std::nullopt; } if (snapshot_progress->status().code() != error::OK) { TF_RETURN_IF_ERROR( HandleStreamError(worker_address, snapshot_progress->status())); return std::nullopt; } } if (!assigned_stream_index) { if (mode_ != Mode::kActive) { return std::nullopt; } TF_ASSIGN_OR_RETURN(assigned_stream_index, MaybeCreateAndAssignNewStream(worker_address)); if (!assigned_stream_index.has_value()) { return std::nullopt; } return std::make_pair(*assigned_stream_index, true); } if (!assigned_stream_index.has_value() || GetStream(*assigned_stream_index).state == Stream::State::kDone) { return std::nullopt; } return std::make_pair(*assigned_stream_index, false); } absl::Status SnapshotManager::WorkerHeartbeat( const WorkerHeartbeatRequest& request, WorkerHeartbeatResponse& response) TF_LOCKS_EXCLUDED(mu_) { std::optional<std::pair<int64_t, bool>> assigned_stream_index; std::vector<int64_t> repetitions_per_source; { tsl::mutex_lock l(mu_); dead_workers_.erase(request.worker_address()); if (mode_ == Mode::kDone || mode_ == Mode::kError) { return absl::OkStatus(); } if 
(absl::Time now = absl::FromUnixMicros(env_->NowMicros()); now - last_progress_log_time_ > kProgressLoggingInterval) { LOG(INFO) << "tf.data snapshot progress [" << path_ << "]: " << num_completed_streams_ << "/" << streams_.size() << " streams completed; " << num_assigned_splits_ << "/" << num_total_splits_ << " splits assigned or completed."; last_progress_log_time_ = now; } const SnapshotTaskProgress* snapshot_progress = nullptr; if (auto it = request.snapshot_task_progress().find(path_); it != request.snapshot_task_progress().end()) { snapshot_progress = &it->second; } if (snapshot_progress && snapshot_progress->completed() && mode_ == Mode::kActive) { mode_ = Mode::kWindingDown; } TF_ASSIGN_OR_RETURN(assigned_stream_index, MaybeGetOrCreateStreamAssignment( request.worker_address(), snapshot_progress)); if (!assigned_stream_index.has_value()) { return absl::OkStatus(); } SnapshotTaskDef* snapshot_task = response.add_snapshot_tasks(); snapshot_task->set_base_path(path_); snapshot_task->set_num_sources(num_sources()); *snapshot_task->mutable_metadata() = metadata_; snapshot_task->set_stream_index(assigned_stream_index->first); for (int64_t source_index = 0; source_index < num_sources(); ++source_index) { repetitions_per_source.push_back(sources_[source_index].repetition_index); } } const auto [stream_index, is_new_stream] = *assigned_stream_index; if (is_new_stream) { TF_RETURN_IF_ERROR(InitStreamDirectory( stream_index, request.worker_address(), repetitions_per_source)); LOG(INFO) << "For snapshot at " << path_ << ", created stream_" << stream_index << " and assigned to " << request.worker_address(); } return absl::OkStatus(); } absl::Status SnapshotManager::InitStreamDirectory( int64_t stream_index, const std::string& worker_address, const std::vector<int64_t>& repetitions_per_source) { for (int64_t source_index = 0; source_index < repetitions_per_source.size(); ++source_index) { for (int64_t repetition_index = 0; repetition_index <= repetitions_per_source[source_index]; ++repetition_index) { TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(RepetitionDirectory( path_, stream_index, source_index, repetition_index))); } } return AtomicallyWriteStringToFile(StreamWorkerFilePath(path_, stream_index), worker_address, env_); } absl::Status SnapshotManager::GetSnapshotSplit( const GetSnapshotSplitRequest& request, GetSnapshotSplitResponse& response) TF_LOCKS_EXCLUDED(get_split_mu_, mu_) { int64_t local_split_index = 0; int64_t global_split_index = 0; PrefetchedSplitProvider* split_provider = nullptr; tsl::mutex_lock get_split_lock(get_split_mu_); { tsl::mutex_lock l(mu_); if (auto it = assignments_.find(request.worker_address()); it == assignments_.end()) { return absl::InternalError( absl::StrCat("tf.data snapshot worker ", request.worker_address(), " was assigned stream ", request.stream_index(), ", but the assignment is no longer available.")); } else if (it->second != request.stream_index()) { return absl::InternalError( absl::StrCat("tf.data snapshot worker ", request.worker_address(), " was assigned stream ", request.stream_index(), " but is now assigned a different stream ", it->second)); } Stream& stream = GetStream(request.stream_index()); local_split_index = stream.num_assigned_splits_per_source[request.source_index()]; global_split_index = num_assigned_splits_; response.set_local_split_index(local_split_index); Source& source = sources_[request.source_index()]; if (request.repetition_index() < source.repetition_index) { response.set_end_of_splits(true); return absl::OkStatus(); } 
while (request.repetition_index() > source.repetition_index) { TF_RETURN_IF_ERROR(ResetSource(source, request.source_index())); } split_provider = source.split_provider.get(); } std::string split_path = SplitPath( path_, request.stream_index(), request.source_index(), request.repetition_index(), local_split_index, global_split_index); TF_ASSIGN_OR_RETURN(std::optional<Tensor> split, split_provider->GetNext(split_path)); if (!split.has_value()) { response.set_end_of_splits(true); return absl::OkStatus(); } split->AsProtoTensorContent(response.mutable_split()); tsl::mutex_lock l(mu_); ++GetStream(request.stream_index()) .num_assigned_splits_per_source[request.source_index()]; ++num_assigned_splits_; return absl::OkStatus(); } absl::Status SnapshotManager::ResetSource(Source& source, int64_t source_index) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { TF_RETURN_IF_ERROR(source.split_provider->Reset()); ++source.repetition_index; LOG(INFO) << "Starting repetition_" << source.repetition_index << " " << "for snapshot " << path_ << ", source " << source_index; for (const auto& [stream_index, _] : streams_) { TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(RepetitionDirectory( path_, stream_index, source_index, source.repetition_index))); } return absl::OkStatus(); } absl::Status SnapshotManager::GetSnapshotStreams( GetSnapshotStreamsResponse& response) TF_LOCKS_EXCLUDED(mu_) { tsl::tf_shared_lock l(mu_); for (const auto& [stream_index, stream] : streams_) { SnapshotStreamInfo* stream_info = response.add_streams(); stream_info->set_index(stream_index); stream_info->set_state(stream.state == Stream::State::kDone ? SnapshotStreamInfo::DONE : SnapshotStreamInfo::ASSIGNED); } return absl::OkStatus(); } void SnapshotManager::Cancel() { std::vector<PrefetchedSplitProvider*> split_providers_to_cancel; { tsl::mutex_lock l(mu_); for (Source& source : sources_) { split_providers_to_cancel.push_back(source.split_provider.get()); } } for (PrefetchedSplitProvider* split_provider : split_providers_to_cancel) { split_provider->Cancel(); } } } }
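A subtlety in CountSplits() above: the for-loop increments num_splits once per GetNext() call, including the final call that only reports end_of_splits, hence the trailing --num_splits. A minimal standalone analogue of that loop, assuming a provider that yields n splits and first signals end-of-splits on call n+1 (the std::function stands in for SplitProvider::GetNext and is a hypothetical simplification, not a TF type):

#include <cstdint>
#include <functional>

// Minimal analogue of the CountSplits() loop above; get_next() returns
// end_of_splits for the current call.
int64_t CountSplitsSketch(const std::function<bool()>& get_next) {
  int64_t num_splits = 0;
  for (bool end_of_splits = false; !end_of_splits; ++num_splits) {
    end_of_splits = get_next();
  }
  // The loop header incremented once for the final call, which only
  // signalled end-of-splits and produced no split; undo that increment.
  return num_splits - 1;
}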
#include "tensorflow/core/data/service/snapshot/snapshot_manager.h" #include <memory> #include <string> #include "xla/tsl/lib/core/status_test_util.h" #include "xla/tsl/protobuf/status.pb.h" #include "tensorflow/core/data/service/common.pb.h" #include "tensorflow/core/data/service/dispatcher.pb.h" #include "tensorflow/core/data/service/snapshot/path_utils.h" #include "tensorflow/core/data/service/test_util.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tsl/platform/env.h" #include "tsl/platform/status.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/status_to_from_proto.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" #include "tsl/protobuf/error_codes.pb.h" namespace tensorflow { namespace data { namespace { using ::testing::_; using ::testing::ElementsAre; using ::testing::IsEmpty; using ::testing::Not; using ::testing::SizeIs; using ::testing::UnorderedElementsAre; using ::tsl::testing::IsOkAndHolds; using ::tsl::testing::StatusIs; template <class T> T GetValue(const Tensor& tensor) { return tensor.unaligned_flat<T>().data()[0]; } TEST(SnapshotManagerTest, CreateStreamAssignment) { std::string snapshot_path = testing::LocalTempFilename(); SnapshotRequest request; *request.mutable_dataset() = testing::RangeDataset(10); request.set_path(snapshot_path); *request.mutable_metadata() = testing::CreateDummyDistributedSnapshotMetadata(); SnapshotAssignmentManager snapshot_assignment_manager( 2); TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<SnapshotManager> snapshot_manager, SnapshotManager::Start(request, snapshot_assignment_manager, Env::Default())); WorkerHeartbeatRequest heartbeat_request; WorkerHeartbeatResponse heartbeat_response; heartbeat_request.set_worker_address("localhost"); TF_ASSERT_OK( snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response)); ASSERT_EQ(heartbeat_response.snapshot_tasks().size(), 1); EXPECT_EQ(heartbeat_response.snapshot_tasks(0).base_path(), snapshot_path); EXPECT_EQ(heartbeat_response.snapshot_tasks(0).stream_index(), 0); EXPECT_EQ(heartbeat_response.snapshot_tasks(0).num_sources(), 1); } TEST(SnapshotManagerTest, GetSnapshotSplit) { std::string snapshot_path = testing::LocalTempFilename(); SnapshotRequest request; *request.mutable_dataset() = testing::RangeDataset(10); request.set_path(snapshot_path); *request.mutable_metadata() = testing::CreateDummyDistributedSnapshotMetadata(); SnapshotAssignmentManager snapshot_assignment_manager( 2); TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<SnapshotManager> snapshot_manager, SnapshotManager::Start(request, snapshot_assignment_manager, Env::Default())); WorkerHeartbeatRequest heartbeat_request; WorkerHeartbeatResponse heartbeat_response; heartbeat_request.set_worker_address("localhost"); TF_ASSERT_OK( snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response)); const SnapshotTaskDef& task = heartbeat_response.snapshot_tasks(0); GetSnapshotSplitRequest get_split_request; GetSnapshotSplitResponse get_split_response; get_split_request.set_worker_address("localhost"); get_split_request.set_base_path(task.base_path()); get_split_request.set_stream_index(task.stream_index()); get_split_request.set_source_index(0); for (int64_t i = 0; i < 10; ++i) { TF_ASSERT_OK(snapshot_manager->GetSnapshotSplit(get_split_request, get_split_response)); Tensor tensor; ASSERT_TRUE(tensor.FromProto(get_split_response.split())); EXPECT_EQ(GetValue<int64_t>(tensor), i); } } TEST(SnapshotManagerTest, HandleStreamCompletion) { 
std::string snapshot_path = testing::LocalTempFilename(); SnapshotRequest request; *request.mutable_dataset() = testing::RangeDataset(10); request.set_path(snapshot_path); *request.mutable_metadata() = testing::CreateDummyDistributedSnapshotMetadata(); SnapshotAssignmentManager snapshot_assignment_manager( 2); TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<SnapshotManager> snapshot_manager, SnapshotManager::Start(request, snapshot_assignment_manager, Env::Default())); WorkerHeartbeatRequest heartbeat_request; WorkerHeartbeatResponse heartbeat_response; heartbeat_request.set_worker_address("localhost:1"); TF_ASSERT_OK( snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response)); heartbeat_request.Clear(); heartbeat_response.Clear(); heartbeat_request.set_worker_address("localhost:2"); TF_ASSERT_OK( snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response)); ASSERT_EQ(heartbeat_response.snapshot_tasks().size(), 1); const SnapshotTaskDef& snapshot_task = heartbeat_response.snapshot_tasks(0); EXPECT_EQ(snapshot_task.base_path(), snapshot_path); EXPECT_EQ(snapshot_task.stream_index(), 1); EXPECT_EQ(snapshot_task.num_sources(), 1); heartbeat_request.Clear(); heartbeat_response.Clear(); heartbeat_request.set_worker_address("localhost:1"); SnapshotTaskProgress progress; *progress.mutable_snapshot_task() = snapshot_task; progress.set_completed(true); (*heartbeat_request.mutable_snapshot_task_progress())[snapshot_path] = progress; TF_ASSERT_OK( snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response)); EXPECT_TRUE(heartbeat_response.snapshot_tasks().empty()); heartbeat_request.Clear(); heartbeat_response.Clear(); heartbeat_request.set_worker_address("localhost:1"); TF_ASSERT_OK( snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response)); EXPECT_TRUE(heartbeat_response.snapshot_tasks().empty()); } TEST(SnapshotManagerTest, Resume) { std::string snapshot_path = testing::LocalTempFilename(); SnapshotRequest request; *request.mutable_dataset() = testing::RangeDataset(10); request.set_path(snapshot_path); *request.mutable_metadata() = testing::CreateDummyDistributedSnapshotMetadata(); SnapshotAssignmentManager snapshot_assignment_manager_1( 2); TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<SnapshotManager> snapshot_manager, SnapshotManager::Start(request, snapshot_assignment_manager_1, Env::Default())); WorkerHeartbeatRequest heartbeat_request; WorkerHeartbeatResponse heartbeat_response; heartbeat_request.set_worker_address("localhost"); TF_ASSERT_OK( snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response)); EXPECT_THAT(heartbeat_response.snapshot_tasks(), SizeIs(1)); heartbeat_response.Clear(); SnapshotAssignmentManager snapshot_assignment_manager_2( 2); TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<SnapshotManager> resumed_manager, SnapshotManager::Resume(snapshot_path, snapshot_assignment_manager_2, Env::Default())); TF_EXPECT_OK( resumed_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response)); EXPECT_THAT(heartbeat_response.snapshot_tasks(), SizeIs(1)); } TEST(SnapshotManagerTest, SnapshotStreamError) { std::string snapshot_path = testing::LocalTempFilename(); SnapshotRequest snapshot_request; *snapshot_request.mutable_dataset() = testing::RangeDataset(10); snapshot_request.set_path(snapshot_path); *snapshot_request.mutable_metadata() = testing::CreateDummyDistributedSnapshotMetadata(); SnapshotAssignmentManager snapshot_assignment_manager( 2); TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<SnapshotManager> snapshot_manager, 
SnapshotManager::Start(snapshot_request, snapshot_assignment_manager, Env::Default())); WorkerHeartbeatRequest heartbeat_request; WorkerHeartbeatResponse heartbeat_response; heartbeat_request.set_worker_address("localhost"); TF_ASSERT_OK( snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response)); const SnapshotTaskDef& task = heartbeat_response.snapshot_tasks(0); heartbeat_response.Clear(); SnapshotTaskProgress snapshot_task_progress; *snapshot_task_progress.mutable_snapshot_task() = task; *snapshot_task_progress.mutable_status() = tsl::StatusToProto(errors::NotFound("Not found")); (*heartbeat_request.mutable_snapshot_task_progress())[snapshot_path] = snapshot_task_progress; TF_EXPECT_OK( snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response)); EXPECT_THAT(heartbeat_response.snapshot_tasks(), IsEmpty()); TF_ASSERT_OK( Env::Default()->FileExists(SnapshotErrorFilePath(snapshot_path))); StatusProto status_proto; TF_ASSERT_OK(ReadTextProto( Env::Default(), SnapshotErrorFilePath(snapshot_path), &status_proto)); EXPECT_THAT(tsl::StatusFromProto(status_proto), StatusIs(error::NOT_FOUND, "Not found")); } TEST(SnapshotManagerTest, ResumeFromError) { std::string snapshot_path = testing::LocalTempFilename(); SnapshotRequest request; *request.mutable_dataset() = testing::RangeDataset(10); request.set_path(snapshot_path); *request.mutable_metadata() = testing::CreateDummyDistributedSnapshotMetadata(); SnapshotAssignmentManager snapshot_assignment_manager_1( 2); TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<SnapshotManager> snapshot_manager, SnapshotManager::Start(request, snapshot_assignment_manager_1, Env::Default())); WorkerHeartbeatRequest heartbeat_request; WorkerHeartbeatResponse heartbeat_response; heartbeat_request.set_worker_address("localhost"); TF_ASSERT_OK( snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response)); ASSERT_THAT(heartbeat_response.snapshot_tasks(), SizeIs(1)); const SnapshotTaskDef& task = heartbeat_response.snapshot_tasks(0); heartbeat_response.Clear(); SnapshotTaskProgress snapshot_task_progress; *snapshot_task_progress.mutable_snapshot_task() = task; *snapshot_task_progress.mutable_status() = tsl::StatusToProto(errors::NotFound("Not found")); (*heartbeat_request.mutable_snapshot_task_progress())[snapshot_path] = snapshot_task_progress; TF_EXPECT_OK( snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response)); EXPECT_THAT(heartbeat_response.snapshot_tasks(), IsEmpty()); heartbeat_response.Clear(); SnapshotAssignmentManager snapshot_assignment_manager_2( 2); TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<SnapshotManager> resumed_manager, SnapshotManager::Resume(snapshot_path, snapshot_assignment_manager_2, Env::Default())); TF_EXPECT_OK( resumed_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response)); EXPECT_THAT(heartbeat_response.snapshot_tasks(), IsEmpty()); } TEST(SnapshotAssignmentManagerTest, LoadBalanceSnapshots) { SnapshotAssignmentManager snapshot_assignment_manager( 2); snapshot_assignment_manager.AddSnapshot("snapshot_1"); snapshot_assignment_manager.AddSnapshot("snapshot_2"); snapshot_assignment_manager.AddSnapshot("snapshot_3"); EXPECT_THAT(snapshot_assignment_manager.TryAddAssignment( "snapshot_3", "worker_1", 0), IsOkAndHolds(true)); EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"), ElementsAre("snapshot_3", _)); ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_2"), ElementsAre(Not("snapshot_3"))); EXPECT_THAT(snapshot_assignment_manager.TryAddAssignment( 
"snapshot_2", "worker_1", 0), IsOkAndHolds(true)); ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"), UnorderedElementsAre("snapshot_2", "snapshot_3")); EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_2"), ElementsAre("snapshot_1")); EXPECT_THAT(snapshot_assignment_manager.TryAddAssignment( "snapshot_1", "worker_1", 0), IsOkAndHolds(false)); EXPECT_THAT(snapshot_assignment_manager.TryAddAssignment( "snapshot_2", "worker_2", 0), IsOkAndHolds(true)); ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"), UnorderedElementsAre("snapshot_2", "snapshot_3")); EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_2"), ElementsAre("snapshot_2", "snapshot_1")); snapshot_assignment_manager.RemoveAssignment("snapshot_2", "worker_1", 0); EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"), ElementsAre("snapshot_3", "snapshot_1")); ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_2"), ElementsAre("snapshot_2", "snapshot_1")); snapshot_assignment_manager.RemoveAssignment("snapshot_3", "worker_1", 0); ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"), ElementsAre("snapshot_1")); ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_2"), ElementsAre("snapshot_2", "snapshot_1")); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/snapshot_manager.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/snapshot_manager_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
6fdff665-ed54-4a1b-8aa2-c0280a997a05
cpp
tensorflow/tensorflow
tf_threadpool_concurrent_work_queue
tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.cc
tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue_test.cc
#include "tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.h" #include <memory> #include <optional> #include <utility> #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/threadpool.h" #include "tensorflow/core/platform/threadpool_interface.h" #include "tensorflow/core/tfrt/utils/thread_pool.h" #include "tfrt/host_context/async_value.h" #include "tfrt/host_context/execution_context.h" #include "tfrt/host_context/task_function.h" #include "tfrt/support/forward_decls.h" #include "tfrt/support/latch.h" namespace tensorflow { namespace tfrt_stub { using ::tensorflow::thread::ThreadPoolInterface; absl::StatusOr<std::unique_ptr<WorkQueueInterface>> TfThreadPoolWorkQueue::InitializeRequest(int64_t request_id) const { return {std::make_unique<TfThreadPoolWorkQueue>( request_id, intra_op_threadpool_, inter_op_threadpool_)}; } void TfThreadPoolWorkQueue::AddTask(tfrt::TaskFunction work) { auto* copy = new tfrt::TaskFunction( tensorflow::tfrt_stub::WrapWork(id(), "inter", std::move(work))); inter_op_threadpool_->Schedule([copy] { (*copy)(); delete copy; }); } std::optional<tfrt::TaskFunction> TfThreadPoolWorkQueue::AddBlockingTask( tfrt::TaskFunction work, bool allow_queuing) { AddTask(std::move(work)); return std::nullopt; } void TfThreadPoolWorkQueue::Quiesce() { } void TfThreadPoolWorkQueue::Await( tfrt::ArrayRef<tfrt::RCReference<tfrt::AsyncValue>> values) { tfrt::latch values_remaining(values.size()); for (auto& value : values) { value->AndThen([&values_remaining]() { values_remaining.count_down(); }); } values_remaining.wait(); } bool TfThreadPoolWorkQueue::IsInWorkerThread() const { return true; } std::unique_ptr<TfThreadPoolWorkQueue> CreateDefaultTfThreadPoolWorkQueue( int num_inter_op_threads, int num_intra_op_threads) { struct ThreadPools { TfThreadPool inter_op_threadpool; TfThreadPool intra_op_threadpool; ThreadPools(int num_inter_op_threads, int num_intra_op_threads) : inter_op_threadpool("default_work_queue_inter", num_inter_op_threads), intra_op_threadpool("default_work_queue_intra", num_intra_op_threads) {} }; class Wrapper : public TfThreadPoolWorkQueue { public: explicit Wrapper(std::unique_ptr<ThreadPools> thread_pools) : TfThreadPoolWorkQueue( &thread_pools->intra_op_threadpool, &thread_pools->inter_op_threadpool), thread_pools_(std::move(thread_pools)) {} ~Wrapper() override = default; private: std::unique_ptr<ThreadPools> thread_pools_; }; return std::make_unique<Wrapper>(std::make_unique<ThreadPools>( num_inter_op_threads, num_intra_op_threads)); } } }
#include "tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.h" #include <memory> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status_matchers.h" #include "tensorflow/core/tfrt/utils/thread_pool.h" #include "tfrt/host_context/host_allocator.h" #include "tfrt/host_context/host_context.h" #include "tfrt/support/latch.h" namespace tensorflow { namespace tfrt_stub { namespace { const int32_t kNumThreads = 2; class TfThreadpoolWorkQueueTest : public ::testing::Test { protected: TfThreadpoolWorkQueueTest() : tf_threadpool_cwq_(CreateDefaultTfThreadPoolWorkQueue( kNumThreads, kNumThreads)) {} std::unique_ptr<TfThreadPoolWorkQueue> tf_threadpool_cwq_; }; TEST_F(TfThreadpoolWorkQueueTest, GetParallelismLevelOk) { EXPECT_GT(tf_threadpool_cwq_->GetParallelismLevel(), 0); } TEST_F(TfThreadpoolWorkQueueTest, GetNameOk) { EXPECT_EQ(tf_threadpool_cwq_->name(), "TfThreadPoolWorkQueue"); } TEST_F(TfThreadpoolWorkQueueTest, InitializeRequestOk) { tfrt::RequestContextBuilder ctx_builder(nullptr, nullptr); auto queue = tf_threadpool_cwq_->InitializeRequest(0); TF_ASSERT_OK(queue.status()); EXPECT_NE(*queue, nullptr); EXPECT_NE((*queue)->GetIntraOpThreadPool(), nullptr); } TEST_F(TfThreadpoolWorkQueueTest, IsInWorkerThreadOk) { EXPECT_TRUE(tf_threadpool_cwq_->IsInWorkerThread()); } TEST_F(TfThreadpoolWorkQueueTest, RunningBlockingTask) { tfrt::latch latch(10); int n = 0; tensorflow::mutex m; for (int i = 0; i < 10; ++i) { tf_threadpool_cwq_->AddBlockingTask(tfrt::TaskFunction([&n, &m, &latch] { { tensorflow::mutex_lock lock(m); ++n; } latch.count_down(); }), true); } latch.wait(); EXPECT_EQ(n, 10); } TEST_F(TfThreadpoolWorkQueueTest, RunningNonBlockingTask) { tfrt::latch latch(10); int n = 0; tensorflow::mutex m; for (int i = 0; i < 10; ++i) { tf_threadpool_cwq_->AddTask(tfrt::TaskFunction([&n, &m, &latch] { { tensorflow::mutex_lock lock(m); ++n; } latch.count_down(); })); } latch.wait(); EXPECT_EQ(n, 10); } TEST_F(TfThreadpoolWorkQueueTest, RunningMixedTask) { tfrt::latch latch(20); int n = 0; tensorflow::mutex m; for (int i = 0; i < 10; ++i) { tf_threadpool_cwq_->AddTask(tfrt::TaskFunction([&n, &m, &latch] { { tensorflow::mutex_lock lock(m); ++n; } latch.count_down(); })); tf_threadpool_cwq_->AddBlockingTask(tfrt::TaskFunction([&n, &m, &latch] { { tensorflow::mutex_lock lock(m); ++n; } latch.count_down(); }), true); } latch.wait(); EXPECT_EQ(n, 20); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
0ceab836-2f6f-4d6c-85b5-579dbed57ba0
cpp
tensorflow/tensorflow
dot_dimension_merger
third_party/xla/xla/service/dot_dimension_merger.cc
third_party/xla/xla/service/dot_dimension_merger_test.cc
#include "xla/service/dot_dimension_merger.h" #include <cstdint> #include <memory> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/layout_util.h" #include "xla/service/hlo_creation_utils.h" #include "xla/shape.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/statusor.h" namespace xla { namespace { std::vector<int64_t> ShiftDimensions(absl::Span<const int64_t> dimensions, const int64_t start, const int64_t shift) { std::vector<int64_t> new_dimensions; new_dimensions.reserve(dimensions.size()); for (const int64_t i : dimensions) { if (i < start) { new_dimensions.push_back(i); } else { new_dimensions.push_back(i - shift); } } return new_dimensions; } class BatchDimensionMerger : public DfsHloRewriteVisitor { public: absl::Status HandleDot(HloInstruction* dot) override { const DotDimensionNumbers& dnums = dot->dot_dimension_numbers(); const Shape& lhs_shape = dot->operand(0)->shape(); const Shape& rhs_shape = dot->operand(1)->shape(); CHECK_EQ(dnums.lhs_batch_dimensions_size(), dnums.rhs_batch_dimensions_size()); const int64_t batch_dimension_count = dnums.lhs_batch_dimensions_size(); if (batch_dimension_count < 2 || !DistinctNumbersAreConsecutiveIfSorted(dnums.lhs_batch_dimensions()) || !DistinctNumbersAreConsecutiveIfSorted(dnums.rhs_batch_dimensions()) || !absl::c_is_sorted(dnums.lhs_batch_dimensions()) || !absl::c_is_sorted(dnums.rhs_batch_dimensions()) || !LayoutUtil::AreDimensionsConsecutive(lhs_shape.layout(), dnums.lhs_batch_dimensions()) || !LayoutUtil::AreDimensionsConsecutive(rhs_shape.layout(), dnums.rhs_batch_dimensions())) { return absl::OkStatus(); } const int64_t lhs_batch_dimension = *absl::c_min_element(dnums.lhs_batch_dimensions()); const int64_t rhs_batch_dimension = *absl::c_min_element(dnums.rhs_batch_dimensions()); int64_t batch_size = 1; for (const int64_t dimension_number : dnums.lhs_batch_dimensions()) { batch_size *= lhs_shape.dimensions(dimension_number); } auto merge_batch_dims = [&](Shape old_shape, int64_t batch_dim) { Shape new_shape = old_shape; for (int64_t i = 1; i < batch_dimension_count; ++i) { new_shape.DeleteDimension(batch_dim + 1); } new_shape.set_dimensions(batch_dim, batch_size); return new_shape; }; Shape new_lhs_shape = merge_batch_dims(lhs_shape, lhs_batch_dimension); Shape new_rhs_shape = merge_batch_dims(rhs_shape, rhs_batch_dimension); DotDimensionNumbers new_dot_dimension_numbers; new_dot_dimension_numbers.add_lhs_batch_dimensions(lhs_batch_dimension); new_dot_dimension_numbers.add_rhs_batch_dimensions(rhs_batch_dimension); { const std::vector<int64_t> shifted_contracting_dimensions = ShiftDimensions(dnums.lhs_contracting_dimensions(), lhs_batch_dimension, batch_dimension_count - 1); new_dot_dimension_numbers.mutable_lhs_contracting_dimensions()->Assign( shifted_contracting_dimensions.begin(), shifted_contracting_dimensions.end()); } { const std::vector<int64_t> shifted_contracting_dimensions = ShiftDimensions(dnums.rhs_contracting_dimensions(), rhs_batch_dimension, batch_dimension_count - 1); new_dot_dimension_numbers.mutable_rhs_contracting_dimensions()->Assign( shifted_contracting_dimensions.begin(), 
shifted_contracting_dimensions.end()); } auto sparsity = Cast<HloDotInstruction>(dot)->sparsity(); std::vector<SparsityDescriptor> new_sparsity(sparsity.begin(), sparsity.end()); std::vector<HloInstruction*> sparse_meta(sparsity.size()); for (int i = 0; i < sparsity.size(); ++i) { SparsityDescriptor& descriptor = new_sparsity[i]; int64_t sparse_batch_dim = descriptor.index() == 0 ? lhs_batch_dimension : rhs_batch_dimension; if (descriptor.dimension() > sparse_batch_dim) descriptor.set_dimension(descriptor.dimension() - (batch_dimension_count - 1)); HloInstruction* meta = dot->mutable_operand(HloDotInstruction::kOperands + i); Shape new_meta_shape = merge_batch_dims(meta->shape(), sparse_batch_dim); TF_ASSIGN_OR_RETURN(sparse_meta[i], MakeReshapeHlo(new_meta_shape, meta)); } TF_ASSIGN_OR_RETURN(HloInstruction * reshaped_lhs, MakeReshapeHlo(new_lhs_shape, dot->mutable_operand(0))); TF_ASSIGN_OR_RETURN(HloInstruction * reshaped_rhs, MakeReshapeHlo(new_rhs_shape, dot->mutable_operand(1))); Shape new_dot_shape = merge_batch_dims(dot->shape(), 0); HloInstruction* new_dot = dot->parent()->AddInstruction( HloInstruction::CreateDot(new_dot_shape, reshaped_lhs, reshaped_rhs, new_dot_dimension_numbers, dot->precision_config(), new_sparsity, sparse_meta), &dot->metadata()); dot->SetupDerivedInstruction(new_dot); std::unique_ptr<HloInstruction> out_reshape = HloInstruction::CreateReshape(dot->shape(), new_dot); return ReplaceWithNewInstruction(dot, std::move(out_reshape)); } }; } absl::StatusOr<bool> DotDimensionMerger::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { return BatchDimensionMerger().RunOnModule(module, execution_threads); } }
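The index arithmetic above is easiest to see on the shapes from the first test below: merging batch dims {1,2} of bf16[79,2,4,12,11] into one dimension of size 2*4=8 deletes batch_dimension_count-1 = 1 dimension, so contracting dims {0,4} shift to {0,3}. A self-contained restatement of ShiftDimensions with that worked example (std::vector replaces absl::Span so the snippet compiles on its own; ShiftDimensionsSketch is an illustrative name):

#include <cassert>
#include <cstdint>
#include <vector>

// ShiftDimensions() restated standalone: indices below `start` are kept,
// indices at or beyond it move down by `shift`.
std::vector<int64_t> ShiftDimensionsSketch(const std::vector<int64_t>& dims,
                                           int64_t start, int64_t shift) {
  std::vector<int64_t> out;
  out.reserve(dims.size());
  for (const int64_t i : dims) {
    out.push_back(i < start ? i : i - shift);
  }
  return out;
}

int main() {
  // From the MergeConsecutiveBatchDimensions test: lhs batch dims {1,2}
  // merge at index 1, removing one dimension, so {0,4} becomes {0,3}.
  const auto shifted = ShiftDimensionsSketch({0, 4}, /*start=*/1, /*shift=*/1);
  assert((shifted == std::vector<int64_t>{0, 3}));  // matches the FileCheck
  return 0;
}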
#include "xla/service/dot_dimension_merger.h" #include <memory> #include <string> #include <gtest/gtest.h> #include "xla/service/hlo_parser.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla { namespace { using DotDimensionMergerTest = HloTestBase; TEST_F(DotDimensionMergerTest, MergeConsecutiveBatchDimensions) { const std::string kHloText = R"( HloModule m ENTRY e { p0 = bf16[79,2,4,12,11] parameter(0) p1 = bf16[79,2,4,11,44] parameter(1) ROOT d = bf16[2,4,12,44] dot(p0, p1), lhs_batch_dims={1,2}, lhs_contracting_dims={0,4}, rhs_batch_dims={1,2}, rhs_contracting_dims={0,3}, metadata={op_name="testname"} })"; RunAndFilecheckHloRewrite(kHloText, DotDimensionMerger(), R"( ; CHECK: %[[R0:.*]] = bf16[79,8,12,11]{3,2,1,0} reshape(%p0) ; CHECK: %[[R1:.*]] = bf16[79,8,11,44]{3,2,1,0} reshape(%p1) ; CHECK: %[[DOT:.*]] = bf16[8,12,44]{2,1,0} dot(%[[R0]], %[[R1]]) ; CHECK-SAME: lhs_batch_dims={1} ; CHECK-SAME: lhs_contracting_dims={0,3} ; CHECK-SAME: rhs_batch_dims={1} ; CHECK-SAME: rhs_contracting_dims={0,2} ; CHECK-NEXT: ROOT {{[^ ]+}} = bf16[2,4,12,44]{3,2,1,0} reshape(%[[DOT]]) ; CHECK-SAME: metadata={op_name="testname"} )"); } TEST_F(DotDimensionMergerTest, MergeConsecutiveBatchDimensionsNonDefaultLayouts) { const std::string kHloText = R"( HloModule m ENTRY e { p0 = bf16[79,2,4,12,11]{4,0,3,2,1} parameter(0) p1 = bf16[79,2,4,11,44]{3,0,4,2,1} parameter(1) ROOT d = bf16[2,4,12,44]{3,1,0,2} dot(p0, p1), lhs_batch_dims={1,2}, lhs_contracting_dims={0,4}, rhs_batch_dims={1,2}, rhs_contracting_dims={0,3}, metadata={op_name="testname"} })"; RunAndFilecheckHloRewrite(kHloText, DotDimensionMerger(), R"( ; CHECK: %[[R0:.*]] = bf16[79,8,12,11]{3,0,2,1} reshape(%p0) ; CHECK: %[[R1:.*]] = bf16[79,8,11,44]{2,0,3,1} reshape(%p1) ; CHECK: %[[DOT:.*]] = bf16[8,12,44]{2,0,1} dot(%[[R0]], %[[R1]]) ; CHECK-SAME: lhs_batch_dims={1} ; CHECK-SAME: lhs_contracting_dims={0,3} ; CHECK-SAME: rhs_batch_dims={1} ; CHECK-SAME: rhs_contracting_dims={0,2} ; CHECK-NEXT: ROOT {{[^ ]+}} = bf16[2,4,12,44]{3,1,0,2} reshape(%[[DOT]]) ; CHECK-SAME: metadata={op_name="testname"} )"); } TEST_F(DotDimensionMergerTest, SkipPhysicallyNonConsecutiveBatchDimensions) { const std::string kHloText = R"( HloModule m ENTRY e { p0 = bf16[2,4,12,13]{3,1,2,0} parameter(0) p1 = bf16[2,4,13,55]{3,2,1,0} parameter(1) ROOT d = bf16[2,4,12,55]{3,2,1,0} dot(p0, p1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kHloText)); TF_ASSERT_OK_AND_ASSIGN(bool modified, DotDimensionMerger().Run(module.get())); EXPECT_FALSE(modified); } TEST_F(DotDimensionMergerTest, SkipUnsortedBatchDimensions) { const std::string kHloText = R"( HloModule m ENTRY e { p0 = bf16[4,2,12,13] parameter(0) p1 = bf16[2,4,13,55] parameter(1) ROOT d = bf16[2,4,12,55] dot(p0, p1), lhs_batch_dims={1,0}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kHloText)); TF_ASSERT_OK_AND_ASSIGN(bool modified, DotDimensionMerger().Run(module.get())); EXPECT_FALSE(modified); } TEST_F(DotDimensionMergerTest, SkipLogicallyNonConsecutiveBatchDimensions) { const std::string kHloText = R"( HloModule m ENTRY e { p0 = bf16[2,12,4,13] parameter(0) p1 = bf16[2,4,13,55] parameter(1) ROOT d = bf16[2,4,12,55] dot(p0, p1), lhs_batch_dims={0,2}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, 
rhs_contracting_dims={2} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kHloText)); TF_ASSERT_OK_AND_ASSIGN(bool modified, DotDimensionMerger().Run(module.get())); EXPECT_FALSE(modified); } TEST_F(DotDimensionMergerTest, SparseDotUpdatesDescriptor) { const std::string kHloText = R"( HloModule m ENTRY e { p0 = bf16[3,4,5,6,16] parameter(0) p1 = bf16[3,4,5,32,6] parameter(1) meta = u16[3,4,5,6,2] parameter(2) ROOT d = bf16[4,5,6,6] dot(p0, p1, meta), sparsity=L.4@2:4, lhs_batch_dims={1,2}, lhs_contracting_dims={0,4}, rhs_batch_dims={1,2}, rhs_contracting_dims={0,3} })"; RunAndFilecheckHloRewrite(kHloText, DotDimensionMerger(), R"( ; CHECK: %[[R0:.*]] = bf16[3,20,6,16]{3,2,1,0} reshape(%p0) ; CHECK: %[[R1:.*]] = bf16[3,20,32,6]{3,2,1,0} reshape(%p1) ; CHECK: %[[R2:.*]] = u16[3,20,6,2]{3,2,1,0} reshape(%meta) ; CHECK: %[[DOT:.*]] = bf16[20,6,6]{2,1,0} dot(%[[R0]], %[[R1]], %[[R2]]) ; CHECK-SAME: lhs_batch_dims={1} ; CHECK-SAME: lhs_contracting_dims={0,3} ; CHECK-SAME: rhs_batch_dims={1} ; CHECK-SAME: rhs_contracting_dims={0,2} ; CHECK-SAME: sparsity=L.3@2:4 ; CHECK-NEXT: ROOT {{.+}} = bf16[4,5,6,6]{3,2,1,0} reshape(%[[DOT]]) )"); } } }
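The Skip* tests above hinge on the physical-adjacency precondition: with minor-to-major layout {3,1,2,0}, batch dims 0 and 1 sit at layout positions 3 and 1, so they are not contiguous in memory and the pass bails out. A simplified stand-in for that check, assuming minor-to-major layout order as in HLO (this sketches the idea, not XLA's LayoutUtil::AreDimensionsConsecutive):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// True iff the given dimensions occupy consecutive positions in the
// minor-to-major layout order.
bool DimsConsecutiveInLayout(const std::vector<int>& minor_to_major,
                             const std::vector<int>& dims) {
  std::vector<int> positions;
  for (int d : dims) {
    auto it = std::find(minor_to_major.begin(), minor_to_major.end(), d);
    positions.push_back(static_cast<int>(it - minor_to_major.begin()));
  }
  std::sort(positions.begin(), positions.end());
  for (size_t i = 1; i < positions.size(); ++i) {
    if (positions[i] != positions[i - 1] + 1) return false;
  }
  return true;
}

int main() {
  // SkipPhysicallyNonConsecutiveBatchDimensions: layout {3,1,2,0} puts batch
  // dims {0,1} at positions 3 and 1, which are not adjacent, so no rewrite.
  assert(!DimsConsecutiveInLayout({3, 1, 2, 0}, {0, 1}));
  // Default layout {3,2,1,0} puts them at positions 3 and 2: adjacent.
  assert(DimsConsecutiveInLayout({3, 2, 1, 0}, {0, 1}));
  return 0;
}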
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_dimension_merger.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_dimension_merger_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
974b7765-e143-439c-bdc2-2ea618027569
cpp
tensorflow/tensorflow
cache
third_party/xla/xla/tsl/lib/io/cache.cc
third_party/xla/xla/tsl/lib/io/cache_test.cc
#include "xla/tsl/lib/io/cache.h" #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "tsl/platform/mutex.h" #include "tsl/platform/raw_coding.h" namespace tsl { namespace table { Cache::~Cache() {} namespace { struct LRUHandle { void* value; void (*deleter)(const Slice&, void* value); LRUHandle* next_hash; LRUHandle* next; LRUHandle* prev; size_t charge; size_t key_length; bool in_cache; uint32_t refs; uint32_t hash; char key_data[1]; Slice key() const { assert(next != this); return Slice(key_data, key_length); } }; class HandleTable { public: HandleTable() : length_(0), elems_(0), list_(nullptr) { Resize(); } ~HandleTable() { delete[] list_; } LRUHandle* Lookup(const Slice& key, uint32_t hash) { return *FindPointer(key, hash); } LRUHandle* Insert(LRUHandle* h) { LRUHandle** ptr = FindPointer(h->key(), h->hash); LRUHandle* old = *ptr; h->next_hash = (old == nullptr ? nullptr : old->next_hash); *ptr = h; if (old == nullptr) { ++elems_; if (elems_ > length_) { Resize(); } } return old; } LRUHandle* Remove(const Slice& key, uint32_t hash) { LRUHandle** ptr = FindPointer(key, hash); LRUHandle* result = *ptr; if (result != nullptr) { *ptr = result->next_hash; --elems_; } return result; } private: uint32_t length_; uint32_t elems_; LRUHandle** list_; LRUHandle** FindPointer(const Slice& key, uint32_t hash) { LRUHandle** ptr = &list_[hash & (length_ - 1)]; while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) { ptr = &(*ptr)->next_hash; } return ptr; } void Resize() { uint32_t new_length = 4; while (new_length < elems_) { new_length *= 2; } LRUHandle** new_list = new LRUHandle*[new_length]; memset(new_list, 0, sizeof(new_list[0]) * new_length); uint32_t count = 0; for (uint32_t i = 0; i < length_; i++) { LRUHandle* h = list_[i]; while (h != nullptr) { LRUHandle* next = h->next_hash; uint32_t hash = h->hash; LRUHandle** ptr = &new_list[hash & (new_length - 1)]; h->next_hash = *ptr; *ptr = h; h = next; count++; } } assert(elems_ == count); delete[] list_; list_ = new_list; length_ = new_length; } }; class LRUCache { public: LRUCache(); ~LRUCache(); void SetCapacity(size_t capacity) { capacity_ = capacity; } Cache::Handle* Insert(const Slice& key, uint32_t hash, void* value, size_t charge, void (*deleter)(const Slice& key, void* value)); Cache::Handle* Lookup(const Slice& key, uint32_t hash); void Release(Cache::Handle* handle); void Erase(const Slice& key, uint32_t hash); void Prune(); size_t TotalCharge() const { mutex_lock l(mutex_); return usage_; } private: void LRU_Remove(LRUHandle* e); void LRU_Append(LRUHandle* list, LRUHandle* e); void Ref(LRUHandle* e); void Unref(LRUHandle* e); bool FinishErase(LRUHandle* e) TF_EXCLUSIVE_LOCKS_REQUIRED(mutex_); size_t capacity_; mutable mutex mutex_; size_t usage_ TF_GUARDED_BY(mutex_); LRUHandle lru_ TF_GUARDED_BY(mutex_); LRUHandle in_use_ TF_GUARDED_BY(mutex_); HandleTable table_ TF_GUARDED_BY(mutex_); }; LRUCache::LRUCache() : capacity_(0), usage_(0) { lru_.next = &lru_; lru_.prev = &lru_; in_use_.next = &in_use_; in_use_.prev = &in_use_; } LRUCache::~LRUCache() { assert(in_use_.next == &in_use_); for (LRUHandle* e = lru_.next; e != &lru_;) { LRUHandle* next = e->next; assert(e->in_cache); e->in_cache = false; assert(e->refs == 1); Unref(e); e = next; } } void LRUCache::Ref(LRUHandle* e) { if (e->refs == 1 && e->in_cache) { LRU_Remove(e); LRU_Append(&in_use_, e); } e->refs++; } void LRUCache::Unref(LRUHandle* e) { assert(e->refs > 0); e->refs--; if (e->refs == 0) { assert(!e->in_cache); 
(*e->deleter)(e->key(), e->value); free(e); } else if (e->in_cache && e->refs == 1) { LRU_Remove(e); LRU_Append(&lru_, e); } } void LRUCache::LRU_Remove(LRUHandle* e) { e->next->prev = e->prev; e->prev->next = e->next; } void LRUCache::LRU_Append(LRUHandle* list, LRUHandle* e) { e->next = list; e->prev = list->prev; e->prev->next = e; e->next->prev = e; } Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) { mutex_lock l(mutex_); LRUHandle* e = table_.Lookup(key, hash); if (e != nullptr) { Ref(e); } return reinterpret_cast<Cache::Handle*>(e); } void LRUCache::Release(Cache::Handle* handle) { mutex_lock l(mutex_); Unref(reinterpret_cast<LRUHandle*>(handle)); } Cache::Handle* LRUCache::Insert(const Slice& key, uint32_t hash, void* value, size_t charge, void (*deleter)(const Slice& key, void* value)) { mutex_lock l(mutex_); LRUHandle* e = reinterpret_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key.size())); e->value = value; e->deleter = deleter; e->charge = charge; e->key_length = key.size(); e->hash = hash; e->in_cache = false; e->refs = 1; memcpy(e->key_data, key.data(), key.size()); if (capacity_ > 0) { e->refs++; e->in_cache = true; LRU_Append(&in_use_, e); usage_ += charge; FinishErase(table_.Insert(e)); } else { e->next = nullptr; } while (usage_ > capacity_ && lru_.next != &lru_) { LRUHandle* old = lru_.next; assert(old->refs == 1); bool erased = FinishErase(table_.Remove(old->key(), old->hash)); if (!erased) { assert(erased); } } return reinterpret_cast<Cache::Handle*>(e); } bool LRUCache::FinishErase(LRUHandle* e) { if (e != nullptr) { assert(e->in_cache); LRU_Remove(e); e->in_cache = false; usage_ -= e->charge; Unref(e); } return e != nullptr; } void LRUCache::Erase(const Slice& key, uint32_t hash) { mutex_lock l(mutex_); FinishErase(table_.Remove(key, hash)); } void LRUCache::Prune() { mutex_lock l(mutex_); while (lru_.next != &lru_) { LRUHandle* e = lru_.next; assert(e->refs == 1); bool erased = FinishErase(table_.Remove(e->key(), e->hash)); if (!erased) { assert(erased); } } } static const int kNumShardBits = 4; static const int kNumShards = 1 << kNumShardBits; class ShardedLRUCache : public Cache { private: LRUCache shard_[kNumShards]; mutex id_mutex_; uint64_t last_id_; static inline uint32_t HashSlice(const Slice& s) { return Hash(s.data(), s.size(), 0); } static uint32_t Shard(uint32_t hash) { return hash >> (32 - kNumShardBits); } public: explicit ShardedLRUCache(size_t capacity) : last_id_(0) { const size_t per_shard = (capacity + (kNumShards - 1)) / kNumShards; for (int s = 0; s < kNumShards; s++) { shard_[s].SetCapacity(per_shard); } } ~ShardedLRUCache() override {} Handle* Insert(const Slice& key, void* value, size_t charge, void (*deleter)(const Slice& key, void* value)) override { const uint32_t hash = HashSlice(key); return shard_[Shard(hash)].Insert(key, hash, value, charge, deleter); } Handle* Lookup(const Slice& key) override { const uint32_t hash = HashSlice(key); return shard_[Shard(hash)].Lookup(key, hash); } void Release(Handle* handle) override { LRUHandle* h = reinterpret_cast<LRUHandle*>(handle); shard_[Shard(h->hash)].Release(handle); } void Erase(const Slice& key) override { const uint32_t hash = HashSlice(key); shard_[Shard(hash)].Erase(key, hash); } void* Value(Handle* handle) override { return reinterpret_cast<LRUHandle*>(handle)->value; } uint64_t NewId() override { mutex_lock l(id_mutex_); return ++(last_id_); } void Prune() override { for (int s = 0; s < kNumShards; s++) { shard_[s].Prune(); } } size_t TotalCharge() const override { 
size_t total = 0; for (int s = 0; s < kNumShards; s++) { total += shard_[s].TotalCharge(); } return total; } private: static uint32_t Hash(const char* data, size_t n, uint32_t seed) { const uint32_t m = 0xc6a4a793; const uint32_t r = 24; const char* limit = data + n; uint32_t h = seed ^ (n * m); while (data + 4 <= limit) { uint32_t w = core::DecodeFixed32(data); data += 4; h += w; h *= m; h ^= (h >> 16); } switch (limit - data) { case 3: h += static_cast<uint8_t>(data[2]) << 16; ABSL_FALLTHROUGH_INTENDED; case 2: h += static_cast<uint8_t>(data[1]) << 8; ABSL_FALLTHROUGH_INTENDED; case 1: h += static_cast<uint8_t>(data[0]); h *= m; h ^= (h >> r); break; } return h; } }; } Cache* NewLRUCache(size_t capacity) { return new ShardedLRUCache(capacity); } } }
#include "xla/tsl/lib/io/cache.h" #include <string> #include <vector> #include "tsl/platform/coding.h" #include "tsl/platform/raw_coding.h" #include "tsl/platform/test.h" namespace tsl { namespace table { static std::string EncodeKey(int k) { std::string result; core::PutFixed32(&result, k); return result; } static int DecodeKey(const Slice& k) { assert(k.size() == 4); return core::DecodeFixed32(k.data()); } static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); } static int DecodeValue(void* v) { return reinterpret_cast<uintptr_t>(v); } class CacheTest : public ::testing::Test { public: static void Deleter(const Slice& key, void* v) { current_->deleted_keys_.push_back(DecodeKey(key)); current_->deleted_values_.push_back(DecodeValue(v)); } static constexpr int kCacheSize = 1000; std::vector<int> deleted_keys_; std::vector<int> deleted_values_; Cache* cache_; CacheTest() : cache_(NewLRUCache(kCacheSize)) { current_ = this; } ~CacheTest() { delete cache_; } int Lookup(int key) { Cache::Handle* handle = cache_->Lookup(EncodeKey(key)); const int r = (handle == nullptr) ? -1 : DecodeValue(cache_->Value(handle)); if (handle != nullptr) { cache_->Release(handle); } return r; } void Insert(int key, int value, int charge = 1) { cache_->Release(cache_->Insert(EncodeKey(key), EncodeValue(value), charge, &CacheTest::Deleter)); } Cache::Handle* InsertAndReturnHandle(int key, int value, int charge = 1) { return cache_->Insert(EncodeKey(key), EncodeValue(value), charge, &CacheTest::Deleter); } void Erase(int key) { cache_->Erase(EncodeKey(key)); } static CacheTest* current_; }; CacheTest* CacheTest::current_; TEST_F(CacheTest, HitAndMiss) { ASSERT_EQ(-1, Lookup(100)); Insert(100, 101); ASSERT_EQ(101, Lookup(100)); ASSERT_EQ(-1, Lookup(200)); ASSERT_EQ(-1, Lookup(300)); Insert(200, 201); ASSERT_EQ(101, Lookup(100)); ASSERT_EQ(201, Lookup(200)); ASSERT_EQ(-1, Lookup(300)); Insert(100, 102); ASSERT_EQ(102, Lookup(100)); ASSERT_EQ(201, Lookup(200)); ASSERT_EQ(-1, Lookup(300)); ASSERT_EQ(1, deleted_keys_.size()); ASSERT_EQ(100, deleted_keys_[0]); ASSERT_EQ(101, deleted_values_[0]); } TEST_F(CacheTest, Erase) { Erase(200); ASSERT_EQ(0, deleted_keys_.size()); Insert(100, 101); Insert(200, 201); Erase(100); ASSERT_EQ(-1, Lookup(100)); ASSERT_EQ(201, Lookup(200)); ASSERT_EQ(1, deleted_keys_.size()); ASSERT_EQ(100, deleted_keys_[0]); ASSERT_EQ(101, deleted_values_[0]); Erase(100); ASSERT_EQ(-1, Lookup(100)); ASSERT_EQ(201, Lookup(200)); ASSERT_EQ(1, deleted_keys_.size()); } TEST_F(CacheTest, EntriesArePinned) { Insert(100, 101); Cache::Handle* h1 = cache_->Lookup(EncodeKey(100)); ASSERT_EQ(101, DecodeValue(cache_->Value(h1))); Insert(100, 102); Cache::Handle* h2 = cache_->Lookup(EncodeKey(100)); ASSERT_EQ(102, DecodeValue(cache_->Value(h2))); ASSERT_EQ(0, deleted_keys_.size()); cache_->Release(h1); ASSERT_EQ(1, deleted_keys_.size()); ASSERT_EQ(100, deleted_keys_[0]); ASSERT_EQ(101, deleted_values_[0]); Erase(100); ASSERT_EQ(-1, Lookup(100)); ASSERT_EQ(1, deleted_keys_.size()); cache_->Release(h2); ASSERT_EQ(2, deleted_keys_.size()); ASSERT_EQ(100, deleted_keys_[1]); ASSERT_EQ(102, deleted_values_[1]); } TEST_F(CacheTest, EvictionPolicy) { Insert(100, 101); Insert(200, 201); Insert(300, 301); Cache::Handle* h = cache_->Lookup(EncodeKey(300)); for (int i = 0; i < kCacheSize + 100; i++) { Insert(1000 + i, 2000 + i); ASSERT_EQ(2000 + i, Lookup(1000 + i)); ASSERT_EQ(101, Lookup(100)); } ASSERT_EQ(101, Lookup(100)); ASSERT_EQ(-1, Lookup(200)); ASSERT_EQ(301, Lookup(300)); cache_->Release(h); } 
TEST_F(CacheTest, UseExceedsCacheSize) { std::vector<Cache::Handle*> h; for (int i = 0; i < kCacheSize + 100; i++) { h.push_back(InsertAndReturnHandle(1000 + i, 2000 + i)); } for (int i = 0; i < h.size(); i++) { ASSERT_EQ(2000 + i, Lookup(1000 + i)); } for (int i = 0; i < h.size(); i++) { cache_->Release(h[i]); } } TEST_F(CacheTest, HeavyEntries) { const int kLight = 1; const int kHeavy = 10; int added = 0; int index = 0; while (added < 2 * kCacheSize) { const int weight = (index & 1) ? kLight : kHeavy; Insert(index, 1000 + index, weight); added += weight; index++; } int cached_weight = 0; for (int i = 0; i < index; i++) { const int weight = (i & 1 ? kLight : kHeavy); int r = Lookup(i); if (r >= 0) { cached_weight += weight; ASSERT_EQ(1000 + i, r); } } ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10); } TEST_F(CacheTest, NewId) { uint64_t a = cache_->NewId(); uint64_t b = cache_->NewId(); ASSERT_NE(a, b); } TEST_F(CacheTest, Prune) { Insert(1, 100); Insert(2, 200); Cache::Handle* handle = cache_->Lookup(EncodeKey(1)); ASSERT_TRUE(handle); cache_->Prune(); cache_->Release(handle); ASSERT_EQ(100, Lookup(1)); ASSERT_EQ(-1, Lookup(2)); } TEST_F(CacheTest, ZeroSizeCache) { delete cache_; cache_ = NewLRUCache(0); Insert(1, 100); ASSERT_EQ(-1, Lookup(1)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/cache.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/cache_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
0f77a7a9-b07e-42da-9f2a-f6b620f364f6
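Usage note (editor's sketch, not part of the record above): the LRU cache in cache.cc stores opaque void* values with an explicit charge and a per-entry deleter, and hands out handles that pin entries until released. A minimal driver under those assumptions follows; only names visible in the files above are used (NewLRUCache, Insert, Lookup, Value, Release, Erase), the helper name DeleteString is hypothetical, and the deleter is defined inside tsl::table so the unqualified Slice parameter type resolves exactly as it does in cache_test.cc.

#include <cassert>
#include <string>

#include "xla/tsl/lib/io/cache.h"

namespace tsl {
namespace table {
// Hypothetical deleter (editor's sketch): the cache invokes it when an entry
// is evicted, erased, or the cache itself is destroyed.
static void DeleteString(const Slice& key, void* value) {
  delete static_cast<std::string*>(value);
}
}  // namespace table
}  // namespace tsl

int main() {
  using tsl::table::Cache;
  Cache* cache = tsl::table::NewLRUCache(/*capacity=*/128);
  // Insert returns a handle that pins the new entry; Release it promptly so
  // the entry becomes evictable once usage exceeds capacity.
  cache->Release(cache->Insert("key", new std::string("value"), /*charge=*/1,
                               &tsl::table::DeleteString));
  if (Cache::Handle* h = cache->Lookup("key")) {
    assert(*static_cast<std::string*>(cache->Value(h)) == "value");
    cache->Release(h);
  }
  cache->Erase("key");  // Runs DeleteString once the last handle is gone.
  delete cache;
  return 0;
}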
cpp
google/arolla
frame
arolla/memory/frame.cc
arolla/memory/frame_test.cc
#include "arolla/memory/frame.h" #include <algorithm> #include <cstddef> #include <cstring> #include <tuple> #include <typeindex> #include <typeinfo> #include <utility> #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "arolla/util/algorithms.h" #include "arolla/util/memory.h" namespace arolla { std::type_index FrameLayout::FieldFactory::type_index() const { return type_; } void FrameLayout::FieldFactory::Add(size_t offset) { offsets_.push_back(offset); } void FrameLayout::FieldFactory::AddDerived( const FieldFactory& derived_factory) { DCHECK(type_index() == derived_factory.type_index()); for (size_t cur_offset : derived_factory.offsets_) { offsets_.push_back(cur_offset); } } FrameLayout::FieldFactory FrameLayout::FieldFactory::Derive( size_t offset) const { FieldFactory res = *this; for (size_t& cur_offset : res.offsets_) { cur_offset += offset; } return res; } void FrameLayout::FieldInitializers::AddOffsetToFactory( size_t offset, FieldFactory empty_factory) { auto it = type2factory.find(empty_factory.type_index()); if (it == type2factory.end()) { bool inserted; std::tie(it, inserted) = type2factory.emplace(empty_factory.type_index(), factories.size()); factories.push_back(std::move(empty_factory)); } DCHECK_LT(it->second, factories.size()); if (it->second < factories.size()) { factories[it->second].Add(offset); } } void FrameLayout::FieldInitializers::AddDerived( size_t offset, const FieldInitializers& derived_initializers) { for (const auto& [derived_tpe, derived_id] : derived_initializers.type2factory) { const auto& derived_factory = derived_initializers.factories[derived_id]; if (auto it = type2factory.find(derived_tpe); it != type2factory.end()) { factories[it->second].AddDerived(derived_factory.Derive(offset)); } else { type2factory.emplace(derived_tpe, factories.size()); factories.push_back(derived_factory.Derive(offset)); } } } FrameLayout::Slot<void> FrameLayout::Builder::AddSubFrame( const FrameLayout& subframe) { alloc_size_ = RoundUp(alloc_size_, subframe.AllocAlignment().value); size_t offset = alloc_size_; alloc_size_ += subframe.AllocSize(); alloc_alignment_ = std::max(alloc_alignment_, subframe.AllocAlignment().value); initializers_.AddDerived(offset, subframe.initializers_); #ifndef NDEBUG for (const auto& [field_offset, field_type] : subframe.registered_fields_) { registered_fields_.emplace(offset + field_offset, field_type); } #endif return FrameLayout::Slot<void>(offset); } absl::Status FrameLayout::Builder::RegisterUnsafeSlot( size_t byte_offset, size_t byte_size, const std::type_info& type) { return RegisterSlot(byte_offset, byte_size, type); } absl::Status FrameLayout::Builder::RegisterSlot(size_t byte_offset, size_t byte_size, const std::type_info& type, bool allow_duplicates) { if (byte_offset == FrameLayout::Slot<float>::kUninitializedOffset) { return absl::FailedPreconditionError( "unable to register uninitialized slot"); } if (byte_offset > alloc_size_ || byte_size > alloc_size_ - byte_offset) { return absl::FailedPreconditionError(absl::StrCat( "unable to register slot after the end of alloc, offset: ", byte_offset, ", size: ", byte_size, ", alloc size: ", alloc_size_)); } #ifndef NDEBUG if (!registered_fields_.emplace(byte_offset, std::type_index(type)).second && !allow_duplicates) { return absl::FailedPreconditionError(absl::StrCat( "slot is already registered ", byte_offset, " ", type.name())); } #endif return absl::OkStatus(); } }
#include "arolla/memory/frame.h" #include <array> #include <cstddef> #include <cstdint> #include <memory> #include <sstream> #include <string> #include <type_traits> #include <utility> #include <variant> #include <vector> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/dynamic_annotations.h" #include "absl/status/status.h" #include "absl/status/status_matchers.h" #include "arolla/memory/memory_allocation.h" #include "arolla/util/demangle.h" #include "arolla/util/is_bzero_constructible.h" #include "arolla/util/memory.h" #include "arolla/util/status_macros_backport.h" namespace arolla::testing { namespace { using ::absl_testing::IsOk; using ::absl_testing::StatusIs; using ::testing::ElementsAre; using ::testing::Eq; using ::testing::HasSubstr; using ::testing::IsEmpty; struct SimpleStruct { int a; float b; }; struct InitializedStruct { int a = 1; float b = 2.0; }; TEST(FrameLayoutTest, SlotOutput) { FrameLayout::Builder builder; auto slot = builder.AddSlot<int>(); std::ostringstream ss; ss << slot; EXPECT_EQ(ss.str(), std::string("Slot<") + TypeName<int>() + ">(0)"); } TEST(FrameLayoutTest, SimpleFields) { FrameLayout::Builder builder; auto slot1 = builder.AddSlot<int>(); auto slot2 = builder.AddSlot<float>(); auto slot3 = builder.AddSlot<double>(); auto layout = std::move(builder).Build(); MemoryAllocation alloc(&layout); FramePtr frame = alloc.frame(); EXPECT_THAT(frame.Get(slot1), Eq(0)); EXPECT_THAT(frame.Get(slot2), Eq(0.0f)); EXPECT_THAT(frame.Get(slot3), Eq(0.0)); frame.Set(slot1, 1); frame.Set(slot2, 2.0f); frame.Set(slot3, M_PI); EXPECT_THAT(frame.Get(slot1), Eq(1)); EXPECT_THAT(frame.Get(slot2), Eq(2.0f)); EXPECT_THAT(frame.Get(slot3), Eq(M_PI)); } TEST(FrameLayoutTest, SimpleArrays) { FrameLayout::Builder builder; auto slot1 = builder.AddSlot<std::array<int, 4>>(); auto slot2 = builder.AddSlot<std::array<float, 4>>(); auto slot3 = builder.AddSlot<std::array<char, 4>>(); auto layout = std::move(builder).Build(); MemoryAllocation alloc(&layout); FramePtr frame = alloc.frame(); EXPECT_THAT(frame.Get(slot1), ElementsAre(0, 0, 0, 0)); EXPECT_THAT(frame.Get(slot2), ElementsAre(0.0f, 0.0f, 0.0f, 0.0f)); EXPECT_THAT(frame.Get(slot3), ElementsAre(0, 0, 0, 0)); frame.Set(slot1, std::array<int, 4>{1, 2, 3, 4}); frame.Set(slot2, std::array<float, 4>{1.0f, 2.0f, 3.0f, 4.0f}); frame.Set(slot3, std::array<char, 4>{'a', 'b', 'c', 'd'}); EXPECT_THAT(frame.Get(slot1), ElementsAre(1, 2, 3, 4)); EXPECT_THAT(frame.Get(slot2), ElementsAre(1.0f, 2.0f, 3.0f, 4.0f)); EXPECT_THAT(frame.Get(slot3), ElementsAre('a', 'b', 'c', 'd')); } TEST(FrameLayoutTest, SimplePointers) { FrameLayout::Builder builder; auto slot1 = builder.AddSlot<int*>(); auto slot2 = builder.AddSlot<char*>(); auto layout = std::move(builder).Build(); MemoryAllocation alloc(&layout); FramePtr frame = alloc.frame(); EXPECT_THAT(frame.Get(slot1), Eq(nullptr)); EXPECT_THAT(frame.Get(slot2), Eq(nullptr)); int int_values[] = {1, 2, 3, 4}; char text[] = "It was a dark and stormy night."; frame.Set(slot1, int_values); frame.Set(slot2, text); EXPECT_THAT(frame.Get(slot1), Eq(int_values)); EXPECT_THAT(frame.Get(slot2), Eq(text)); } TEST(FrameLayoutTest, SmartPointers) { FrameLayout::Builder builder; auto slot1 = builder.AddSlot<std::unique_ptr<int>>(); auto slot2 = builder.AddSlot<std::unique_ptr<std::string>>(); auto layout = std::move(builder).Build(); MemoryAllocation alloc(&layout); FramePtr frame = alloc.frame(); EXPECT_THAT(frame.Get(slot1), Eq(nullptr)); EXPECT_THAT(frame.Get(slot2), Eq(nullptr)); frame.Set(slot1, 
std::make_unique<int>(12)); frame.Set(slot2, std::make_unique<std::string>("It was a dark and stormy night.")); EXPECT_THAT(*frame.Get(slot1), Eq(12)); EXPECT_THAT(*frame.Get(slot2), Eq("It was a dark and stormy night.")); } TEST(FrameLayoutTest, Vector) { FrameLayout::Builder builder; auto slot1 = builder.AddSlot<std::vector<int>>(); auto slot2 = builder.AddSlot<std::vector<std::string>>(); auto layout = std::move(builder).Build(); MemoryAllocation alloc(&layout); FramePtr frame = alloc.frame(); EXPECT_THAT(frame.Get(slot1), IsEmpty()); EXPECT_THAT(frame.Get(slot2), IsEmpty()); auto* int_vector = frame.GetMutable(slot1); int_vector->push_back(1); int_vector->push_back(2); int_vector->push_back(3); auto* string_vector = frame.GetMutable(slot2); string_vector->push_back("How"); string_vector->push_back("now"); string_vector->push_back("brown"); string_vector->push_back("cow?"); EXPECT_THAT(frame.Get(slot1), ElementsAre(1, 2, 3)); EXPECT_THAT(frame.Get(slot2), ElementsAre("How", "now", "brown", "cow?")); } TEST(FrameLayoutTest, Structs) { FrameLayout::Builder builder; auto slot1 = builder.AddSlot<SimpleStruct>(); auto slot2 = builder.AddSlot<InitializedStruct>(); auto layout = std::move(builder).Build(); MemoryAllocation alloc(&layout); FramePtr frame = alloc.frame(); const SimpleStruct& s1 = frame.Get(slot1); EXPECT_THAT(s1.a, Eq(0)); EXPECT_THAT(s1.b, Eq(0.0f)); const InitializedStruct& s2 = frame.Get(slot2); EXPECT_THAT(s2.a, Eq(1)); EXPECT_THAT(s2.b, Eq(2.0f)); } TEST(FrameLayoutTest, AFewDifferentTypesWellInitialized) { FrameLayout::Builder builder; auto slot1 = builder.AddSlot<std::vector<int>>(); auto slot2 = builder.AddSlot<std::vector<std::string>>(); auto slot3 = builder.AddSlot<std::vector<int>>(); auto slot4 = builder.AddSlot<SimpleStruct>(); auto slot5 = builder.AddSlot<InitializedStruct>(); auto slot6 = builder.AddSlot<std::vector<int>>(); auto slot7 = builder.AddSlot<std::vector<std::string>>(); auto slot8 = builder.AddSlot<std::vector<double>>(); auto slot9 = builder.AddSlot<InitializedStruct>(); auto layout = std::move(builder).Build(); MemoryAllocation alloc(&layout); FramePtr frame = alloc.frame(); EXPECT_THAT(frame.Get(slot1), IsEmpty()); EXPECT_THAT(frame.Get(slot2), IsEmpty()); EXPECT_THAT(frame.Get(slot3), IsEmpty()); EXPECT_THAT(frame.Get(slot6), IsEmpty()); EXPECT_THAT(frame.Get(slot7), IsEmpty()); EXPECT_THAT(frame.Get(slot8), IsEmpty()); const SimpleStruct& simple = frame.Get(slot4); EXPECT_THAT(simple.a, Eq(0)); EXPECT_THAT(simple.b, Eq(0.0f)); for (const InitializedStruct& init : {frame.Get(slot5), frame.Get(slot9)}) { EXPECT_THAT(init.a, Eq(1)); EXPECT_THAT(init.b, Eq(2.0f)); } } TEST(FrameLayoutTest, HasField) { FrameLayout::Builder builder; auto slot1 = builder.AddSlot<int>(); auto slot2 = builder.AddSlot<std::vector<int>>(); auto slot3 = builder.AddSlot<SimpleStruct>(); auto slot4 = builder.AddSlot<std::array<SimpleStruct, 4>>(); auto slot5 = builder.AddSlot<InitializedStruct>(); auto slot6 = builder.AddSlot<std::array<InitializedStruct, 4>>(); auto layout = std::move(builder).Build(); EXPECT_TRUE(layout.HasField(slot1.byte_offset(), typeid(int))); EXPECT_TRUE(layout.HasField(slot2.byte_offset(), typeid(std::vector<int>))); EXPECT_TRUE(layout.HasField(slot3.byte_offset(), typeid(SimpleStruct))); EXPECT_TRUE(layout.HasField(slot4.byte_offset(), typeid(std::array<SimpleStruct, 4>))); EXPECT_TRUE(layout.HasField(slot5.byte_offset(), typeid(InitializedStruct))); EXPECT_TRUE(layout.HasField(slot6.byte_offset(), typeid(std::array<InitializedStruct, 4>))); } 
TEST(FrameLayoutTest, RegisterUnsafeSlotWithEmptyField) { FrameLayout::Builder builder; ASSERT_TRUE(builder.RegisterUnsafeSlot(0, 0, typeid(std::monostate())).ok()); auto layout = std::move(builder).Build(); EXPECT_TRUE(layout.HasField(0, typeid(std::monostate()))); } TEST(FrameLayoutTest, FieldDescriptorsRegisterUnsafe) { FrameLayout::Builder builder; auto slot = builder.AddSlot<int32_t>(); auto slot_1part = FrameLayout::Slot<int16_t>::UnsafeSlotFromOffset(slot.byte_offset()); auto slot_2part = FrameLayout::Slot<int16_t>::UnsafeSlotFromOffset(slot.byte_offset() + 2); ASSERT_THAT(builder.RegisterUnsafeSlot(slot_1part), IsOk()); ASSERT_THAT(builder.RegisterUnsafeSlot(slot_2part), IsOk()); ASSERT_THAT(builder.RegisterUnsafeSlot(slot.byte_offset() + 2, sizeof(int8_t), typeid(int8_t)), IsOk()); #ifndef NDEBUG EXPECT_THAT(builder.RegisterUnsafeSlot(slot_2part), StatusIs(absl::StatusCode::kFailedPrecondition, HasSubstr("slot is already registered"))); EXPECT_THAT(builder.RegisterUnsafeSlot(slot_2part, true), IsOk()); #endif auto layout = std::move(builder).Build(); EXPECT_TRUE(layout.HasField(slot.byte_offset(), typeid(int32_t))); EXPECT_TRUE(layout.HasField(slot.byte_offset(), typeid(int16_t))); EXPECT_TRUE(layout.HasField(slot.byte_offset() + 2, typeid(int16_t))); EXPECT_TRUE(layout.HasField(slot.byte_offset() + 2, typeid(int8_t))); #ifndef NDEBUG EXPECT_FALSE(layout.HasField(slot.byte_offset() + 2, typeid(float))); EXPECT_FALSE(layout.HasField(slot.byte_offset() + 1, typeid(int8_t))); #endif } TEST(FrameLayoutTest, FieldDescriptorsRegisterUnsafeErrors) { FrameLayout::Builder builder; auto slot = builder.AddSlot<int32_t>(); auto slot_1part = FrameLayout::Slot<int16_t>::UnsafeSlotFromOffset(slot.byte_offset()); auto slot_after_end = FrameLayout::Slot<int16_t>::UnsafeSlotFromOffset(slot.byte_offset() + 4); auto uninitialized_slot = FrameLayout::Slot<int16_t>::UnsafeUninitializedSlot(); auto status = builder.RegisterUnsafeSlot(slot_1part); ASSERT_OK(status); #ifndef NDEBUG status = builder.RegisterUnsafeSlot(slot); ASSERT_FALSE(status.ok()); ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition); EXPECT_THAT(status.message(), HasSubstr("slot is already registered")); status = builder.RegisterUnsafeSlot(slot_1part); ASSERT_FALSE(status.ok()); ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition); EXPECT_THAT(status.message(), HasSubstr("slot is already registered")); #endif status = builder.RegisterUnsafeSlot(slot_after_end); ASSERT_FALSE(status.ok()); ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition); EXPECT_THAT(status.message(), HasSubstr("unable to register slot after the end of alloc")); status = builder.RegisterUnsafeSlot(100, sizeof(int), typeid(int)); ASSERT_FALSE(status.ok()); ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition); EXPECT_THAT(status.message(), HasSubstr("unable to register slot after the end of alloc, " "offset: 100, size: 4, alloc size: 4")); status = builder.RegisterUnsafeSlot(uninitialized_slot); ASSERT_FALSE(status.ok()); ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition); EXPECT_THAT(status.message(), HasSubstr("unable to register uninitialized slot")); } struct SelfReference { const SelfReference* self; SelfReference() : self(this) {} SelfReference(const SelfReference&) = delete; SelfReference& operator=(const SelfReference&) = delete; ~SelfReference() { volatile auto secure_ptr = &self; *secure_ptr = nullptr; } }; TEST(FrameLayoutTest, AddSubFrame) { FrameLayout subframe_layout; 
std::vector<FrameLayout::Slot<SelfReference>> field_slots; { FrameLayout::Builder builder; for (int i = 0; i < 2; ++i) { field_slots.push_back(builder.AddSlot<SelfReference>()); } subframe_layout = std::move(builder).Build(); } FrameLayout frame_layout; std::vector<FrameLayout::Slot<void>> subframe_slots; { FrameLayout::Builder builder; builder.AddSlot<float>(); for (int j = 0; j < 3; ++j) { subframe_slots.push_back(builder.AddSubFrame(subframe_layout)); builder.AddSlot<double>(); } frame_layout = std::move(builder).Build(); } for (const auto& subframe_slot : subframe_slots) { for (const auto& field_slot : field_slots) { EXPECT_TRUE(frame_layout.HasField( subframe_slot.byte_offset() + field_slot.byte_offset(), typeid(SelfReference))); } } const auto alloc = AlignedAlloc(frame_layout.AllocAlignment(), frame_layout.AllocSize()); frame_layout.InitializeAlignedAlloc(alloc.get()); FramePtr frame(alloc.get(), &frame_layout); for (const auto& subframe_slot : subframe_slots) { for (const auto& field_slot : field_slots) { const void* subframe_ptr = frame.GetRawPointer(subframe_slot.byte_offset()); ConstFramePtr subframe(subframe_ptr, &subframe_layout); const SelfReference& field = subframe.Get(field_slot); EXPECT_TRUE(field.self == &field); } } frame_layout.DestroyAlloc(alloc.get()); ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(alloc.get(), frame_layout.AllocSize()); for (const auto& subframe_slot : subframe_slots) { for (const auto& field_slot : field_slots) { const void* subframe_ptr = frame.GetRawPointer(subframe_slot.byte_offset()); ConstFramePtr subframe(subframe_ptr, &subframe_layout); const SelfReference& field = subframe.Get(field_slot); EXPECT_TRUE(field.self == nullptr); } } } TEST(FrameLayoutTest, AddSubFrameAllocAlignment) { FrameLayout::Builder builder; builder.AddSubFrame(MakeTypeLayout<std::aligned_storage_t<16, 16>>()); builder.AddSubFrame(MakeTypeLayout<std::aligned_storage_t<16, 16>>()); auto frame_layout = std::move(builder).Build(); EXPECT_EQ(frame_layout.AllocSize(), 32); EXPECT_EQ(frame_layout.AllocAlignment().value, 16); } TEST(FrameLayoutTest, ArrayCompatibility) { FrameLayout::Builder builder; builder.AddSlot<std::aligned_storage_t<16, 16>>(); builder.AddSlot<std::aligned_storage_t<1, 1>>(); auto frame_layout = std::move(builder).Build(); EXPECT_EQ(frame_layout.AllocSize(), 32); EXPECT_EQ(frame_layout.AllocAlignment().value, 16); } TEST(FrameLayoutTest, InitDestroyAllocN) { static int instance_counter = 0; struct InstanceCounted { InstanceCounted() { ++instance_counter; } ~InstanceCounted() { --instance_counter; } }; struct SelfReferenced { SelfReferenced() : self(this) {} SelfReferenced* self; }; FrameLayout::Builder builder; auto int_slot = builder.AddSlot<int>(); auto self_ref_slot = builder.AddSlot<SelfReferenced>(); builder.AddSlot<InstanceCounted>(); auto layout = std::move(builder).Build(); const int n = 10; const auto alloc = AlignedAlloc(layout.AllocAlignment(), layout.AllocSize() * n); layout.InitializeAlignedAllocN(alloc.get(), n); EXPECT_EQ(instance_counter, n); for (int i = 0; i < n; ++i) { ConstFramePtr ith_frame( static_cast<const std::byte*>(alloc.get()) + i * layout.AllocSize(), &layout); EXPECT_EQ(ith_frame.Get(int_slot), 0); EXPECT_EQ(ith_frame.Get(self_ref_slot).self, &ith_frame.Get(self_ref_slot)); } layout.DestroyAllocN(alloc.get(), n); EXPECT_EQ(instance_counter, 0); } struct IsBZeroConstructible { static bool ctor_called; static bool dtor_called; IsBZeroConstructible() { ctor_called = true; } ~IsBZeroConstructible() { dtor_called = true; } }; bool 
IsBZeroConstructible::ctor_called; bool IsBZeroConstructible::dtor_called; } } namespace arolla { template <> struct is_bzero_constructible<::arolla::testing::IsBZeroConstructible> : std::true_type {}; } namespace arolla::testing { namespace { TEST(FrameLayoutTest, IsBZeroConstructibleHandling) { ASSERT_FALSE(IsBZeroConstructible::ctor_called); ASSERT_FALSE(IsBZeroConstructible::dtor_called); { auto layout = MakeTypeLayout<IsBZeroConstructible>(); MemoryAllocation alloc(&layout); } EXPECT_FALSE(IsBZeroConstructible::ctor_called); EXPECT_TRUE(IsBZeroConstructible::dtor_called); } } }
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/frame.cc
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/frame_test.cc
1ca990dbeca224035efdabffecc7f3738df6b52c
93ddf3a3-54f7-4d52-9e60-c7a46d275e27
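Usage note (editor's sketch, not from the repository): frame_test.cc above walks the full FrameLayout lifecycle; condensed, it is build-a-layout, allocate, then typed Get/Set through slots. Every name used below (FrameLayout::Builder, AddSlot, MemoryAllocation, FramePtr, GetMutable) appears in the files above.

#include <iostream>
#include <string>
#include <utility>
#include <vector>

#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"

int main() {
  arolla::FrameLayout::Builder builder;
  auto int_slot = builder.AddSlot<int>();                       // zero-initialized
  auto vec_slot = builder.AddSlot<std::vector<std::string>>();  // default-constructed
  auto layout = std::move(builder).Build();

  // MemoryAllocation owns a correctly aligned, initialized frame for the layout.
  arolla::MemoryAllocation alloc(&layout);
  arolla::FramePtr frame = alloc.frame();

  frame.Set(int_slot, 42);
  frame.GetMutable(vec_slot)->push_back("hello");
  std::cout << frame.Get(int_slot) << " " << frame.Get(vec_slot).size()
            << "\n";  // Prints: 42 1
  return 0;
}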
cpp
google/tensorstore
codec_chain_spec
tensorstore/driver/zarr3/codec/codec_chain_spec.cc
tensorstore/driver/zarr3/codec/codec_chain_spec_test.cc
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h" #include <stddef.h> #include <cassert> #include <optional> #include <string> #include <string_view> #include <type_traits> #include <utility> #include <vector> #include "absl/container/fixed_array.h" #include "absl/status/status.h" #include "absl/strings/str_format.h" #include <nlohmann/json.hpp> #include "tensorstore/codec_spec.h" #include "tensorstore/codec_spec_registry.h" #include "tensorstore/driver/zarr3/codec/bytes.h" #include "tensorstore/driver/zarr3/codec/codec.h" #include "tensorstore/driver/zarr3/codec/codec_spec.h" #include "tensorstore/driver/zarr3/codec/registry.h" #include "tensorstore/driver/zarr3/codec/transpose.h" #include "tensorstore/driver/zarr3/name_configuration_json_binder.h" #include "tensorstore/index.h" #include "tensorstore/internal/cache_key/cache_key.h" #include "tensorstore/internal/intrusive_ptr.h" #include "tensorstore/internal/json_binding/bindable.h" #include "tensorstore/internal/json_binding/json_binding.h" #include "tensorstore/internal/json_binding/std_array.h" #include "tensorstore/internal/json_binding/std_optional.h" #include "tensorstore/internal/unaligned_data_type_functions.h" #include "tensorstore/rank.h" #include "tensorstore/serialization/fwd.h" #include "tensorstore/serialization/json_bindable.h" #include "tensorstore/util/result.h" #include "tensorstore/util/span.h" #include "tensorstore/util/status.h" #include "tensorstore/util/str_cat.h" namespace tensorstore { namespace internal_zarr3 { namespace jb = ::tensorstore::internal_json_binding; namespace { struct ZarrCodecJsonBinderImpl { static absl::Status FromJson(const ZarrCodecSpec::FromJsonOptions& options, ZarrCodecSpec::Ptr* obj, ::nlohmann::json* j); static absl::Status ToJson(const ZarrCodecSpec::ToJsonOptions& options, const ZarrCodecSpec* const* obj, ::nlohmann::json* j); absl::Status operator()(std::true_type is_loading, const ZarrCodecSpec::FromJsonOptions& options, ZarrCodecSpec::Ptr* obj, ::nlohmann::json* j) const { return FromJson(options, obj, j); } template <typename T> absl::Status operator()(std::false_type is_loading, const ZarrCodecSpec::ToJsonOptions& options, T* obj, ::nlohmann::json* j) const { static_assert( std::is_convertible_v<decltype(&**obj), const ZarrCodecSpec*>); const ZarrCodecSpec* ptr = &**obj; return ToJson(options, &ptr, j); } }; constexpr inline ZarrCodecJsonBinderImpl ZarrCodecJsonBinder{}; constexpr auto ZarrCodecJsonBinderImplBase = [](auto is_loading, const auto& options, auto* obj, auto* j) { const auto& registry = GetCodecRegistry(); if constexpr (is_loading) { if (options.constraints && j->is_string()) { ::nlohmann::json::object_t j_obj; j_obj.emplace("name", std::move(*j)); *j = std::move(j_obj); } } return jb::Object(NameConfigurationJsonBinder( registry.KeyBinder(), registry.RegisteredObjectBinder())) (is_loading, options, obj, j); }; absl::Status ZarrCodecJsonBinderImpl::FromJson( const ZarrCodecSpec::FromJsonOptions& options, ZarrCodecSpec::Ptr* obj, ::nlohmann::json* j) { return ZarrCodecJsonBinderImplBase(std::true_type{}, options, obj, j); } absl::Status ZarrCodecJsonBinderImpl::ToJson( const ZarrCodecSpec::ToJsonOptions& options, const ZarrCodecSpec* const* obj, ::nlohmann::json* j) { return ZarrCodecJsonBinderImplBase(std::false_type{}, options, obj, j); } constexpr auto ZarrCodecChainSpecJsonBinderImpl = jb::Compose< std::vector<ZarrCodecSpec::Ptr>>( [](auto is_loading, const auto& options, auto* obj, auto* j) { if constexpr (is_loading) { auto it = j->begin(), end = 
j->end(); for (; it != end && (*it)->kind() == ZarrCodecKind::kArrayToArray; ++it) { obj->array_to_array.push_back( internal::static_pointer_cast<const ZarrArrayToArrayCodecSpec>( std::move(*it))); } if (it != end && (*it)->kind() == ZarrCodecKind::kArrayToBytes) { obj->array_to_bytes = internal::static_pointer_cast<const ZarrArrayToBytesCodecSpec>( std::move(*it)); ++it; } else if (!options.constraints) { return absl::InvalidArgumentError( "array -> bytes codec must be specified"); } for (; it != end; ++it) { if ((*it)->kind() != ZarrCodecKind::kBytesToBytes) { return absl::InvalidArgumentError(tensorstore::StrCat( "Expected bytes -> bytes codec, but received: ", jb::ToJson(*it, ZarrCodecJsonBinder).value().dump())); } obj->bytes_to_bytes.push_back( internal::static_pointer_cast<const ZarrBytesToBytesCodecSpec>( std::move(*it))); } } else { j->insert(j->end(), obj->array_to_array.begin(), obj->array_to_array.end()); if (obj->array_to_bytes) { j->push_back(obj->array_to_bytes); } j->insert(j->end(), obj->bytes_to_bytes.begin(), obj->bytes_to_bytes.end()); } return absl::OkStatus(); }, jb::Array(ZarrCodecJsonBinder)); } TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(ZarrCodecChainSpec, ZarrCodecChainSpecJsonBinderImpl); namespace { Result<ZarrArrayToBytesCodecSpec::Ptr> GetDefaultArrayToBytesCodecSpec( const ArrayCodecResolveParameters& decoded) { if (internal::IsTrivialDataType(decoded.dtype)) { return DefaultBytesCodec(); } return absl::InternalError(tensorstore::StrCat( "No default codec defined for data type ", decoded.dtype)); } absl::Status CodecResolveError(const ZarrCodecSpec& codec_spec, std::string_view message, const absl::Status& status) { return tensorstore::MaybeAnnotateStatus( status, tensorstore::StrCat( "Error ", message, " through ", jb::ToJson(&codec_spec, ZarrCodecJsonBinder).value().dump())); } } size_t ZarrCodecChainSpec::sharding_height() const { return array_to_bytes ? array_to_bytes->sharding_height() : 0; } absl::Status ZarrCodecChainSpec::GetDecodedChunkLayout( const ArrayDataTypeAndShapeInfo& array_info, ArrayCodecChunkLayoutInfo& decoded) const { absl::FixedArray<ArrayDataTypeAndShapeInfo, 2> array_infos( array_to_array.size()); const ArrayDataTypeAndShapeInfo* decoded_array_info = &array_info; for (size_t i = 0; i < array_to_array.size(); ++i) { const auto& codec_spec = *array_to_array[i]; auto& encoded_array_info = array_infos[i]; TENSORSTORE_RETURN_IF_ERROR( codec_spec.PropagateDataTypeAndShape(*decoded_array_info, encoded_array_info), CodecResolveError(codec_spec, "propagating data type and shape", _)); decoded_array_info = &encoded_array_info; } std::optional<ArrayCodecChunkLayoutInfo> temp_info[2]; const ArrayCodecChunkLayoutInfo* encoded_info; if (array_to_bytes) { auto& decoded_info = array_infos.empty() ? decoded : temp_info[0].emplace(); TENSORSTORE_RETURN_IF_ERROR( array_to_bytes->GetDecodedChunkLayout( array_infos.empty() ? array_info : array_infos.back(), decoded_info), CodecResolveError(*array_to_bytes, "propagating chunk layout", _)); encoded_info = &decoded_info; } else if (!array_to_array.empty()) { encoded_info = &temp_info[0].emplace(); } for (size_t i = array_to_array.size(); i--;) { auto& decoded_info = i == 0 ? decoded : temp_info[(array_to_array.size() - i) % 2].emplace(); const auto& codec_spec = *array_to_array[i]; TENSORSTORE_RETURN_IF_ERROR( codec_spec.GetDecodedChunkLayout( array_infos[i], *encoded_info, i == 0 ? 
array_info : array_infos[i - 1], decoded_info), CodecResolveError(codec_spec, "propagating chunk layout", _)); encoded_info = &decoded_info; } return absl::OkStatus(); } Result<internal::IntrusivePtr<const ZarrCodecChain>> ZarrCodecChainSpec::Resolve(ArrayCodecResolveParameters&& decoded, BytesCodecResolveParameters& encoded, ZarrCodecChainSpec* resolved_spec) const { auto chain = internal::MakeIntrusivePtr<ZarrCodecChain>(); std::optional<ArrayCodecResolveParameters> temp_array_resolve_params[2]; chain->array_to_array.reserve(array_to_array.size()); chain->bytes_to_bytes.reserve(bytes_to_bytes.size()); if (resolved_spec) { assert(resolved_spec != this); assert(resolved_spec->array_to_array.empty()); resolved_spec->array_to_array.reserve(array_to_array.size()); assert(!resolved_spec->array_to_bytes); assert(resolved_spec->bytes_to_bytes.empty()); resolved_spec->bytes_to_bytes.reserve(bytes_to_bytes.size()); } ArrayCodecResolveParameters* decoded_params = &decoded; size_t temp_i = 0; const auto resolve_array_to_array = [&](const ZarrArrayToArrayCodecSpec& codec_spec) -> absl::Status { auto& encoded_params = temp_array_resolve_params[(temp_i++) % 2].emplace(); TENSORSTORE_ASSIGN_OR_RETURN( auto codec, codec_spec.Resolve(std::move(*decoded_params), encoded_params, resolved_spec ? &resolved_spec->array_to_array.emplace_back() : nullptr), CodecResolveError(codec_spec, "resolving codec spec", _)); chain->array_to_array.push_back(std::move(codec)); decoded_params = &encoded_params; return absl::OkStatus(); }; for (size_t i = 0; i < array_to_array.size(); ++i) { TENSORSTORE_RETURN_IF_ERROR(resolve_array_to_array(*array_to_array[i])); } std::optional<BytesCodecResolveParameters> temp_bytes_resolve_params[2]; auto* bytes_decoded_params = &temp_bytes_resolve_params[0].emplace(); ZarrArrayToBytesCodecSpec::Ptr temp_array_to_bytes_codec; auto* array_to_bytes_codec_ptr = this->array_to_bytes.get(); if (!array_to_bytes_codec_ptr) { TENSORSTORE_ASSIGN_OR_RETURN( temp_array_to_bytes_codec, GetDefaultArrayToBytesCodecSpec(*decoded_params)); array_to_bytes_codec_ptr = temp_array_to_bytes_codec.get(); } DimensionIndex preferred_order[kMaxRank]; if (DimensionIndex rank = decoded_params->rank; decoded_params->inner_order && !array_to_bytes_codec_ptr->SupportsInnerOrder( *decoded_params, span<DimensionIndex>(&preferred_order[0], rank))) { const auto& existing_inner_order = *decoded_params->inner_order; std::vector<DimensionIndex> new_order(rank); for (DimensionIndex i = 0; i < rank; ++i) { new_order[preferred_order[i]] = existing_inner_order[i]; } TENSORSTORE_RETURN_IF_ERROR( resolve_array_to_array(*internal::MakeIntrusivePtr<TransposeCodecSpec>( TransposeCodecSpec::Options{std::move(new_order)}))); } TENSORSTORE_ASSIGN_OR_RETURN( chain->array_to_bytes, array_to_bytes_codec_ptr->Resolve( std::move(*decoded_params), *bytes_decoded_params, resolved_spec ? &resolved_spec->array_to_bytes : nullptr), CodecResolveError(*array_to_bytes, "resolving codec spec", _)); if (chain->array_to_bytes->is_sharding_codec() && !bytes_to_bytes.empty()) { return absl::InvalidArgumentError(absl::StrFormat( "Sharding codec %s is not compatible with subsequent bytes -> " "bytes codecs %s that apply to the entire shard. Instead, " "bytes -> bytes codecs may be specified as inner codecs that apply " "to each sub-chunk individually.", jb::ToJson(array_to_bytes_codec_ptr, ZarrCodecJsonBinder) .value() .dump(), jb::ToJson(bytes_to_bytes, jb::Array(ZarrCodecJsonBinder)) .value() .dump())); } for (size_t i = 0; i < bytes_to_bytes.size(); ++i) { auto& encoded_params = temp_bytes_resolve_params[(i + 1) % 2].emplace(); const auto& codec_spec = *bytes_to_bytes[i]; TENSORSTORE_ASSIGN_OR_RETURN( auto codec, codec_spec.Resolve(std::move(*bytes_decoded_params), encoded_params, resolved_spec ? &resolved_spec->bytes_to_bytes.emplace_back() : nullptr), CodecResolveError(codec_spec, "resolving codec spec", _)); bytes_decoded_params = &encoded_params; chain->bytes_to_bytes.push_back(std::move(codec)); } encoded = std::move(*bytes_decoded_params); return chain; } namespace { template <typename T, typename Binder> std::string MergeErrorMessage(const T& a, const T& b, const Binder& binder) { return absl::StrFormat("Cannot merge zarr codec constraints %s and %s", jb::ToJson(a, binder).value().dump(), jb::ToJson(b, binder).value().dump()); } std::string MergeErrorMessage(const ZarrCodecSpec& a, const ZarrCodecSpec& b) { return MergeErrorMessage(ZarrCodecSpec::Ptr(&a), ZarrCodecSpec::Ptr(&b), ZarrCodecJsonBinder); } template <typename T> void EnsureMutableCodecSpec(internal::IntrusivePtr<const T>& ptr) { static_assert(std::is_base_of_v<ZarrCodecSpec, T>); assert(ptr); if (ptr->use_count() > 1) { ptr = internal::static_pointer_cast<const T>(ptr->Clone()); } } absl::Status MergeZarrCodecSpecs(ZarrCodecSpec::Ptr& target, const ZarrCodecSpec* source, bool strict) { if (!source) { return absl::OkStatus(); } if (!target) { target.reset(source); return absl::OkStatus(); } absl::Status status; const auto& target_ref = *target; const auto& source_ref = *source; if (typeid(target_ref) != typeid(source_ref)) { status = absl::FailedPreconditionError(""); } else { EnsureMutableCodecSpec(target); status = const_cast<ZarrCodecSpec&>(*target).MergeFrom(*source, strict); } if (status.ok()) return absl::OkStatus(); return tensorstore::MaybeAnnotateStatus(status, MergeErrorMessage(*target, *source)); } template <typename T> absl::Status MergeZarrCodecSpecs(typename T::Ptr& target, const T* source, bool strict) { static_assert(std::is_base_of_v<ZarrCodecSpec, T>); ZarrCodecSpec::Ptr target_base = std::move(target); auto status = MergeZarrCodecSpecs(target_base, source, strict); target = internal::static_pointer_cast<const T>(std::move(target_base)); TENSORSTORE_RETURN_IF_ERROR(status); return absl::OkStatus(); } template <typename T> absl::Status MergeZarrCodecSpecs(std::vector<T>& targets, const std::vector<T>& sources, bool strict) { constexpr bool kIsArrayToArray = std::is_same_v<ZarrArrayToArrayCodecSpec::Ptr, T>; size_t merge_count = targets.size(); bool size_mismatch = targets.size() != sources.size(); if constexpr (kIsArrayToArray) { if (!strict) { if (sources.size() == targets.size() + 1 && typeid(*sources.back()) == typeid(TransposeCodecSpec)) { targets.push_back(sources.back()); size_mismatch = false; } else if (sources.size() + 1 == targets.size() && typeid(*targets.back()) == typeid(TransposeCodecSpec)) { --merge_count; size_mismatch = false; } } } if (size_mismatch) { return tensorstore::MaybeAnnotateStatus( absl::FailedPreconditionError(absl::StrFormat( "Mismatch in number of %s codecs (%d vs %d)", kIsArrayToArray ? 
"array -> array" : "bytes -> bytes", targets.size(), sources.size())), MergeErrorMessage(targets, sources, jb::Array(ZarrCodecJsonBinder))); } for (size_t i = 0; i < merge_count; ++i) { TENSORSTORE_RETURN_IF_ERROR( MergeZarrCodecSpecs(targets[i], sources[i].get(), strict)); } return absl::OkStatus(); } } absl::Status ZarrCodecChainSpec::MergeFrom(const ZarrCodecChainSpec& other, bool strict) { if (!strict) { size_t self_sharding_height = sharding_height(); size_t other_sharding_height = other.sharding_height(); if (self_sharding_height > other_sharding_height && array_to_array.empty() && bytes_to_bytes.empty()) { EnsureMutableCodecSpec(array_to_bytes); return static_cast<ZarrShardingCodecSpec&>( const_cast<ZarrArrayToBytesCodecSpec&>(*array_to_bytes)) .MergeSubChunkCodecsFrom(other, strict); } if (self_sharding_height < other_sharding_height && other.array_to_array.empty() && other.bytes_to_bytes.empty()) { auto new_array_to_bytes_codec = internal::static_pointer_cast<const ZarrShardingCodecSpec>( other.array_to_bytes->Clone()); TENSORSTORE_RETURN_IF_ERROR( const_cast<ZarrShardingCodecSpec&>(*new_array_to_bytes_codec) .MergeSubChunkCodecsFrom(*this, strict)); array_to_array.clear(); bytes_to_bytes.clear(); array_to_bytes = std::move(new_array_to_bytes_codec); return absl::OkStatus(); } } TENSORSTORE_RETURN_IF_ERROR( MergeZarrCodecSpecs(array_to_array, other.array_to_array, strict)); TENSORSTORE_RETURN_IF_ERROR( MergeZarrCodecSpecs(array_to_bytes, other.array_to_bytes.get(), strict)); TENSORSTORE_RETURN_IF_ERROR( MergeZarrCodecSpecs(bytes_to_bytes, other.bytes_to_bytes, strict)); return absl::OkStatus(); } absl::Status MergeZarrCodecSpecs( std::optional<ZarrCodecChainSpec>& target, const std::optional<ZarrCodecChainSpec>& source, bool strict) { if (!target) { if (source) { target = *source; } return absl::OkStatus(); } if (!source) { return absl::OkStatus(); } return target->MergeFrom(*source, strict); } bool ZarrShardingCodecSpec::SupportsInnerOrder( const ArrayCodecResolveParameters& decoded, span<DimensionIndex> preferred_inner_order) const { return true; } size_t ZarrShardingCodecSpec::sharding_height() const { auto* sub_chunk_codecs = this->GetSubChunkCodecs(); return 1 + (sub_chunk_codecs ? sub_chunk_codecs->sharding_height() : 0); } CodecSpec TensorStoreCodecSpec::Clone() const { return internal::CodecDriverSpec::Make<TensorStoreCodecSpec>(*this); } absl::Status TensorStoreCodecSpec::DoMergeFrom( const internal::CodecDriverSpec& other_base) { if (typeid(other_base) != typeid(TensorStoreCodecSpec)) { return absl::InvalidArgumentError(""); } auto& other = static_cast<const TensorStoreCodecSpec&>(other_base); return MergeZarrCodecSpecs(codecs, other.codecs, false); } TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER( TensorStoreCodecSpec, jb::Sequence( jb::Member("codecs", jb::Projection<&TensorStoreCodecSpec::codecs>(jb::Optional( ZarrCodecChainJsonBinder<true>))) )) namespace { const internal::CodecSpecRegistration<TensorStoreCodecSpec> encoding_registration; } } namespace internal { void CacheKeyEncoder<internal_zarr3::ZarrCodecChainSpec>::Encode( std::string* out, const internal_zarr3::ZarrCodecChainSpec& value) { internal::EncodeCacheKey(out, value.ToJson().value().dump()); } } } TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION( tensorstore::internal_zarr3::ZarrCodecChainSpec, tensorstore::serialization::JsonBindableSerializer< tensorstore::internal_zarr3::ZarrCodecChainSpec>())
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "tensorstore/codec_spec.h" #include "tensorstore/driver/zarr3/codec/codec_test_util.h" #include "tensorstore/internal/json_gtest.h" #include "tensorstore/util/status_testutil.h" namespace { using ::tensorstore::CodecSpec; using ::tensorstore::MatchesJson; using ::tensorstore::MatchesStatus; using ::tensorstore::internal_zarr3::GetDefaultBytesCodecJson; using ::tensorstore::internal_zarr3::TestCodecMerge; using ::tensorstore::internal_zarr3::ZarrCodecChainSpec; TEST(CodecMergeTest, Basic) { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto a, CodecSpec::FromJson({ {"driver", "zarr3"}, {"codecs", {{ {"name", "sharding_indexed"}, {"configuration", { {"chunk_shape", {30, 40, 50}}, {"index_codecs", {GetDefaultBytesCodecJson(), {{"name", "crc32c"}}}}, {"codecs", { {{"name", "transpose"}, {"configuration", {{"order", {2, 0, 1}}}}}, GetDefaultBytesCodecJson(), {{"name", "gzip"}, {"configuration", {{"level", 6}}}}, }}, }}, }}}, })); TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto b, CodecSpec::FromJson( {{"driver", "zarr3"}, {"codecs", {{{"name", "gzip"}, {"configuration", {{"level", 5}}}}}}})); EXPECT_THAT(a.MergeFrom(b), MatchesStatus(absl::StatusCode::kFailedPrecondition, ".*: Incompatible \"level\": 6 vs 5")); } TEST(CodecChainSpecTest, MissingArrayToBytes) { EXPECT_THAT(ZarrCodecChainSpec::FromJson(::nlohmann::json::array_t()), MatchesStatus(absl::StatusCode::kInvalidArgument, "array -> bytes codec must be specified")); } TEST(CodecChainSpecTest, MergeCodecNameMismatch) { EXPECT_THAT( TestCodecMerge({"gzip"}, {"crc32c"}, true), MatchesStatus(absl::StatusCode::kFailedPrecondition, "Cannot merge .*")); } TEST(CodecChainSpecTest, MergeArrayToBytes) { EXPECT_THAT( TestCodecMerge( {{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}}}, ::nlohmann::json::array_t(), true), ::testing::Optional(MatchesJson( {{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}}}))); } TEST(CodecChainSpecTest, ExtraTranspose) { ::nlohmann::json a = { {{"name", "transpose"}, {"configuration", {{"order", {0, 2, 1}}}}}, {{"name", "bytes"}, {"configuration", {{"endian", "little"}}}}, }; ::nlohmann::json b = { {{"name", "bytes"}, {"configuration", {{"endian", "little"}}}}, }; EXPECT_THAT(TestCodecMerge(a, b, false), ::testing::Optional(MatchesJson(a))); EXPECT_THAT( TestCodecMerge(a, b, true), MatchesStatus(absl::StatusCode::kFailedPrecondition, ".*: Mismatch in number of array -> array codecs.*")); } TEST(CodecChainSpecTest, ExtraSharding) { ::nlohmann::json a = {{ {"name", "sharding_indexed"}, {"configuration", { {"chunk_shape", {30, 40, 50}}, {"index_codecs", {GetDefaultBytesCodecJson(), {{"name", "crc32c"}}}}, {"codecs", { {{"name", "transpose"}, {"configuration", {{"order", {2, 0, 1}}}}}, GetDefaultBytesCodecJson(), {{"name", "gzip"}, {"configuration", {{"level", 6}}}}, }}, }}, }}; ::nlohmann::json b = { {{"name", "transpose"}, {"configuration", {{"order", {2, 0, 1}}}}}, GetDefaultBytesCodecJson(), {{"name", "gzip"}, {"configuration", {{"level", 6}}}}, }; ::nlohmann::json c = { GetDefaultBytesCodecJson(), {{"name", "gzip"}, {"configuration", {{"level", 6}}}}, }; EXPECT_THAT(TestCodecMerge(a, b, false), ::testing::Optional(MatchesJson(a))); EXPECT_THAT(TestCodecMerge(a, c, false), ::testing::Optional(MatchesJson(a))); EXPECT_THAT( TestCodecMerge(a, b, true), MatchesStatus(absl::StatusCode::kFailedPrecondition, ".*: Mismatch in number of array -> array codecs.*")); 
EXPECT_THAT(TestCodecMerge(a, c, true), MatchesStatus(absl::StatusCode::kFailedPrecondition, "Cannot merge zarr codec constraints .*")); } }
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr3/codec/codec_chain_spec.cc
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr3/codec/codec_chain_spec_test.cc
4f887a6430414cd6088e1743555015b10f116d50
b0a5d668-8561-4789-820f-0d628bfa1730
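Usage note (editor's sketch, not from the repository): ZarrCodecChainSpec is JSON-bindable, and the binder above enforces the zarr3 chain shape — zero or more array -> array codecs, exactly one array -> bytes codec (unless parsing constraints), then zero or more bytes -> bytes codecs. FromJson and ToJson are used exactly as in the files above; the specific codec JSON values are taken from the test cases.

#include <iostream>

#include <nlohmann/json.hpp>
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"

int main() {
  // transpose (array -> array), bytes (array -> bytes), gzip (bytes -> bytes).
  ::nlohmann::json j = {
      {{"name", "transpose"}, {"configuration", {{"order", {1, 0}}}}},
      {{"name", "bytes"}, {"configuration", {{"endian", "little"}}}},
      {{"name", "gzip"}, {"configuration", {{"level", 6}}}},
  };
  auto spec = tensorstore::internal_zarr3::ZarrCodecChainSpec::FromJson(j);
  if (!spec.ok()) {  // E.g. fails if the array -> bytes codec is missing.
    std::cerr << spec.status() << "\n";
    return 1;
  }
  std::cout << spec->ToJson().value().dump() << "\n";
  return 0;
}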
cpp
tensorflow/tensorflow
journal
tensorflow/core/data/service/journal.cc
tensorflow/core/data/service/journal_test.cc
#include "tensorflow/core/data/service/journal.h" #include <algorithm> #include <memory> #include <string> #include <vector> #include "absl/memory/memory.h" #include "absl/status/status.h" #include "tensorflow/core/data/service/journal.pb.h" #include "tensorflow/core/lib/io/record_reader.h" #include "tensorflow/core/lib/io/record_writer.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/path.h" #include "tensorflow/core/platform/regexp.h" namespace tensorflow { namespace data { namespace { constexpr StringPiece kJournal = "journal"; Status ParseSequenceNumber(const std::string& journal_file, int64_t* sequence_number) { if (!RE2::FullMatch(journal_file, ".*_(\\d+)", sequence_number)) { return errors::InvalidArgument("Failed to parse journal file name: ", journal_file); } return absl::OkStatus(); } } std::string DataServiceJournalFile(const std::string& journal_dir, int64_t sequence_number) { return io::JoinPath(journal_dir, absl::StrCat(kJournal, "_", sequence_number)); } FileJournalWriter::FileJournalWriter(Env* env, const std::string& journal_dir) : env_(env), journal_dir_(journal_dir) {} Status FileJournalWriter::EnsureInitialized() { if (writer_) { return absl::OkStatus(); } std::vector<std::string> journal_files; TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(journal_dir_)); TF_RETURN_IF_ERROR(env_->GetChildren(journal_dir_, &journal_files)); int64_t latest_sequence_number = -1; for (const auto& file : journal_files) { int64_t sequence_number; TF_RETURN_IF_ERROR(ParseSequenceNumber(file, &sequence_number)); latest_sequence_number = std::max(latest_sequence_number, sequence_number); } std::string journal_file = DataServiceJournalFile(journal_dir_, latest_sequence_number + 1); TF_RETURN_IF_ERROR(env_->NewAppendableFile(journal_file, &file_)); writer_ = std::make_unique<io::RecordWriter>(file_.get()); VLOG(1) << "Created journal writer to write to " << journal_file; return absl::OkStatus(); } Status FileJournalWriter::Write(const Update& update) { TF_RETURN_IF_ERROR(EnsureInitialized()); std::string s = update.SerializeAsString(); if (s.empty()) { return errors::Internal("Failed to serialize update ", update.DebugString(), " to string"); } TF_RETURN_IF_ERROR(writer_->WriteRecord(s)); TF_RETURN_IF_ERROR(writer_->Flush()); TF_RETURN_IF_ERROR(file_->Sync()); if (VLOG_IS_ON(4)) { VLOG(4) << "Wrote journal entry: " << update.DebugString(); } return absl::OkStatus(); } FileJournalReader::FileJournalReader(Env* env, StringPiece journal_dir) : env_(env), journal_dir_(journal_dir) {} Status FileJournalReader::EnsureInitialized() { if (reader_) { return absl::OkStatus(); } return UpdateFile(DataServiceJournalFile(journal_dir_, 0)); } Status FileJournalReader::Read(Update& update, bool& end_of_journal) { TF_RETURN_IF_ERROR(EnsureInitialized()); while (true) { tstring record; Status s = reader_->ReadRecord(&record); if (absl::IsOutOfRange(s)) { sequence_number_++; std::string next_journal_file = DataServiceJournalFile(journal_dir_, sequence_number_); if (absl::IsNotFound(env_->FileExists(next_journal_file))) { VLOG(3) << "Next journal file " << next_journal_file << " does not exist. 
End of journal reached."; end_of_journal = true; return absl::OkStatus(); } TF_RETURN_IF_ERROR(UpdateFile(next_journal_file)); continue; } TF_RETURN_IF_ERROR(s); if (!update.ParseFromString(record)) { return errors::DataLoss("Failed to parse journal record."); } if (VLOG_IS_ON(4)) { VLOG(4) << "Read journal entry: " << update.DebugString(); } end_of_journal = false; return absl::OkStatus(); } } Status FileJournalReader::UpdateFile(const std::string& filename) { VLOG(1) << "Reading from journal file " << filename; TF_RETURN_IF_ERROR(env_->NewRandomAccessFile(filename, &file_)); io::RecordReaderOptions opts; opts.buffer_size = 2 << 20; reader_ = std::make_unique<io::SequentialRecordReader>(file_.get(), opts); return absl::OkStatus(); } } }
#include "tensorflow/core/data/service/journal.h" #include <memory> #include <string> #include <vector> #include "absl/memory/memory.h" #include "absl/status/status.h" #include "tensorflow/core/data/service/common.pb.h" #include "tensorflow/core/data/service/journal.pb.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/path.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/protobuf/data_service.pb.h" namespace tensorflow { namespace data { namespace { using ::testing::HasSubstr; bool NewJournalDir(std::string& journal_dir) { std::string filename = testing::TmpDir(); if (!Env::Default()->CreateUniqueFileName(&filename, "journal_dir")) { return false; } journal_dir = filename; return true; } Update MakeCreateIterationUpdate() { Update update; CreateIterationUpdate* create_iteration = update.mutable_create_iteration(); create_iteration->set_job_id(3); create_iteration->set_iteration_id(8); create_iteration->set_repetition(5); return update; } Update MakeFinishTaskUpdate() { Update update; FinishTaskUpdate* finish_task = update.mutable_finish_task(); finish_task->set_task_id(8); return update; } Update MakeRegisterDatasetUpdate() { Update update; RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset(); register_dataset->set_dataset_id("dataset_id"); register_dataset->set_fingerprint(3); return update; } Status CheckJournalContent(StringPiece journal_dir, const std::vector<Update>& expected) { FileJournalReader reader(Env::Default(), journal_dir); for (const auto& update : expected) { Update result; bool end_of_journal = true; TF_RETURN_IF_ERROR(reader.Read(result, end_of_journal)); EXPECT_FALSE(end_of_journal); EXPECT_EQ(result.SerializeAsString(), update.SerializeAsString()); } Update result; bool end_of_journal = false; TF_RETURN_IF_ERROR(reader.Read(result, end_of_journal)); EXPECT_TRUE(end_of_journal); return absl::OkStatus(); } } TEST(Journal, RoundTripMultiple) { std::string journal_dir; EXPECT_TRUE(NewJournalDir(journal_dir)); std::vector<Update> updates = {MakeCreateIterationUpdate(), MakeRegisterDatasetUpdate(), MakeFinishTaskUpdate()}; FileJournalWriter writer(Env::Default(), journal_dir); for (const auto& update : updates) { TF_EXPECT_OK(writer.Write(update)); } TF_EXPECT_OK(CheckJournalContent(journal_dir, updates)); } TEST(Journal, AppendExistingJournal) { std::string journal_dir; EXPECT_TRUE(NewJournalDir(journal_dir)); std::vector<Update> updates = {MakeCreateIterationUpdate(), MakeRegisterDatasetUpdate(), MakeFinishTaskUpdate()}; for (const auto& update : updates) { FileJournalWriter writer(Env::Default(), journal_dir); TF_EXPECT_OK(writer.Write(update)); } TF_EXPECT_OK(CheckJournalContent(journal_dir, updates)); } TEST(Journal, MissingFile) { std::string journal_dir; EXPECT_TRUE(NewJournalDir(journal_dir)); FileJournalReader reader(Env::Default(), journal_dir); Update result; bool end_of_journal = true; Status s = reader.Read(result, end_of_journal); EXPECT_TRUE(absl::IsNotFound(s)); } TEST(Journal, NonRecordData) { std::string journal_dir; EXPECT_TRUE(NewJournalDir(journal_dir)); TF_ASSERT_OK(Env::Default()->RecursivelyCreateDir(journal_dir)); { std::unique_ptr<WritableFile> file; TF_ASSERT_OK(Env::Default()->NewAppendableFile( DataServiceJournalFile(journal_dir, 0), &file)); TF_ASSERT_OK(file->Append("not record data")); } FileJournalReader reader(Env::Default(), journal_dir); Update result; bool end_of_journal = true; Status s = 
reader.Read(result, end_of_journal); EXPECT_THAT(s.message(), HasSubstr("corrupted record")); EXPECT_EQ(s.code(), error::DATA_LOSS); } TEST(Journal, InvalidRecordData) { std::string journal_dir; EXPECT_TRUE(NewJournalDir(journal_dir)); TF_ASSERT_OK(Env::Default()->RecursivelyCreateDir(journal_dir)); { std::unique_ptr<WritableFile> file; TF_ASSERT_OK(Env::Default()->NewAppendableFile( DataServiceJournalFile(journal_dir, 0), &file)); auto writer = std::make_unique<io::RecordWriter>(file.get()); TF_ASSERT_OK(writer->WriteRecord("not serialized proto")); } FileJournalReader reader(Env::Default(), journal_dir); Update result; bool end_of_journal = true; Status s = reader.Read(result, end_of_journal); EXPECT_THAT(s.message(), HasSubstr("Failed to parse journal record")); EXPECT_EQ(s.code(), error::DATA_LOSS); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/journal.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/journal_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9023f77c-6d2f-4bb9-b69d-4e2e666b234d
cpp
tensorflow/tensorflow
stream
tensorflow/core/tfrt/runtime/stream.cc
tensorflow/core/tfrt/runtime/stream_test.cc
#include "tensorflow/core/tfrt/runtime/stream.h" #include <cstdint> #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include "absl/base/thread_annotations.h" #include "absl/container/flat_hash_map.h" #include "absl/functional/any_invocable.h" #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/synchronization/mutex.h" #include "absl/time/clock.h" #include "absl/time/time.h" #include "absl/utility/utility.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinOps.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tsl/platform/random.h" #include "tsl/platform/threadpool_interface.h" #include "tsl/profiler/lib/traceme.h" namespace tensorflow { namespace tfrt_stub { absl::StatusOr<std::optional<StreamCallbackId>> CreateStreamCallbackId( absl::string_view model_name, mlir::ModuleOp module) { mlir::Builder builder(module.getContext()); std::vector<mlir::TF::PwStreamResultsOp> ops; module->walk([&](mlir::TF::PwStreamResultsOp op) { ops.push_back(op); }); if (ops.empty()) { return std::nullopt; } auto& stream_interface = GetGlobalStreamCallbackRegistry().stream_interface(); auto controller_address = stream_interface.controller_address(); auto controller_address_attr = builder.getStringAttr(controller_address); auto model_name_attr = builder.getStringAttr(model_name); const StreamCallbackId callback_id( static_cast<int64_t>(tsl::random::New64())); auto callback_id_attr = builder.getI64IntegerAttr(callback_id.id); for (auto op : ops) { op->setAttr("_controller_address", controller_address_attr); op->setAttr("_model_name", model_name_attr); op->setAttr("_callback_id", callback_id_attr); } return callback_id; } absl::Status StreamCallbackRegistry::CallbackState::Invoke( tsl::thread::ThreadPoolInterface* thread_pool, StreamedResult result) { { absl::MutexLock lock(&mu_); if (closed_) { return absl::InternalError( "Failed to invole the callback that is closed."); } ++num_outstanding_; } thread_pool->Schedule([this, result = std::move(result)]() mutable { InvokeCallback(std::move(result)); absl::MutexLock lock(&mu_); --num_outstanding_; }); return absl::OkStatus(); } void StreamCallbackRegistry::CallbackState::Close() { { absl::MutexLock lock(&mu_); closed_ = true; auto not_running = [this]() ABSL_SHARED_LOCKS_REQUIRED(mu_) { return num_outstanding_ == 0; }; mu_.Await(absl::Condition(&not_running)); } } void StreamCallbackRegistry::CallbackState::InvokeCallback( StreamedResult result) { absl::Duration dequeue_latency = absl::Now() - result.enqueued_time; interface().RecordDequeueLatency(model_name_, dequeue_latency); tsl::profiler::TraceMe trace_me("StreamCallbackInvocation"); trace_me.AppendMetadata([&]() { return tsl::profiler::TraceMeEncode({ {"callback_id", callback_id_.id}, {"step_id", step_id_.id}, }); }); absl::Time start_time = absl::Now(); callback_(std::move(result.tensors)); interface().RecordCallbackLatency(model_name_, absl::Now() - start_time); } absl::StatusOr<ScopedStreamCallback> StreamCallbackRegistry::Register( absl::string_view model_name, StreamCallbackId callback_id, StepId step_id, absl::AnyInvocable< void(absl::flat_hash_map<std::string, tensorflow::Tensor>)> callback) { absl::MutexLock l(&mu_); const auto [it, inserted] = stream_callbacks_.insert({std::make_pair(callback_id, step_id), nullptr}); if (!inserted) { return absl::AlreadyExistsError(absl::StrCat( 
"Stream callback ", callback_id, " @ ", step_id, " already exists")); } it->second = std::make_unique<CallbackState>(this, model_name, callback_id, step_id, std::move(callback)); return ScopedStreamCallback(this, callback_id, step_id); } absl::Status StreamCallbackRegistry::Invoke( tsl::thread::ThreadPoolInterface* thread_pool, StreamCallbackId callback_id, StepId step_id, StreamedResult result) { absl::MutexLock lock(&mu_); auto iter = stream_callbacks_.find({callback_id, step_id}); if (iter == stream_callbacks_.end()) { return absl::NotFoundError(absl::StrCat( "Stream callback ", callback_id, " @ ", step_id, " does not exist; this usually indicates that a streaming signature " "was called by a non-streaming request")); } auto* state = iter->second.get(); DCHECK(state); return state->Invoke(thread_pool, std::move(result)); } std::unique_ptr<StreamCallbackRegistry::CallbackState> StreamCallbackRegistry::Unregister(StreamCallbackId callback_id, StepId step_id) { absl::MutexLock l(&mu_); const auto it = stream_callbacks_.find({callback_id, step_id}); if (it == stream_callbacks_.end()) { return nullptr; } auto state = std::move(it->second); stream_callbacks_.erase(it); return state; } ScopedStreamCallback::ScopedStreamCallback(ScopedStreamCallback&& other) : registry_(other.registry_), callback_id_(other.callback_id_), step_id_(other.step_id_) { other.callback_id_ = std::nullopt; other.step_id_ = StepId::GetInvalidStepId(); } ScopedStreamCallback& ScopedStreamCallback::operator=( ScopedStreamCallback&& other) { Unregister(); registry_ = other.registry_; callback_id_ = other.callback_id_; step_id_ = other.step_id_; other.callback_id_ = std::nullopt; other.step_id_ = StepId::GetInvalidStepId(); return *this; } void ScopedStreamCallback::Unregister() { if (!callback_id_.has_value()) { return; } tsl::profiler::TraceMe trace_me("ScopedStreamCallback::Unregister"); trace_me.AppendMetadata([&]() { return tsl::profiler::TraceMeEncode({ {"callback_id", callback_id_->id}, {"step_id", step_id_.id}, }); }); DCHECK(registry_); auto state = registry_->Unregister(*callback_id_, step_id_); DCHECK(state); state->Close(); callback_id_.reset(); } StreamInterfaceFactory& GetGlobalStreamInterfaceFactory() { static auto* stream_interface_factory = new StreamInterfaceFactory; return *stream_interface_factory; } StreamCallbackRegistry& GetGlobalStreamCallbackRegistry() { static auto* stream_callback_registry = new StreamCallbackRegistry(GetGlobalStreamInterfaceFactory() .CreateControllerStreamInterface() .value()); return *stream_callback_registry; } } }
#include "tensorflow/core/tfrt/runtime/stream.h" #include <cstdint> #include <functional> #include <memory> #include <string> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/container/flat_hash_map.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/memory/memory.h" #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "absl/time/clock.h" #include "absl/time/time.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/tfrt/runtime/step_id.h" #include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h" #include "tensorflow/core/tfrt/utils/thread_pool.h" #include "tsl/platform/env.h" #include "tsl/platform/statusor.h" namespace tensorflow { namespace tfrt_stub { namespace { using ::tensorflow::test::AsTensor; using ::testing::AnyOf; using ::testing::ElementsAreArray; using ::testing::Pair; using ::testing::UnorderedElementsAre; using ::testing::status::StatusIs; TEST(StreamTest, Simple) { StreamCallbackId callback_id(1234); StepId step_id(5678); std::vector<absl::flat_hash_map<std::string, tensorflow::Tensor>> outputs; ScopedStreamCallback scoped_stream_callback; { TF_ASSERT_OK_AND_ASSIGN( scoped_stream_callback, GetGlobalStreamCallbackRegistry().Register( "test_model", callback_id, step_id, [&](absl::flat_hash_map<std::string, tensorflow::Tensor> arg) { outputs.push_back(std::move(arg)); })); std::vector<absl::flat_hash_map<std::string, tensorflow::Tensor>> expected = {{{"a", AsTensor<int32_t>({100})}, {"b", AsTensor<int32_t>({200})}}, {{"c", AsTensor<int32_t>({300})}}}; auto thread = absl::WrapUnique(tsl::Env::Default()->StartThread( tsl::ThreadOptions(), "fake_stream_client", [&]() { for (const auto& map : expected) { TfThreadPool thread_pool("test", 4); CHECK_OK(GetGlobalStreamCallbackRegistry().Invoke( &thread_pool, callback_id, step_id, {map, absl::Now()})); } })); } EXPECT_EQ(outputs.size(), 2); EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]["a"]), ElementsAreArray({100})); EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]["b"]), ElementsAreArray({200})); EXPECT_THAT(GetTfTensorData<int32_t>(outputs[1]["c"]), ElementsAreArray({300})); ScopedStreamCallback scoped_stream_callback_copy; scoped_stream_callback_copy = std::move(scoped_stream_callback); auto status = GetGlobalStreamCallbackRegistry().Register( "test_model", callback_id, step_id, [&](absl::flat_hash_map<std::string, tensorflow::Tensor> arg) { outputs.push_back(std::move(arg)); }); EXPECT_THAT(status, StatusIs(absl::StatusCode::kAlreadyExists)); } TEST(StreamTest, MultipleWriters) { StreamCallbackId callback_id(1234); StepId step_id(5678); std::vector<absl::flat_hash_map<std::string, std::vector<int32_t>>> outputs; { TfThreadPool thread_pool("test", 4); TF_ASSERT_OK_AND_ASSIGN( auto scoped_stream_callback, GetGlobalStreamCallbackRegistry().Register( "test_model", callback_id, step_id, [&](absl::flat_hash_map<std::string, tensorflow::Tensor> arg) { absl::flat_hash_map<std::string, std::vector<int32_t>> out; for (const auto& p : arg) { out[p.first] = GetTfTensorData<int32_t>(p.second); } outputs.push_back(std::move(out)); })); std::vector<absl::flat_hash_map<std::string, tensorflow::Tensor>> expected = {{{"a", AsTensor<int32_t>({100})}, {"b", AsTensor<int32_t>({200})}}, {{"c", AsTensor<int32_t>({300})}}}; for (const auto& p : expected) { tsl::Env::Default()->SchedClosure([&, callback_id, step_id, p]() { TfThreadPool thread_pool("test", 4); 
GetGlobalStreamCallbackRegistry() .Invoke(&thread_pool, callback_id, step_id, {p, absl::Now()}) .IgnoreError(); }); } absl::SleepFor(absl::Microseconds(100)); } LOG(INFO) << "StreamCallback receives " << outputs.size() << " outputs."; for (const auto& output : outputs) { EXPECT_THAT( output, AnyOf(UnorderedElementsAre(Pair("a", ElementsAreArray({100})), Pair("b", ElementsAreArray({200}))), UnorderedElementsAre(Pair("c", ElementsAreArray({300}))))); } } class TestStreamControllerInterface : public StreamControllerInterface { public: TestStreamControllerInterface() : StreamControllerInterface("test_controller_address") {} }; TEST(StreamControllerInterface, Initialize) { GetGlobalStreamInterfaceFactory().RegisterController( []() { return std::make_unique<TestStreamControllerInterface>(); }); TF_ASSERT_OK_AND_ASSIGN( auto controller_interface, GetGlobalStreamInterfaceFactory().CreateControllerStreamInterface()); EXPECT_EQ(controller_interface->controller_address(), "test_controller_address"); } class TestStreamWorkerInterface : public StreamWorkerInterface { public: explicit TestStreamWorkerInterface(std::string worker_address) : StreamWorkerInterface(worker_address) {} absl::Status InvokeStreamCallback( const StreamCallbackId& callback_id, const std::vector<std::string>& names, const std::vector<std::pair<int64_t, std::vector<tensorflow::Tensor>>>& responses) override { return absl::OkStatus(); } }; TEST(StreamWorkerInterface, Initialize) { GetGlobalStreamInterfaceFactory().RegisterWorker( [](absl::string_view address) -> absl::StatusOr<std::unique_ptr<TestStreamWorkerInterface>> { return std::make_unique<TestStreamWorkerInterface>( "test_worker_address"); }); TF_ASSERT_OK_AND_ASSIGN( auto worker_interface, GetGlobalStreamInterfaceFactory().CreateWorkerStreamInterface()( "test_worker_address")); EXPECT_EQ(worker_interface->controller_address(), "test_worker_address"); } TEST(StepId, Generate) { StepId step_id(1234); EXPECT_EQ(step_id.id, 1234); StepIdGenerator step_id_generator; EXPECT_EQ(step_id_generator.GetNextStepId(), StepId(1)); EXPECT_EQ(step_id_generator.GetNextStepId(), StepId(2)); EXPECT_EQ(step_id_generator.GetNextStepId(), StepId(3)); } TEST(StepId, GlobalInitial) { EXPECT_EQ(GetGlobalInitialStepId(), 0); TEST_ScopedInitialStepId test_id(127); EXPECT_EQ(GetGlobalInitialStepId(), 127); } } } }
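The two Initialize tests show the registration pattern a deployment would use at startup: both factories must be populated before the first call to GetGlobalStreamCallbackRegistry(), since that registry constructs its controller interface from the factory on first use. A sketch with hypothetical interface types and addresses:

#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/tfrt/runtime/stream.h"

namespace tensorflow {
namespace tfrt_stub {

// Hypothetical stand-ins for RPC-backed implementations.
class MyControllerInterface : public StreamControllerInterface {
 public:
  MyControllerInterface() : StreamControllerInterface("controller:1234") {}
};

class MyWorkerInterface : public StreamWorkerInterface {
 public:
  explicit MyWorkerInterface(std::string address)
      : StreamWorkerInterface(std::move(address)) {}
  absl::Status InvokeStreamCallback(
      const StreamCallbackId& callback_id,
      const std::vector<std::string>& names,
      const std::vector<std::pair<int64_t, std::vector<tensorflow::Tensor>>>&
          responses) override {
    // Forward streamed responses to the controller here.
    return absl::OkStatus();
  }
};

// Call once at startup, before the stream callback registry is first used.
void RegisterStreamInterfaces() {
  GetGlobalStreamInterfaceFactory().RegisterController(
      []() { return std::make_unique<MyControllerInterface>(); });
  GetGlobalStreamInterfaceFactory().RegisterWorker(
      [](absl::string_view address)
          -> absl::StatusOr<std::unique_ptr<MyWorkerInterface>> {
        return std::make_unique<MyWorkerInterface>(std::string(address));
      });
}

}  // namespace tfrt_stub
}  // namespace tensorflow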
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/stream.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/stream_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
748cb136-f55b-4f82-b4cb-af329ea655c6
cpp
tensorflow/tensorflow
sharding_propagation
third_party/xla/xla/service/sharding_propagation.cc
third_party/xla/xla/service/sharding_propagation_test.cc
#include "xla/service/sharding_propagation.h" #include <algorithm> #include <cstdint> #include <functional> #include <iterator> #include <list> #include <map> #include <memory> #include <optional> #include <string> #include <tuple> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/base/attributes.h" #include "absl/base/call_once.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/strings/str_join.h" #include "absl/types/span.h" #include "xla/array.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/ir/hlo_sharding.h" #include "xla/hlo/ir/hlo_sharding_metadata.h" #include "xla/hlo/utils/hlo_sharding_util.h" #include "xla/protobuf_util.h" #include "xla/service/dot_as_convolution_util.h" #include "xla/service/host_memory_offload_annotations.h" #include "xla/service/spmd/shard_barrier_partitioner.h" #include "xla/shape.h" #include "xla/shape_tree.h" #include "xla/shape_util.h" #include "xla/sharding_op_util.h" #include "xla/status_macros.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla { namespace { std::optional<HloSharding> ReturnImprovedSharding( HloSharding sharding, HloInstruction* instruction, bool may_combine_partial_sharding, bool allow_aggressive_resharding = false) { return hlo_sharding_util::ReturnImprovedShardingImpl( std::move(sharding), instruction->has_sharding() ? &instruction->sharding() : nullptr, instruction->shape(), may_combine_partial_sharding, allow_aggressive_resharding); } std::optional<HloSharding> ReturnImprovedSubSharding( HloSharding sharding, HloInstruction* instruction, const ShapeIndex& index, bool may_combine_partial_sharding, bool allow_aggressive_resharding = false) { if (instruction->has_sharding()) { const HloSharding to_improved = instruction->sharding().GetSubSharding(instruction->shape(), index); return hlo_sharding_util::ReturnImprovedShardingImpl( std::move(sharding), &to_improved, ShapeUtil::GetSubshape(instruction->shape(), index), may_combine_partial_sharding, allow_aggressive_resharding); } else { return hlo_sharding_util::ReturnImprovedShardingImpl( std::move(sharding), nullptr, ShapeUtil::GetSubshape(instruction->shape(), index), may_combine_partial_sharding, allow_aggressive_resharding); } } bool MaybeImproveInstructionSharding(HloSharding sharding, HloInstruction* instruction, bool may_combine_partial_sharding, bool allow_aggressive_resharding = false) { if (auto new_sharding = ReturnImprovedSharding( std::move(sharding), instruction, may_combine_partial_sharding, allow_aggressive_resharding)) { instruction->set_sharding(std::move(*new_sharding)); return true; } return false; } bool MaybeImproveInstructionSubSharding( HloSharding sharding, HloInstruction* instruction, const ShapeIndex& index, bool may_combine_partial_sharding, bool allow_aggressive_resharding = false) { if (instruction->shape().IsTuple()) { if (auto new_sub_sharding = ReturnImprovedSubSharding( std::move(sharding), instruction, index, may_combine_partial_sharding, allow_aggressive_resharding)) { HloSharding new_sharding = instruction->has_sharding() ? 
instruction->sharding() : HloSharding::Single(instruction->shape(), HloSharding::Replicate()); ShapeTree<HloSharding> sharding_shape_tree = new_sharding.GetAsShapeTree(instruction->shape()); *sharding_shape_tree.mutable_element(index) = new_sub_sharding.value(); instruction->set_sharding(HloSharding::Tuple(sharding_shape_tree)); return true; } else { return false; } } CHECK(index.size() == 1 && index[0] == 0); return MaybeImproveInstructionSharding(std::move(sharding), instruction, may_combine_partial_sharding, allow_aggressive_resharding); } bool IsConvolutionKernelSmall(const HloInstruction* instruction) { CHECK_EQ(instruction->opcode(), HloOpcode::kConvolution); const HloInstruction* rhs = instruction->operand(1); const auto& dnums = instruction->convolution_dimension_numbers(); int64_t kernel_dim_prod = 1; int64_t output_dim_prod = 1; for (int64_t i = 0; i < dnums.input_spatial_dimensions().size(); ++i) { int64_t kernel_dim = rhs->shape().dimensions(dnums.kernel_spatial_dimensions(i)); kernel_dim_prod *= kernel_dim; int64_t output_dim = instruction->shape().dimensions(dnums.output_spatial_dimensions(i)); output_dim_prod *= output_dim; if (kernel_dim >= output_dim && (i < 2 || kernel_dim > 3 || kernel_dim_prod >= output_dim_prod)) { return false; } } return true; } bool IsPassthroughCustomOps(const HloInstruction* hlo) { if (hlo->IsCustomCall({"Sharding", "X64Combine", "LayoutConstraint"})) { return true; } if (hlo->operand_count() != 1 || !hlo->shape().IsArray() || !hlo->operand(0)->shape().IsArray() || hlo->operand(0)->shape().rank() != hlo->shape().rank()) { return false; } return hlo->IsCustomCall( {"ResizeNearest", "ResizeBilinear", "ResizeNearestGrad", "ResizeBilinearGrad", "Cholesky", host_memory_offload_annotations::kMoveToDeviceCustomCallTarget, host_memory_offload_annotations::kMoveToHostCustomCallTarget}); } const HloInstruction* PickRepresentativeOperand( const HloInstruction* instruction) { switch (instruction->opcode()) { case HloOpcode::kMap: case HloOpcode::kPad: case HloOpcode::kPower: case HloOpcode::kOptimizationBarrier: case HloOpcode::kReverse: case HloOpcode::kSlice: case HloOpcode::kShiftLeft: case HloOpcode::kShiftRightArithmetic: case HloOpcode::kShiftRightLogical: if (instruction->operand(0)->has_sharding()) { return instruction->operand(0); } return nullptr; case HloOpcode::kAbs: case HloOpcode::kAdd: case HloOpcode::kAnd: case HloOpcode::kAtan2: case HloOpcode::kBitcastConvert: case HloOpcode::kCeil: case HloOpcode::kClamp: case HloOpcode::kClz: case HloOpcode::kCompare: case HloOpcode::kComplex: case HloOpcode::kConcatenate: case HloOpcode::kConvert: case HloOpcode::kCopy: case HloOpcode::kCos: case HloOpcode::kAllGather: case HloOpcode::kAllReduce: case HloOpcode::kReduceScatter: case HloOpcode::kAllToAll: case HloOpcode::kCollectiveBroadcast: case HloOpcode::kCollectivePermute: case HloOpcode::kDivide: case HloOpcode::kErf: case HloOpcode::kExp: case HloOpcode::kExpm1: case HloOpcode::kFloor: case HloOpcode::kImag: case HloOpcode::kIsFinite: case HloOpcode::kLog: case HloOpcode::kLog1p: case HloOpcode::kLogistic: case HloOpcode::kMaximum: case HloOpcode::kMinimum: case HloOpcode::kMultiply: case HloOpcode::kNegate: case HloOpcode::kNot: case HloOpcode::kOr: case HloOpcode::kPopulationCount: case HloOpcode::kReal: case HloOpcode::kReducePrecision: case HloOpcode::kRemainder: case HloOpcode::kRoundNearestAfz: case HloOpcode::kRoundNearestEven: case HloOpcode::kRsqrt: case HloOpcode::kSelect: case HloOpcode::kSign: case HloOpcode::kSin: case HloOpcode::kTopK: 
case HloOpcode::kSort: case HloOpcode::kSqrt: case HloOpcode::kCbrt: case HloOpcode::kSubtract: case HloOpcode::kStochasticConvert: case HloOpcode::kTan: case HloOpcode::kTanh: case HloOpcode::kWhile: case HloOpcode::kXor: { const HloInstruction* best_operand = nullptr; for (const HloInstruction* operand : instruction->operands()) { if (operand->has_sharding() && (best_operand == nullptr || hlo_sharding_util::IsShardingMoreSpecific( operand->sharding(), best_operand->sharding()))) { best_operand = operand; } } return best_operand; } case HloOpcode::kCustomCall: { if (IsPassthroughCustomOps(instruction)) { return instruction->operand(0); } return nullptr; } case HloOpcode::kAddDependency: case HloOpcode::kAfterAll: case HloOpcode::kAsyncStart: case HloOpcode::kAsyncUpdate: case HloOpcode::kAsyncDone: case HloOpcode::kAllGatherStart: case HloOpcode::kAllGatherDone: case HloOpcode::kAllReduceStart: case HloOpcode::kAllReduceDone: case HloOpcode::kBatchNormGrad: case HloOpcode::kBatchNormInference: case HloOpcode::kBatchNormTraining: case HloOpcode::kBitcast: case HloOpcode::kBroadcast: case HloOpcode::kCall: case HloOpcode::kCholesky: case HloOpcode::kCollectivePermuteDone: case HloOpcode::kCollectivePermuteStart: case HloOpcode::kConditional: case HloOpcode::kConstant: case HloOpcode::kConvolution: case HloOpcode::kCopyDone: case HloOpcode::kCopyStart: case HloOpcode::kDomain: case HloOpcode::kDot: case HloOpcode::kDynamicSlice: case HloOpcode::kDynamicUpdateSlice: case HloOpcode::kDynamicReshape: case HloOpcode::kFft: case HloOpcode::kFusion: case HloOpcode::kGather: case HloOpcode::kGetTupleElement: case HloOpcode::kInfeed: case HloOpcode::kIota: case HloOpcode::kOutfeed: case HloOpcode::kParameter: case HloOpcode::kPartitionId: case HloOpcode::kRecv: case HloOpcode::kRecvDone: case HloOpcode::kReduce: case HloOpcode::kReduceWindow: case HloOpcode::kReplicaId: case HloOpcode::kReshape: case HloOpcode::kRng: case HloOpcode::kRngGetAndUpdateState: case HloOpcode::kRngBitGenerator: case HloOpcode::kScatter: case HloOpcode::kSelectAndScatter: case HloOpcode::kSend: case HloOpcode::kSendDone: case HloOpcode::kTranspose: case HloOpcode::kTriangularSolve: case HloOpcode::kTuple: case HloOpcode::kGetDimensionSize: case HloOpcode::kSetDimensionSize: return nullptr; } } bool SupportSpatialPartitioning( const HloInstruction* instruction, const ShardingPropagation::ComputationMap& computation_map, bool is_spmd, bool allow_spmd_sharding_propagation_to_output, bool allow_spmd_sharding_propagation_to_parameters, const CustomCallShardingHelper* sharding_helper) { const bool is_entry_root = instruction->parent() ->parent() ->entry_computation() ->root_instruction() == instruction; if (instruction->parent()->root_instruction() == instruction && computation_map.find(instruction->parent()) == computation_map.end() && !(is_entry_root && allow_spmd_sharding_propagation_to_output)) { return false; } if (instruction->IsElementwise() && (instruction->opcode() != HloOpcode::kRng || is_spmd)) { return true; } switch (instruction->opcode()) { case HloOpcode::kBroadcast: case HloOpcode::kConcatenate: case HloOpcode::kConditional: case HloOpcode::kConstant: case HloOpcode::kConvolution: case HloOpcode::kOptimizationBarrier: case HloOpcode::kDot: case HloOpcode::kDynamicSlice: case HloOpcode::kDynamicUpdateSlice: case HloOpcode::kGather: case HloOpcode::kGetTupleElement: case HloOpcode::kInfeed: case HloOpcode::kIota: case HloOpcode::kPad: case HloOpcode::kReduceWindow: case HloOpcode::kReshape: case 
HloOpcode::kScatter: case HloOpcode::kSelectAndScatter: case HloOpcode::kSlice: case HloOpcode::kSort: case HloOpcode::kTranspose: case HloOpcode::kTuple: case HloOpcode::kWhile: case HloOpcode::kReduce: case HloOpcode::kRngBitGenerator: case HloOpcode::kAllReduce: case HloOpcode::kReduceScatter: return true; case HloOpcode::kParameter: return allow_spmd_sharding_propagation_to_parameters || computation_map.find(instruction->parent()) != computation_map.end(); case HloOpcode::kReverse: return is_spmd; case HloOpcode::kCustomCall: if (!is_spmd) { return false; } if (auto* partitioner = GetCustomCallPartitioner(instruction->custom_call_target())) { return partitioner->IsCustomCallShardable(instruction); } return (IsPassthroughCustomOps(instruction) || sharding_helper->IsCustomCallShardable(instruction)); default: return false; } } std::optional<HloSharding> LookaheadUserSharding(HloInstruction* instr, bool is_spmd, const CallGraph& call_graph) { if (instr->user_count() != 1) { return std::nullopt; } HloInstruction* current_user = instr->users()[0]; std::optional<HloSharding> sharding; std::vector<HloInstruction*> users_chain = {instr, current_user}; while (!current_user->has_sharding()) { if (current_user->users().size() != 1) { users_chain.clear(); break; } current_user = current_user->users()[0]; users_chain.push_back(current_user); } if (users_chain.empty()) { return std::nullopt; } for (int i = users_chain.size() - 1; i >= 1; --i) { HloInstruction* user = users_chain[i]; HloInstruction* current = users_chain[i - 1]; CHECK(user->has_sharding()); sharding = ShardingPropagation::GetShardingFromUser( *current, *user, INT64_MAX, is_spmd, call_graph, nullptr); if (sharding.has_value() && i != 1) { current->set_sharding(*sharding); continue; } break; } for (int i = 1; i < users_chain.size() - 1; ++i) { users_chain[i]->clear_sharding(); } return sharding; } bool InferGatherParallelShardingFromOperands( HloInstruction* instruction, const hlo_sharding_util::GatherScatterParallelDims& parallel_dims, bool may_combine_partial_sharding) { CHECK(DynCast<HloGatherInstruction>(instruction)); bool changed = false; auto output_parallel_dims = hlo_sharding_util::GetGatherParallelOutputDims( *instruction, parallel_dims); if (hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(0))) { changed |= MaybeImproveInstructionSharding( hlo_sharding_util:: InferGatherScatterParallelShardingFromOperandSharding( instruction->operand(0)->sharding(), instruction->shape(), absl::MakeConstSpan(parallel_dims.operand_parallel_dims), absl::MakeConstSpan(output_parallel_dims)), instruction, may_combine_partial_sharding); } if (hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(1))) { changed |= MaybeImproveInstructionSharding( hlo_sharding_util:: InferGatherScatterParallelShardingFromOperandSharding( instruction->operand(1)->sharding(), instruction->shape(), absl::MakeConstSpan(parallel_dims.indices_parallel_dims), absl::MakeConstSpan(output_parallel_dims)), instruction, may_combine_partial_sharding); } return changed; } bool InferScatterParallelShardingFromOperands( HloInstruction* instruction, const hlo_sharding_util::GatherScatterParallelDims& parallel_dims, bool may_combine_partial_sharding) { HloScatterInstruction* scatter = DynCast<HloScatterInstruction>(instruction); CHECK(scatter); const int64_t operand_count = scatter->scatter_operand_count(); auto scatter_operands = scatter->scatter_operands(); auto scatter_indices = scatter->scatter_indices(); auto scatter_updates = scatter->scatter_updates(); 
bool changed = false; auto update_parallel_dims = hlo_sharding_util::GetScatterParallelUpdateDims( *instruction, parallel_dims); Shape shape = operand_count == 1 ? instruction->shape() : ShapeUtil::GetSubshape(instruction->shape(), {0}); for (int64_t i = 0; i != operand_count; ++i) { if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_operands[i])) { changed |= MaybeImproveInstructionSubSharding( hlo_sharding_util:: InferGatherScatterParallelShardingFromOperandSharding( scatter_operands[i]->sharding(), shape, absl::MakeConstSpan(parallel_dims.operand_parallel_dims), absl::MakeConstSpan(parallel_dims.operand_parallel_dims)), instruction, {i}, may_combine_partial_sharding); } } if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_indices)) { auto parallel_sharding_from_indices = hlo_sharding_util:: InferGatherScatterParallelShardingFromOperandSharding( scatter_indices->sharding(), shape, absl::MakeConstSpan(parallel_dims.indices_parallel_dims), absl::MakeConstSpan(parallel_dims.operand_parallel_dims)); for (int64_t i = 0; i != operand_count; ++i) { changed |= MaybeImproveInstructionSubSharding( parallel_sharding_from_indices, instruction, {i}, may_combine_partial_sharding); } } for (int64_t i = 0; i != operand_count; ++i) { if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_updates[i])) { changed |= MaybeImproveInstructionSubSharding( hlo_sharding_util:: InferGatherScatterParallelShardingFromOperandSharding( scatter_updates[i]->sharding(), shape, absl::MakeConstSpan(update_parallel_dims), absl::MakeConstSpan(parallel_dims.operand_parallel_dims)), instruction, {i}, may_combine_partial_sharding); } } return changed; } bool CanPropagateThroughAtAggressiveLevel(const HloInstruction& inst, int64_t aggressiveness) { if (aggressiveness < 1 && !(inst.IsElementwise() || inst.IsCustomCall("Sharding")) && inst.opcode() != HloOpcode::kTranspose && inst.opcode() != HloOpcode::kReshape && inst.opcode() != HloOpcode::kTuple && inst.opcode() != HloOpcode::kGetTupleElement && inst.opcode() != HloOpcode::kWhile && inst.opcode() != HloOpcode::kDynamicSlice && inst.opcode() != HloOpcode::kDynamicUpdateSlice && inst.opcode() != HloOpcode::kOptimizationBarrier && inst.opcode() != HloOpcode::kConcatenate && inst.opcode() != HloOpcode::kCall && inst.opcode() != HloOpcode::kCopy) { return false; } if (aggressiveness < 2 && inst.opcode() == HloOpcode::kBroadcast) { return false; } return true; } bool SameShardingMetadata(const HloSharding& a, const HloSharding& b) { DCHECK_EQ(a, b); auto same_metadata = [](absl::Span<const OpMetadata> a, absl::Span<const OpMetadata> b) { if (a.size() != b.size()) return false; for (int i = 0, e = a.size(); i < e; ++i) { if (!protobuf_util::ProtobufEquals(a[i], b[i])) { return false; } } return true; }; if (a.IsTuple()) { for (int i = 0, e = a.tuple_elements().size(); i < e; ++i) { if (!same_metadata(a.tuple_elements()[i].metadata(), b.tuple_elements()[i].metadata())) { return false; } } return true; } else { return same_metadata(a.metadata(), b.metadata()); } } bool AssignShardingMetadata( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (HloComputation* computation : module->computations(execution_threads)) { for (HloInstruction* instruction : computation->instructions()) { const auto& metadata = instruction->metadata(); if (!instruction->has_sharding() || metadata.ByteSizeLong() == 0) { continue; } HloSharding sharding_with_metadata = instruction->sharding().WithMetadata({metadata}, false); if 
(!SameShardingMetadata(instruction->sharding(), sharding_with_metadata)) { instruction->set_sharding(std::move(sharding_with_metadata)); changed = true; } } } return changed; } bool RemoveShardingMetadata( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (HloComputation* computation : module->computations(execution_threads)) { for (HloInstruction* instruction : computation->instructions()) { if (!instruction->has_sharding()) { continue; } HloSharding sharding_no_metadata = instruction->sharding().WithoutMetadata(); if (!SameShardingMetadata(instruction->sharding(), sharding_no_metadata)) { instruction->set_sharding(std::move(sharding_no_metadata)); changed = true; } } } return changed; } absl::Status CheckAndUpdateDeviceAssignmentsInWhileBody( HloInstruction* while_instruction) { auto bad_status = [](HloInstruction* instruction, int64_t device, HloInstruction* channel_instruction, int64_t correct_device) { return FailedPrecondition( "Instruction: %s is on device: %d, which conflicts with device: %d " "of channel instruction: %s", instruction->name(), device, correct_device, channel_instruction->name()); }; CHECK_EQ(while_instruction->opcode(), HloOpcode::kWhile); HloComputation* while_body = while_instruction->while_body(); std::map<int64_t, HloInstruction*> devices_to_instructions; std::optional<int64_t> unique_device = std::nullopt; HloInstruction* channel_instruction = nullptr; for (HloInstruction* instruction : while_body->instructions()) { if (instruction->sharding_unique_device()) { auto opcode = instruction->opcode(); int64_t device = *instruction->sharding_unique_device(); if (unique_device.has_value()) { if (*unique_device != device) { return bad_status(instruction, device, channel_instruction, *unique_device); } } else if (((opcode == HloOpcode::kSend || opcode == HloOpcode::kRecv) && !Cast<HloSendRecvInstruction>(instruction) ->is_host_transfer()) || ((opcode == HloOpcode::kAllReduce || opcode == HloOpcode::kReduceScatter) && instruction->channel_id())) { channel_instruction = instruction; unique_device = device; if (!devices_to_instructions.empty()) { for (auto it = devices_to_instructions.begin(); it != devices_to_instructions.end(); ++it) { if (*unique_device != it->first) { return bad_status(it->second, it->first, channel_instruction, *unique_device); } } } } else { devices_to_instructions[device] = instruction; } } } if (unique_device.has_value()) { auto while_device = while_instruction->sharding_unique_device(); if (while_device.has_value() && *unique_device != *while_device) { return bad_status(while_instruction, *while_device, channel_instruction, *unique_device); } auto body_root = while_body->root_instruction(); auto root_device = body_root->sharding_unique_device(); if (!root_device.has_value()) { body_root->set_device_sharding(*unique_device); } else if (*unique_device != *root_device) { return bad_status(body_root, *root_device, channel_instruction, *unique_device); } } return absl::OkStatus(); } bool RefineManualAutoShardingFromAuto( const HloSharding& to_merge, absl::Span<const int64_t> unspecified_dims, HloSharding* auto_sharding, HloSharding* manual_sharding) { if (!manual_sharding->IsManualSubgroup() || auto_sharding->IsManualSubgroup() || !manual_sharding->HasPartialReplication() || manual_sharding->subgroup_types().size() != 2) { return false; } HloSharding partial_rep = hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept( to_merge, unspecified_dims); if (partial_rep.IsTileMaximal()) 
{ return false; } if (!hlo_sharding_util::MergeShardingIfCompatible(partial_rep, auto_sharding)) { return false; } const int64_t data_rank = partial_rep.TiledDataRank(); std::vector<int64_t> partial_manual_shape( partial_rep.tile_assignment().dimensions().begin(), partial_rep.tile_assignment().dimensions().end()); partial_manual_shape.insert(partial_manual_shape.begin() + data_rank, 1); auto partial_tiling_for_manual = partial_rep.tile_assignment().Reshape(partial_manual_shape); HloSharding partial_rep_for_manual = HloSharding::PartialTile( partial_tiling_for_manual, partial_rep.metadata()); auto man_tiling = manual_sharding->tile_assignment(); if (manual_sharding->subgroup_types().back() != OpSharding::REPLICATED) { std::vector<int> transposed_dims(man_tiling.num_dimensions()); absl::c_iota(transposed_dims, 0); std::swap(transposed_dims.back(), transposed_dims[data_rank]); man_tiling = man_tiling.Transpose(transposed_dims); } HloSharding tmp_sharding_for_merging = HloSharding::PartialTile( std::move(man_tiling), manual_sharding->metadata()); if (!hlo_sharding_util::MergeShardingIfCompatible( partial_rep_for_manual, &tmp_sharding_for_merging)) { return false; } std::vector<OpSharding::Type> subgroup_types; subgroup_types.push_back(OpSharding::MANUAL); if (tmp_sharding_for_merging.HasPartialReplication()) { subgroup_types.push_back(OpSharding::REPLICATED); } *manual_sharding = HloSharding::Subgroup( tmp_sharding_for_merging.tile_assignment(), subgroup_types, tmp_sharding_for_merging.metadata()); return true; } bool RefineManualAutoShardingFromManual( const HloSharding& to_merge, absl::Span<const int64_t> unspecified_dims, HloSharding* auto_sharding, HloSharding* manual_sharding) { if (!to_merge.IsManualSubgroup() || !manual_sharding->IsManualSubgroup() || !manual_sharding->HasPartialReplication() || auto_sharding->IsManualSubgroup() || manual_sharding->subgroup_types().size() != 2) { return false; } HloSharding partial_rep = hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept( to_merge, unspecified_dims); if (partial_rep.IsTileMaximal()) { return false; } if (!hlo_sharding_util::MergeShardingIfCompatible(partial_rep, manual_sharding)) { return false; } HloSharding partial_rep_for_auto = HloSharding::Subgroup( partial_rep.tile_assignment(), std::vector<OpSharding::Type>(partial_rep.subgroup_types().size(), OpSharding::REPLICATED), partial_rep.metadata()); if (!hlo_sharding_util::MergeShardingIfCompatible(partial_rep_for_auto, auto_sharding)) { return false; } return true; } bool InferUnspecifiedDimsFromOperand(HloInstruction* annotate_op, absl::Span<const int64_t> unspecified_dims, HloInstruction** man_conversion_op_after) { CHECK(annotate_op->IsCustomCall("Sharding") || annotate_op->opcode() == HloOpcode::kCopy); if (!hlo_sharding_util::IsSpatiallyPartitioned(annotate_op->operand(0))) { return false; } const HloSharding& operand_sharding = annotate_op->operand(0)->sharding(); if (!operand_sharding.IsTiled()) { return false; } HloInstruction* man_conversion_op = nullptr; if (annotate_op->user_count() == 1) { HloInstruction* user = annotate_op->users()[0]; if (user->IsCustomCall("SPMDFullToShardShape") || user->IsCustomCall("SPMDShardToFullShape")) { std::vector<int64_t> user_unspec_dims; if (!sharding_op_util::ParseAttributes( Cast<HloCustomCallInstruction>(user)->opaque(), &user_unspec_dims) .ok()) { return false; } absl::c_sort(user_unspec_dims); if (unspecified_dims != user_unspec_dims) { return false; } man_conversion_op = user; } } *man_conversion_op_after = 
man_conversion_op; if (man_conversion_op == nullptr) { HloSharding partial_replicated = hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept( operand_sharding, unspecified_dims); HloSharding sharding = annotate_op->sharding(); if (!hlo_sharding_util::MergeShardingIfCompatible(partial_replicated, &sharding)) { return false; } annotate_op->set_sharding(sharding); return true; } if (man_conversion_op->IsCustomCall("SPMDFullToShardShape")) { HloSharding auto_sharding = annotate_op->sharding(); HloSharding manual_sharding = man_conversion_op->sharding(); if (!RefineManualAutoShardingFromAuto(operand_sharding, unspecified_dims, &auto_sharding, &manual_sharding)) { return false; } annotate_op->set_sharding(auto_sharding); man_conversion_op->set_sharding(manual_sharding); return true; } CHECK(man_conversion_op->IsCustomCall("SPMDShardToFullShape")); HloSharding manual_sharding = annotate_op->sharding(); HloSharding auto_sharding = man_conversion_op->sharding(); if (!RefineManualAutoShardingFromManual(operand_sharding, unspecified_dims, &auto_sharding, &manual_sharding)) { return false; } annotate_op->set_sharding(manual_sharding); man_conversion_op->set_sharding(auto_sharding); return true; } bool InferUnspecifiedDimsFromOneUser(HloInstruction* annotate_op, const HloInstruction* user, int64_t aggressiveness, bool is_spmd, absl::Span<const int64_t> unspecified_dims, HloInstruction* man_conversion_op, const CallGraph& call_graph) { CHECK(annotate_op->IsCustomCall("Sharding") || annotate_op->opcode() == HloOpcode::kCopy); if (!user->has_sharding() || !user->sharding().IsTiled()) { return false; } std::optional<HloSharding> user_sharding = ShardingPropagation::GetShardingFromUser( man_conversion_op == nullptr ? *annotate_op : *man_conversion_op, *user, aggressiveness, is_spmd, call_graph, nullptr); if (!user_sharding.has_value() || user_sharding->IsTileMaximal()) { return false; } if (man_conversion_op == nullptr) { HloSharding partial_replicated = hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept( *user_sharding, unspecified_dims); HloSharding sharding = annotate_op->sharding(); if (!hlo_sharding_util::MergeShardingIfCompatible(partial_replicated, &sharding)) { return false; } annotate_op->set_sharding(sharding); return true; } if (man_conversion_op->IsCustomCall("SPMDFullToShardShape")) { HloSharding auto_sharding = annotate_op->sharding(); HloSharding manual_sharding = man_conversion_op->sharding(); if (!RefineManualAutoShardingFromManual(*user_sharding, unspecified_dims, &auto_sharding, &manual_sharding)) { return false; } annotate_op->set_sharding(auto_sharding); man_conversion_op->set_sharding(manual_sharding); return true; } CHECK(man_conversion_op->IsCustomCall("SPMDShardToFullShape")); HloSharding manual_sharding = annotate_op->sharding(); HloSharding auto_sharding = man_conversion_op->sharding(); if (!RefineManualAutoShardingFromAuto(*user_sharding, unspecified_dims, &auto_sharding, &manual_sharding)) { return false; } annotate_op->set_sharding(manual_sharding); man_conversion_op->set_sharding(auto_sharding); return true; } bool InferUnspecifiedDimsFromUsers(HloInstruction* annotate_op, absl::Span<const int64_t> unspecified_dims, int64_t aggressiveness, bool is_spmd, HloInstruction** man_conversion_op_after, const CallGraph& call_graph) { HloInstruction* man_conversion_op = nullptr; if (annotate_op->user_count() == 1) { HloInstruction* user = annotate_op->users()[0]; if (user->IsCustomCall("SPMDFullToShardShape") || user->IsCustomCall("SPMDShardToFullShape")) { 
std::vector<int64_t> user_unspec_dims; absl::c_sort(user_unspec_dims); if (!sharding_op_util::ParseAttributes( Cast<HloCustomCallInstruction>(user)->opaque(), &user_unspec_dims) .ok() || unspecified_dims != user_unspec_dims) { return false; } man_conversion_op = user; } } *man_conversion_op_after = man_conversion_op; HloInstruction* op_for_users = man_conversion_op == nullptr ? annotate_op : man_conversion_op; bool changed = false; for (HloInstruction* user : op_for_users->users()) { changed |= InferUnspecifiedDimsFromOneUser( annotate_op, user, aggressiveness, is_spmd, unspecified_dims, man_conversion_op, call_graph); } return changed; } bool InferUnspecifiedDimsFromShardGroup( HloInstruction* annotate_op, absl::Span<const int64_t> unspecified_dims, const absl::flat_hash_set<HloInstruction*>& shard_group) { CHECK(annotate_op->IsCustomCall("Sharding") || annotate_op->opcode() == HloOpcode::kCopy); if (annotate_op->IsCustomCall(spmd::kShardBarrierTo)) { return false; } bool changed = false; for (const HloInstruction* member : shard_group) { if (member == annotate_op) { continue; } if (member->IsCustomCall(spmd::kShardBarrierFrom)) { continue; } if (!hlo_sharding_util::IsSpatiallyPartitioned(member)) { continue; } const HloSharding& member_sharding = member->sharding(); if (!member_sharding.IsTiled()) { continue; } HloSharding partial_replicated = hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept( member_sharding, unspecified_dims); HloSharding sharding = annotate_op->sharding(); if (!hlo_sharding_util::MergeShardingIfCompatible(partial_replicated, &sharding)) { continue; } annotate_op->set_sharding(sharding); changed |= true; } return changed; } bool IsCSEPreventionTarget(const HloInstruction* instruction) { return instruction->opcode() == HloOpcode::kBroadcast && instruction->operand(0)->shape().rank() == 0; } HloSharding SetCSEPreventionSharding(const HloSharding& sharding) { OpMetadata metadata; metadata.set_op_name("_sharding_propagation_cse_prevention"); return sharding.WithMetadata({metadata}, true); } bool IsCSEPreventionSharding(const HloSharding& sharding) { if (sharding.metadata().size() != 1) { return false; } return sharding.metadata()[0].op_name() == "_sharding_propagation_cse_prevention"; } } bool InferDotShardingFromOperands( HloInstruction* instruction, const CallGraph& call_graph, const dot_as_convolution_util::DotConvolutionDimsInfo& dnums, bool may_combine_partial_sharding, bool is_spmd) { auto from_operand = [&](int64_t operand_index) { auto operand = instruction->operand(operand_index); const HloSharding& operand_sharding = operand->sharding(); if (operand_sharding.IsTileMaximal()) { return operand_sharding; } std::vector<int64_t> contracting_dims; contracting_dims.reserve(dnums.contracting_dims.size()); for (const auto& dim : dnums.contracting_dims) { contracting_dims.push_back(operand_index == 0 ? dim.lhs : dim.rhs); } for (const auto& dim : operand_index == 0 ? dnums.rhs_non_contracting_dims : dnums.lhs_non_contracting_dims) { int64_t d = operand_index == 0 ? dim.lhs : dim.rhs; if (d >= 0) { contracting_dims.push_back(d); } } auto replicate_contracting_dims = hlo_sharding_util::PartiallyReplicateTiledShardingOnDims( operand_sharding, contracting_dims); std::vector<int64_t> out_dims_to_op_perm(instruction->shape().rank(), -1); std::vector<int64_t> op_dims_to_output_perm(operand->shape().rank(), -1); for (const auto& dim : dnums.batch_dims) { out_dims_to_op_perm[dim.output] = operand_index == 0 ? 
dim.lhs : dim.rhs; op_dims_to_output_perm[operand_index == 0 ? dim.lhs : dim.rhs] = dim.output; } for (const auto& dim : operand_index == 0 ? dnums.lhs_non_contracting_dims : dnums.rhs_non_contracting_dims) { out_dims_to_op_perm[dim.output] = operand_index == 0 ? dim.lhs : dim.rhs; op_dims_to_output_perm[operand_index == 0 ? dim.lhs : dim.rhs] = dim.output; } return *hlo_sharding_util::TransposeShardingWithCollapsedDims( replicate_contracting_dims, op_dims_to_output_perm, out_dims_to_op_perm); }; std::optional<HloSharding> improved_operand_0; std::optional<HloSharding> improved_operand_1; if (hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(0))) { improved_operand_0 = ReturnImprovedSharding( from_operand(0), instruction, may_combine_partial_sharding, false); } if (hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(1))) { improved_operand_1 = ReturnImprovedSharding( from_operand(1), instruction, may_combine_partial_sharding, false); } if (!improved_operand_0.has_value() && !improved_operand_1.has_value()) { return false; } if (improved_operand_0.has_value() && !improved_operand_1.has_value()) { instruction->set_sharding(*improved_operand_0); return true; } if (!improved_operand_0.has_value() && improved_operand_1.has_value()) { instruction->set_sharding(*improved_operand_1); return true; } CHECK(improved_operand_0.has_value() && improved_operand_1.has_value()); std::optional<HloSharding> lookahead_sharding = LookaheadUserSharding(instruction, is_spmd, call_graph); std::array<HloSharding, 2> sharding_priority = {*improved_operand_0, *improved_operand_1}; bool priority_defined_with_lookahead = false; if (lookahead_sharding.has_value()) { const bool operand_0_is_lookahead_subtiling = hlo_sharding_util::IsSubTilingOrEqualSharding( instruction->shape(), *lookahead_sharding, *improved_operand_0); const bool operand_1_is_lookahead_subtiling = hlo_sharding_util::IsSubTilingOrEqualSharding( instruction->shape(), *lookahead_sharding, *improved_operand_1); if (operand_0_is_lookahead_subtiling && !operand_1_is_lookahead_subtiling) { priority_defined_with_lookahead = true; } if (!operand_0_is_lookahead_subtiling && operand_1_is_lookahead_subtiling) { instruction->set_sharding(*improved_operand_1); std::swap(sharding_priority[0], sharding_priority[1]); priority_defined_with_lookahead = true; } } if (!priority_defined_with_lookahead && ShapeUtil::ByteSizeOf(instruction->operand(0)->shape()) < ShapeUtil::ByteSizeOf(instruction->operand(1)->shape())) { std::swap(sharding_priority[0], sharding_priority[1]); } instruction->set_sharding(sharding_priority[0]); MaybeImproveInstructionSharding(sharding_priority[1], instruction, may_combine_partial_sharding); return true; } bool InferConvolutionShardingFromOperands(HloInstruction* instruction, const CallGraph& call_graph, int64_t aggressiveness, bool may_combine_partial_sharding, bool is_spmd) { auto get_partitions_for_dims = [&](const HloInstruction* inst, absl::Span< const dot_as_convolution_util::DotConvolutionDimsInfo::DimNums> dims, int lhs_or_rhs) { int64_t partitions = 1; if (!inst->has_sharding()) { return partitions; } const auto& sharding = inst->sharding(); if (sharding.IsTileMaximal()) { return partitions; } for (const auto& dim : dims) { if (lhs_or_rhs == 0) { partitions *= sharding.tile_assignment().dim(dim.lhs); } else { CHECK_EQ(lhs_or_rhs, 1); partitions *= sharding.tile_assignment().dim(dim.rhs); } } return partitions; }; auto dot_dims = dot_as_convolution_util::ParseConvolutionDimsInfo(instruction); const int64_t 
lhs_conv_spatial_partitions = get_partitions_for_dims( instruction->operand(0), dot_dims.conv_spatial_dims, 0); const int64_t rhs_conv_spatial_partitions = get_partitions_for_dims( instruction->operand(1), dot_dims.conv_spatial_dims, 1); if (dot_dims.conv_spatial_dims.empty() || (lhs_conv_spatial_partitions == 1 && rhs_conv_spatial_partitions == 1 && instruction->batch_group_count() == 1 && instruction->feature_group_count() == 1)) { return InferDotShardingFromOperands(instruction, call_graph, dot_dims, may_combine_partial_sharding, is_spmd); } const auto& dnums = instruction->convolution_dimension_numbers(); const HloInstruction* lhs = instruction->operand(0); auto get_tiled_sharding_based_on_lhs = [&] { CHECK(!lhs->sharding().IsTileMaximal()); std::vector<int64_t> output_to_lhs_indices(instruction->shape().rank()); output_to_lhs_indices[dnums.output_batch_dimension()] = dnums.input_batch_dimension(); output_to_lhs_indices[dnums.output_feature_dimension()] = dnums.input_feature_dimension(); for (int64_t i = 0; i < dnums.input_spatial_dimensions_size(); ++i) { output_to_lhs_indices[dnums.output_spatial_dimensions(i)] = dnums.input_spatial_dimensions(i); } return hlo_sharding_util::TransposeSharding(lhs->sharding(), output_to_lhs_indices); }; if (!hlo_sharding_util::IsSpatiallyPartitioned(lhs)) { return false; } if (lhs->sharding().IsTileMaximal()) { return MaybeImproveInstructionSharding(lhs->sharding(), instruction, may_combine_partial_sharding); } if (IsConvolutionKernelSmall(instruction)) { const auto& tile_assignment = lhs->sharding().tile_assignment(); if (tile_assignment.dim(dnums.input_feature_dimension()) > 1) { return false; } return MaybeImproveInstructionSharding(get_tiled_sharding_based_on_lhs(), instruction, may_combine_partial_sharding); } return MaybeImproveInstructionSharding( hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept( lhs->sharding(), {dnums.input_batch_dimension()}), instruction, may_combine_partial_sharding); } std::optional<HloSharding> InferBroadcastOperandSharding( const HloInstruction& instruction, bool is_spmd) { if (instruction.sharding().IsReplicated() || instruction.sharding().IsManual()) { return instruction.sharding(); } std::vector<int64_t> dims_to_replicate; bool needs_replication = false; for (int64_t i = 0; i < instruction.shape().rank(); ++i) { if (absl::c_count(instruction.dimensions(), i) == 0) { dims_to_replicate.push_back(i); if (instruction.sharding().tile_assignment().dim(i) > 1) { needs_replication = true; } } } if (!is_spmd && needs_replication) { return std::nullopt; } return hlo_sharding_util::RemoveShapeDimensions( hlo_sharding_util::PartiallyReplicateTiledShardingOnDims( instruction.sharding(), dims_to_replicate), dims_to_replicate); } bool InferReduceShardingFromOperand(HloInstruction* instruction, bool may_combine_partial_sharding, bool is_spmd) { auto get_maybe_tuple_sharding = [&](HloSharding sharding) { if (instruction->shape().IsArray()) { return sharding; } std::vector<HloSharding> tuple(instruction->shape().tuple_shapes_size(), std::move(sharding)); return HloSharding::Tuple(instruction->shape(), tuple); }; auto* reduce = Cast<HloReduceInstruction>(instruction); bool changed = false; for (int64_t i = 0; i != reduce->inputs().size(); ++i) { HloInstruction* operand = reduce->inputs()[i]; if (!hlo_sharding_util::IsSpatiallyPartitioned(operand)) { continue; } if (operand->sharding().IsManual()) { changed |= MaybeImproveInstructionSubSharding( operand->sharding(), reduce, {i}, may_combine_partial_sharding, 
ComputeNonRootUsers(instruction) == 1); continue; } if (operand->sharding().IsReplicated() || (!is_spmd && absl::c_any_of(instruction->dimensions(), [operand](int64_t dim) { return operand->sharding().tile_assignment().dim(dim) > 1; }))) { changed |= MaybeImproveInstructionSharding( get_maybe_tuple_sharding( hlo_sharding_util::ReplicateAllDataDims(operand->sharding())), reduce, may_combine_partial_sharding, ComputeNonRootUsers(instruction) == 1); continue; } auto after_partial_replication = operand->sharding().IsReplicated() ? operand->sharding() : hlo_sharding_util::PartiallyReplicateTiledShardingOnDims( operand->sharding(), reduce->dimensions()); if (after_partial_replication.IsReplicated()) { changed |= MaybeImproveInstructionSharding( get_maybe_tuple_sharding(after_partial_replication), reduce, may_combine_partial_sharding, ComputeNonRootUsers(instruction) == 1); continue; } HloSharding new_sharding = get_maybe_tuple_sharding(hlo_sharding_util::RemoveShapeDimensions( after_partial_replication, reduce->dimensions())); changed |= MaybeImproveInstructionSharding( std::move(new_sharding), reduce, may_combine_partial_sharding, ComputeNonRootUsers(reduce) == 1); } return changed; } absl::StatusOr<bool> ProcessShardingInstruction( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads, bool replace_sharding_with_copy, absl::flat_hash_map<const HloInstruction*, std::vector<int64_t>>* unspecified_dims, std::vector<HloSharding>* saved_root_shardings, absl::flat_hash_map<int64_t, HloSharding>* saved_parameter_shardings, absl::flat_hash_map<HloInstruction*, int64_t>* instruction_to_shard_group_id, absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>* shard_group_id_to_shard_as_group, absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>* shard_group_id_to_shard_like_group, const std::vector<bool>* allow_spmd_sharding_propagation_to_parameters_vector, bool remove_unknown_shardings) { bool changed = false; const bool use_shard_group = instruction_to_shard_group_id && shard_group_id_to_shard_as_group && shard_group_id_to_shard_like_group; auto process_shard_group_instruction = [&](HloInstruction* instruction, bool replaced_with_copy) -> absl::StatusOr<bool> { if (replace_sharding_with_copy) { if (use_shard_group && instruction->has_sharding() && instruction->sharding().IsShardGroup()) { if (instruction->IsCustomCall("Sharding")) { CHECK(instruction->operand(0)->opcode() != HloOpcode::kParameter || (allow_spmd_sharding_propagation_to_parameters_vector && allow_spmd_sharding_propagation_to_parameters_vector->size() == module->entry_computation()->num_parameters() && allow_spmd_sharding_propagation_to_parameters_vector->at( instruction->operand(0)->parameter_number()))); } if (instruction->IsCustomCall("Sharding") && !replaced_with_copy) { HloSharding operand_sharding = instruction->operand(0)->has_sharding() ? 
instruction->operand(0)->sharding() : HloSharding::Unknown(); operand_sharding.SetShardGroup( instruction->sharding().GetShardGroup()); instruction->mutable_operand(0)->set_sharding( std::move(operand_sharding)); return true; } else { const int64_t shard_group_id = instruction->sharding().GetShardGroup().shard_group_id; (*instruction_to_shard_group_id)[instruction] = shard_group_id; if (instruction->sharding().IsShardAs()) { auto& shard_as_group = (*shard_group_id_to_shard_as_group)[shard_group_id]; if (!shard_as_group.empty()) { CHECK(ShapeUtil::SameDimensions( instruction->shape(), (*shard_as_group.begin())->shape())) << "Instruction: " << instruction->ToString() << " has different shape from the shapes of the other " "instructions within the same shard_as group: " << (*shard_as_group.begin())->shape().ToString(); } shard_as_group.insert(instruction); } else { auto& shard_like_group = (*shard_group_id_to_shard_like_group)[shard_group_id]; if (!shard_like_group.empty()) { CHECK(ShapeUtil::SameDimensions( instruction->shape(), (*shard_like_group.begin())->shape())) << "Instruction: " << instruction->ToString() << " has different shape from the shapes of the other " "instructions within the same shard_like group: " << (*shard_like_group.begin())->shape().ToString(); } shard_like_group.insert(instruction); } HloSharding sharding = instruction->sharding(); sharding.ClearShardGroup(); instruction->set_sharding(std::move(sharding)); } } } return false; }; for (HloComputation* computation : module->computations(execution_threads)) { auto instructions = computation->MakeInstructionPostOrder(); for (auto it = instructions.rbegin(); it != instructions.rend(); ++it) { HloInstruction* instruction = *it; if (instruction->IsCustomCall("Sharding")) { TF_RET_CHECK(instruction->has_sharding()) << "Sharding instruction must have a sharding attribute"; VLOG(3) << "ProcessShardingInstruction: " << instruction->ToString(); HloSharding original_sharding = instruction->sharding(); std::vector<int64_t> unspec_dims; TF_RETURN_IF_ERROR(sharding_op_util::ParseAttributes( Cast<HloCustomCallInstruction>(instruction)->opaque(), &unspec_dims)); bool replaced_with_copy = replace_sharding_with_copy && (!original_sharding.IsUnknown() || remove_unknown_shardings || instruction->operand(0)->opcode() == HloOpcode::kParameter); if (replaced_with_copy) { auto copy = computation->AddInstruction(HloInstruction::CreateUnary( instruction->shape(), HloOpcode::kCopy, instruction->mutable_operand(0))); TF_ASSIGN_OR_RETURN( std::ignore, computation->ReplaceInstruction( instruction, copy, false, false, false)); copy->set_sharding(std::move(original_sharding)); instruction = copy; changed = true; } TF_ASSIGN_OR_RETURN( bool shard_group_remove_instruction, process_shard_group_instruction(instruction, replaced_with_copy)); if (!unspec_dims.empty()) { absl::c_sort(unspec_dims); unspecified_dims->emplace(instruction, std::move(unspec_dims)); } else if (!instruction->operand(0)->has_sharding()) { instruction->mutable_operand(0)->set_sharding( instruction->sharding()); } if (shard_group_remove_instruction) { TF_ASSIGN_OR_RETURN(std::ignore, computation->ReplaceInstruction( instruction, instruction->mutable_operand(0), false, false, false)); } } else { TF_ASSIGN_OR_RETURN(std::ignore, process_shard_group_instruction( instruction, false)); } } } HloInstruction* root_instr = module->entry_computation()->root_instruction(); if (saved_root_shardings != nullptr && root_instr->shape().IsTuple() && root_instr->has_sharding()) { 
    saved_root_shardings->reserve(
        root_instr->sharding().tuple_elements().size());
    for (const HloSharding& sharding :
         root_instr->sharding().tuple_elements()) {
      saved_root_shardings->push_back(sharding);
    }
  }
  if (saved_parameter_shardings != nullptr) {
    auto params = module->entry_computation()->parameter_instructions();
    for (int64_t i = 0; i < params.size(); ++i) {
      if (params[i]->has_sharding()) {
        saved_parameter_shardings->insert({i, params[i]->sharding()});
      }
    }
  }
  return changed;
}

int64_t ComputeNonRootUsers(const HloInstruction* instr) {
  int64_t non_root_users = instr->users().size();
  for (int i = 0; i < instr->users().size(); ++i) {
    if (instr->users()[i] == instr->parent()->root_instruction()) {
      --non_root_users;
    }
  }
  return non_root_users;
}

absl::Status ShardingPropagation::NormalizeDomain(
    const DomainMetadata::Domain& domain, const DomainMetadata* metadata) {
  if (metadata != nullptr) {
    TF_ASSIGN_OR_RETURN(const auto& sharding_metadata,
                        ShardingMetadata::ToShardingMetadata(metadata));
    const auto& sharding = sharding_metadata->sharding();
    if (sharding != nullptr) {
      bool is_spatially_partitioned = !sharding->HasUniqueDevice();
      if (sharding->IsTuple()) {
        is_spatially_partitioned = absl::c_any_of(
            sharding->tuple_elements(),
            [](const HloSharding& s) { return !s.HasUniqueDevice(); });
      }
      if (is_spatially_partitioned) {
        for (HloInstruction* d : domain.exit_domains) {
          HloInstruction* operand = d->mutable_operand(0);
          if (!operand->has_sharding() || operand->sharding() != *sharding) {
            HloSharding operand_sharding = *sharding;
            if (operand->shape().IsTuple() && !sharding->IsTuple()) {
              operand_sharding =
                  HloSharding::SingleTuple(operand->shape(), *sharding);
            }
            operand->set_sharding(std::move(operand_sharding));
          }
        }
        return absl::OkStatus();
      }
    }
  }
  return ShardingMetadata::NormalizeShardingDomain(domain, metadata);
}

std::optional<HloSharding> ShardingPropagation::GetShardingFromUser(
    const HloInstruction& instruction, const HloInstruction& user,
    int64_t aggressiveness, bool is_spmd, const CallGraph& call_graph,
    const CustomCallShardingHelper* sharding_helper) {
  if (!CanPropagateThroughAtAggressiveLevel(user, aggressiveness)) {
    return std::nullopt;
  }
  if (!hlo_sharding_util::IsSpatiallyPartitioned(&user)) {
    return std::nullopt;
  }
  const bool may_combine_partial_sharding = is_spmd && aggressiveness > 0;
  switch (user.opcode()) {
    case HloOpcode::kBroadcast: {
      return InferBroadcastOperandSharding(user, is_spmd);
    }
    case HloOpcode::kConcatenate: {
      if (aggressiveness == 0) {
        return std::nullopt;
      }
      if (user.sharding().IsReplicated()) {
        return user.sharding();
      }
      const int64_t cdim = user.concatenate_dimension();
      auto& tile_assignment = user.sharding().tile_assignment();
      if (tile_assignment.dim(cdim) == 1) {
        return user.sharding();
      }
      if (is_spmd) {
        return user.sharding();
      }
      int64_t start_offset = 0;
      for (HloInstruction* op : user.operands()) {
        if (op == &instruction) {
          break;
        }
        start_offset += op->shape().dimensions(cdim);
      }
      const int64_t tile_shape = CeilOfRatio(
          user.shape().dimensions(cdim), tile_assignment.dimensions()[cdim]);
      std::vector<int64_t> start_indices(tile_assignment.num_dimensions());
      std::vector<int64_t> end_indices(tile_assignment.dimensions().begin(),
                                       tile_assignment.dimensions().end());
      start_indices[cdim] = start_offset / tile_shape;
      end_indices[cdim] = CeilOfRatio(
          start_offset + instruction.shape().dimensions(cdim), tile_shape);
      auto new_tile_assignment =
          tile_assignment.array().Slice(start_indices, end_indices);
      if (new_tile_assignment.num_elements() == 1) {
        return
HloSharding::AssignDevice(*new_tile_assignment.begin(), user.sharding().metadata()); } return HloSharding::Tile(std::move(new_tile_assignment), user.sharding().metadata()); } case HloOpcode::kConvolution: { auto dot_dims = dot_as_convolution_util::ParseConvolutionDimsInfo(&user); if (dot_dims.conv_spatial_dims.empty()) { int64_t op_idx = user.operand_index(&instruction); return hlo_sharding_util::InferDotOperandSharding( &user, op_idx, dot_dims, true, may_combine_partial_sharding); } return std::nullopt; } case HloOpcode::kDynamicSlice: case HloOpcode::kDynamicUpdateSlice: { if (aggressiveness == 0) { return std::nullopt; } if (user.sharding().IsReplicated()) { return user.sharding(); } if (user.opcode() == HloOpcode::kDynamicUpdateSlice && &instruction == user.operand(0)) { return user.sharding(); } const HloInstruction* operand = user.opcode() == HloOpcode::kDynamicSlice ? user.operand(0) : user.operand(1); if (&instruction != operand) { return std::nullopt; } std::vector<int64_t> slice_dims; for (int64_t i = 0; i < user.shape().rank(); ++i) { if (user.shape().dimensions(i) != operand->shape().dimensions(i)) { slice_dims.push_back(i); } } return hlo_sharding_util::PartiallyReplicateTiledShardingOnDims( user.sharding(), slice_dims); } case HloOpcode::kReduceWindow: { auto* reduce_window = Cast<HloReduceWindowInstruction>(&user); if (!absl::c_linear_search(reduce_window->inputs(), &instruction)) { return std::nullopt; } if (reduce_window->shape().IsTuple()) { auto sub_sharding = reduce_window->sharding().GetSubSharding( reduce_window->shape(), {reduce_window->operand_index(&instruction)}); return sub_sharding; } return reduce_window->sharding(); } case HloOpcode::kReshape: { return hlo_sharding_util::PropagateShardingThroughReshape( user.shape(), instruction.shape(), user.sharding()); } case HloOpcode::kPad: { if (&instruction != user.operand(0)) { return std::nullopt; } return user.sharding(); } case HloOpcode::kSlice: { return user.sharding(); } case HloOpcode::kTranspose: { std::vector<int64_t> reverse_dimensions(user.dimensions().size()); for (int64_t i = 0; i < user.dimensions().size(); ++i) { reverse_dimensions[user.dimensions(i)] = i; } return hlo_sharding_util::TransposeSharding(user.sharding(), reverse_dimensions); } case HloOpcode::kTuple: { auto sub_sharding = user.sharding().GetSubSharding( user.shape(), {user.operand_index(&instruction)}); for (int64_t i = 0; i < user.shape().tuple_shapes_size(); ++i) { if (user.operand(i) == &instruction) { HloSharding alternative_sub_sharding = user.sharding().GetSubSharding(user.shape(), {i}); if (hlo_sharding_util::IsShardingMoreSpecific( alternative_sub_sharding, sub_sharding)) { sub_sharding = alternative_sub_sharding; } } } return sub_sharding; } case HloOpcode::kGetTupleElement: { int64_t sharding_index = 0; for (int i = 0; i < instruction.shape().tuple_shapes_size(); ++i) { if (i == user.tuple_index()) { break; } if (instruction.shape().tuple_shapes(i).IsArray()) { sharding_index += 1; } else { sharding_index += ShapeUtil::GetLeafCount(instruction.shape().tuple_shapes(i)); } } auto base_instruction_sharding = [&](const HloSharding& user_sharding) { if (instruction.has_sharding()) { return instruction.sharding(); } else { std::vector<HloSharding> shardings; ShapeUtil::ForEachSubshape( instruction.shape(), [&](const Shape& sub_shape, const ShapeIndex& index) { if (ShapeUtil::IsLeafIndex(instruction.shape(), index)) { shardings.push_back(hlo_sharding_util::ReplicateAllDataDims( user_sharding, sub_shape.dimensions_size())); } }); return 
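// kGetTupleElement maps the user's sharding back onto the leaves of the
// operand's (possibly nested) tuple sharding; `sharding_index` above counts
// the leaf shardings preceding the selected element. Worked example with
// hypothetical shapes: for operand shape (f32[4], (f32[2], f32[2]), f32[8])
// and tuple_index == 2, sharding_index = 1 + 2 = 3, i.e. the fourth leaf.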
HloSharding::Tuple(instruction.shape(), shardings); } }; if (user.shape().IsArray()) { HloSharding new_sharding = base_instruction_sharding(user.sharding()); new_sharding.tuple_elements()[sharding_index] = user.sharding(); return new_sharding; } else { if (user.sharding().tuple_elements().empty()) { return std::nullopt; } HloSharding new_sharding = base_instruction_sharding(user.sharding().tuple_elements()[0]); for (int64_t i = 0; i < user.sharding().tuple_elements().size(); ++i) { new_sharding.tuple_elements()[sharding_index + i] = user.sharding().tuple_elements()[i]; } return new_sharding; } } case HloOpcode::kDot: { int64_t op_idx = user.operand_index(&instruction); auto dnums = dot_as_convolution_util::ParseDotGeneralFromDot(&user); return hlo_sharding_util::InferDotOperandSharding( &user, op_idx, dnums, true, may_combine_partial_sharding); } case HloOpcode::kReduce: { if (instruction.shape().rank() == 0) { return std::nullopt; } auto user_sharding = user.shape().IsTuple() ? user.sharding().GetSubSharding( user.shape(), {user.operand_index(&instruction)}) : user.sharding(); if (!user_sharding.IsTileMaximal()) { std::vector<int64_t> target_tile_assignment_dimensions( instruction.shape().rank() + (user_sharding.ReplicateOnLastTileDim() ? 1 : 0) + user_sharding.subgroup_types().size()); const auto& dimensions = user.dimensions(); int64_t next_output_dim = 0; for (int64_t i = 0; i < target_tile_assignment_dimensions.size(); ++i) { if (absl::c_find(dimensions, i) == dimensions.end()) { target_tile_assignment_dimensions[i] = user_sharding.tile_assignment().dim(next_output_dim++); } else { target_tile_assignment_dimensions[i] = 1; } } auto tile_assignment = user_sharding.tile_assignment().Reshape( target_tile_assignment_dimensions); user_sharding = user_sharding.ReplicateOnLastTileDim() ? 
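// For a reduce user, the output sharding is mapped back onto the operand by
// inserting a tile dimension of 1 at every reduced dimension. Worked example
// (hypothetical): operand f32[8,16] reduced over dimensions={0}, with the
// reduce output sharded {devices=[2]0,1}, yields the operand sharding
// {devices=[1,2]0,1}.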
HloSharding::PartialTile(tile_assignment, user_sharding.metadata()) : HloSharding::Subgroup(tile_assignment, user_sharding.subgroup_types(), user_sharding.metadata()); } const auto* reduce = Cast<const HloReduceInstruction>(&user); for (const HloInstruction* operand : reduce->inputs()) { if (operand != &instruction && operand->has_sharding()) { hlo_sharding_util::MergeShardingIfCompatible(operand->sharding(), &user_sharding); } } return user_sharding; } case HloOpcode::kSort: { HloSharding user_sharding = user.sharding(); if (user_sharding.IsTuple()) { return user_sharding.GetSubSharding(user.shape(), {user.operand_index(&instruction)}); } return user_sharding; } case HloOpcode::kReverse: { return hlo_sharding_util::ReverseSharding(user.sharding(), user.dimensions()); } case HloOpcode::kOutfeed: { if (&instruction != user.operand(0)) { return std::nullopt; } std::vector<Shape> operand_shapes(user.operand_count()); for (int i = 0; i < user.operand_count(); ++i) { operand_shapes[i] = user.operand(i)->shape(); } return user.sharding().GetSubSharding( ShapeUtil::MakeTupleShape(operand_shapes), {0}); } case HloOpcode::kGather: { if (&instruction == user.operand(1)) { return hlo_sharding_util:: GatherIndexShardingFromOutputIndexPassthroughDimensions( user.sharding(), &user); } if (is_spmd) { return hlo_sharding_util::GatherOperandShardingFromOutput( user.sharding(), user, call_graph); } return std::nullopt; } case HloOpcode::kScatter: { auto& scatter_user = *Cast<HloScatterInstruction>(&user); const int64_t operand_count = scatter_user.scatter_operand_count(); auto scatter_operands = scatter_user.scatter_operands(); auto scatter_indices = scatter_user.scatter_indices(); auto scatter_updates = scatter_user.scatter_updates(); const int64_t operand_index = absl::c_find(scatter_operands, &instruction) - scatter_operands.cbegin(); if (operand_index < operand_count) { return user.sharding().IsTuple() ? user.sharding().GetSubSharding( user.shape(), {operand_index}) : user.sharding(); } if (&instruction == scatter_indices) { std::vector<const HloInstruction*> partitioned_updates; for (const HloInstruction* update : scatter_updates) { if (hlo_sharding_util::IsSpatiallyPartitioned(update)) { partitioned_updates.push_back(update); } } if (partitioned_updates.empty()) { return std::nullopt; } std::vector<HloSharding> shardings; absl::c_transform( partitioned_updates, std::back_inserter(shardings), [&scatter_user](const HloInstruction* update) { return hlo_sharding_util:: ScatterIndexShardingFromUpdateIndexPassthroughDimensions( update->sharding(), &scatter_user); }); return hlo_sharding_util::FindCommonSharding(shardings); } const int64_t update_index = absl::c_find(scatter_updates, &instruction) - scatter_updates.cbegin(); CHECK_LE(update_index, operand_count); auto from_indices = hlo_sharding_util::IsSpatiallyPartitioned(scatter_indices) ? hlo_sharding_util:: ScatterUpdateShardingFromIndexIndexPassthroughDimensions( scatter_indices->sharding(), &scatter_user) : HloSharding::Replicate(); if (is_spmd) { auto from_output = hlo_sharding_util::ScatterUpdateShardingFromOutput( user.sharding().IsTuple() ? 
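// Scatter users propagate along three routes: scatter operands take the
// (per-element) output sharding directly; scatter indices are inferred from
// the index-passthrough dimensions of any partitioned update; and scatter
// updates combine the indices-derived sharding with the output-derived one
// (SPMD only), preferring whichever candidate is not tile-maximal.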
user.sharding().GetSubSharding(user.shape(), {update_index}) : user.sharding(), scatter_user, call_graph); if (from_output.has_value()) { hlo_sharding_util::MergeShardingIfCompatible(from_indices, &*from_output); if (!from_output->IsTileMaximal()) { return from_output; } } } if (!from_indices.IsTileMaximal()) { return from_indices; } return std::nullopt; } case HloOpcode::kCustomCall: { bool compatible_shapes = ShapeUtil::CompatibleIgnoringElementType( instruction.shape(), user.shape()); if (!compatible_shapes) { return std::nullopt; } if (!sharding_helper) { return user.sharding(); } if (sharding_helper->CanPropagateShardingToOperands(&user)) { return user.sharding(); } return std::nullopt; } default: { if (ShapeUtil::CompatibleIgnoringElementType(instruction.shape(), user.shape())) { return user.sharding(); } return std::nullopt; } } } bool AggressiveConcatOperandShardingCanPassThrough( const HloInstruction* concat_operand) { return ( hlo_sharding_util::IsSpatiallyPartitioned(concat_operand) && (concat_operand->has_sharding() && concat_operand->sharding().NumTiles() > 1) && concat_operand->opcode() == HloOpcode::kReshape && (concat_operand->operand(0)->opcode() == HloOpcode::kParameter || concat_operand->operand(0)->opcode() == HloOpcode::kGetTupleElement)); } bool InferDynamicUpdateSliceShardingFromOperand1( HloInstruction* instruction, bool may_combine_partial_sharding) { CHECK(instruction->opcode() == HloOpcode::kDynamicSlice || instruction->opcode() == HloOpcode::kDynamicUpdateSlice); const HloInstruction* operand = instruction->opcode() == HloOpcode::kDynamicSlice ? instruction->operand(0) : instruction->operand(1); if (!hlo_sharding_util::IsSpatiallyPartitioned(operand)) { return false; } CHECK(!operand->sharding().IsManual()); std::vector<int64_t> slice_dims; for (int64_t i = 0; i < instruction->shape().rank(); ++i) { if (instruction->shape().dimensions(i) != operand->shape().dimensions(i)) { slice_dims.push_back(i); } } return MaybeImproveInstructionSharding( hlo_sharding_util::PartiallyReplicateTiledShardingOnDims( operand->sharding(), slice_dims), instruction, may_combine_partial_sharding, ComputeNonRootUsers(instruction) == 1); } bool InferDynamicUpdateSliceShardingFromOperand0( HloInstruction* instruction, bool may_combine_partial_sharding) { CHECK_EQ(instruction->opcode(), HloOpcode::kDynamicUpdateSlice); if (!hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(0))) { return false; } return MaybeImproveInstructionSharding(instruction->operand(0)->sharding(), instruction, may_combine_partial_sharding); } bool ShardingPropagation::InferShardingFromShardGroup( HloInstruction* instruction, int64_t aggressiveness, const absl::flat_hash_set<HloInstruction*>& shard_group) { if (!CanPropagateThroughAtAggressiveLevel(*instruction, aggressiveness)) { return false; } if (instruction->has_sharding() && instruction->sharding().IsManual()) { return false; } if (instruction->IsCustomCall(spmd::kShardBarrierTo)) { return false; } if (!instruction->has_sharding() || instruction->sharding().IsTileMaximal()) { for (const HloInstruction* member : shard_group) { if (!member->has_sharding() || !member->sharding().IsManual() || member == instruction) { continue; } instruction->set_sharding(member->sharding()); return true; } } const bool may_combine_partial_sharding = is_spmd_ && aggressiveness > 0; bool changed = false; for (const HloInstruction* member : shard_group) { if (member == instruction || member->IsCustomCall(spmd::kShardBarrierFrom)) { continue; } changed |= 
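// Each remaining group member's sharding is merged into `instruction`
// (partial shardings may combine when aggressiveness > 0). Members that are
// kShardBarrierFrom custom-calls are skipped above so that shard barriers do
// not leak shardings into the rest of the group.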
MaybeImproveInstructionSharding(member->sharding(), instruction, may_combine_partial_sharding); } return changed; } bool ShardingPropagation::InferShardingFromOperands( HloInstruction* instruction, const ComputationMap& computation_map, int64_t aggressiveness, const CallGraph& call_graph, const absl::flat_hash_set<absl::string_view>& execution_threads) { if (!CanPropagateThroughAtAggressiveLevel(*instruction, aggressiveness)) { return false; } if (instruction->has_sharding() && instruction->sharding().IsManual()) { return false; } const bool custom_call_condition = instruction->opcode() == HloOpcode::kCustomCall && instruction->shape().IsTuple(); const bool async_instr_condition = instruction->IsAsynchronous() && !HloInstruction::IsThreadIncluded(instruction->async_execution_thread(), execution_threads); if ((!instruction->has_sharding() || instruction->sharding().IsTileMaximal()) && (instruction->shape().IsArray() || instruction->opcode() == HloOpcode::kReduce || instruction->opcode() == HloOpcode::kSort || instruction->opcode() == HloOpcode::kReduceWindow || custom_call_condition || async_instr_condition)) { for (const HloInstruction* op : instruction->operands()) { if (!op->has_sharding() || !op->sharding().IsManual()) continue; if (instruction->IsCustomCall("SPMDShardToFullShape")) { return false; } if (aggressiveness == 0 && (instruction->opcode() == HloOpcode::kConcatenate || instruction->opcode() == HloOpcode::kDynamicSlice)) { return false; } instruction->set_sharding( HloSharding::Manual(op->sharding().metadata()) .NormalizeTupleSharding(instruction->shape())); return true; } } const bool may_combine_partial_sharding = is_spmd_ && aggressiveness > 0; if (!SupportSpatialPartitioning( instruction, computation_map, is_spmd_, allow_spmd_sharding_propagation_to_output_, false, sharding_helper_.get())) { if (instruction->shape().IsTuple() || instruction->operand_count() == 0 || instruction == instruction->parent()->root_instruction() || instruction->HasSideEffect()) { return false; } for (const HloInstruction* op : instruction->operands()) { if (op->has_sharding() && op->sharding().IsTileMaximal() && !op->sharding().HasUniqueDevice()) { return MaybeImproveInstructionSharding(op->sharding(), instruction, may_combine_partial_sharding); } } return false; } auto get_maybe_tuple_sharding = [&](HloSharding sharding) { if (instruction->shape().IsArray()) { return sharding; } std::vector<HloSharding> tuple(instruction->shape().tuple_shapes_size(), std::move(sharding)); return HloSharding::Tuple(instruction->shape(), tuple); }; switch (instruction->opcode()) { case HloOpcode::kGetTupleElement: { const HloInstruction* operand = instruction->operand(0); if (!hlo_sharding_util::IsSpatiallyPartitioned(operand)) { return false; } HloSharding new_sharding = operand->sharding().GetSubSharding( operand->shape(), {instruction->tuple_index()}); if (new_sharding.IsManual()) { instruction->set_sharding(std::move(new_sharding)); return true; } return MaybeImproveInstructionSharding( std::move(new_sharding), instruction, may_combine_partial_sharding, ComputeNonRootUsers(instruction) == 1); } case HloOpcode::kTuple: { if (absl::c_none_of( instruction->operands(), [](const HloInstruction* hlo) { return hlo_sharding_util::IsSpatiallyPartitioned(hlo); })) { return false; } const Shape& shape = instruction->shape(); std::vector<HloSharding> sub_shardings; if (instruction->has_sharding()) { sub_shardings = instruction->sharding().tuple_elements(); } else { sub_shardings.assign(HloSharding::RequiredLeaves(shape), 
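// kTuple inference walks the flattened leaves of the operands: an operand's
// leaf sharding replaces the current sub-sharding whenever it is strictly
// more specific, and the tuple sharding is rebuilt from the merged leaves.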
HloSharding::Replicate()); } auto is_more_specific = [instruction](const HloSharding& operand_sharding, const HloSharding& existing) { return !instruction->has_sharding() || hlo_sharding_util::IsShardingMoreSpecific(operand_sharding, existing); }; int64_t sub_sharding_index = 0; for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) { const HloInstruction* operand = instruction->operand(i); if (operand->has_sharding()) { if (operand->shape().IsTuple()) { for (int64_t j = 0, e = ShapeUtil::GetLeafCount(operand->shape()); j < e; ++j) { if (is_more_specific(operand->sharding().tuple_elements()[j], sub_shardings[sub_sharding_index + j])) { sub_shardings[sub_sharding_index + j] = operand->sharding().tuple_elements()[j]; } } } else { std::optional<HloSharding> op_sharding = hlo_sharding_util::GetOutputSharding(operand); CHECK(op_sharding.has_value()) << "Expected sharding for " << operand->ToString(); if (is_more_specific(op_sharding.value(), sub_shardings[sub_sharding_index])) { sub_shardings[sub_sharding_index] = op_sharding.value(); } } } sub_sharding_index += ShapeUtil::GetLeafCount(operand->shape()); } HloSharding new_sharding = HloSharding::Tuple(shape, sub_shardings); if (!instruction->has_sharding() || new_sharding != instruction->sharding()) { instruction->set_sharding(std::move(new_sharding)); return true; } return false; } case HloOpcode::kReduce: { return InferReduceShardingFromOperand( instruction, may_combine_partial_sharding, is_spmd_); } case HloOpcode::kBroadcast: { if (aggressiveness < 3) { return false; } const HloInstruction* op = instruction->operand(0); if (!hlo_sharding_util::IsSpatiallyPartitioned(op) || op->sharding().IsReplicated()) { return false; } std::vector<int64_t> target_tile_assignment_dimensions; const auto& dimensions = instruction->dimensions(); for (int64_t i = 0; i < instruction->shape().rank(); ++i) { auto it = absl::c_find(dimensions, i); if (it == dimensions.end()) { target_tile_assignment_dimensions.push_back(1); } else { const int64_t source_dim = std::distance(dimensions.begin(), it); target_tile_assignment_dimensions.push_back( op->sharding().tile_assignment().dim(source_dim)); } } for (int64_t i = op->sharding().TiledDataRank(); i < op->sharding().tile_assignment().num_dimensions(); ++i) { target_tile_assignment_dimensions.push_back( op->sharding().tile_assignment().dim(i)); } auto new_tile_assignment = op->sharding().tile_assignment().Reshape( target_tile_assignment_dimensions); HloSharding new_sharding = op->sharding().ReplicateOnLastTileDim() ? 
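// Broadcast forwards the operand tiling onto the matching output dimensions
// and leaves newly introduced dimensions untiled. Worked example
// (hypothetical): operand f32[8] with {devices=[2]0,1}, broadcast to f32[4,8]
// via dimensions={1}, produces {devices=[1,2]0,1}.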
HloSharding::PartialTile(new_tile_assignment, op->sharding().metadata()) : HloSharding::Subgroup(new_tile_assignment, op->sharding().subgroup_types(), op->sharding().metadata()); return MaybeImproveInstructionSharding( std::move(new_sharding), instruction, may_combine_partial_sharding, ComputeNonRootUsers(instruction) == 1); } case HloOpcode::kConcatenate: { const HloInstruction* operand = PickRepresentativeOperand(instruction); if (!operand || !hlo_sharding_util::IsSpatiallyPartitioned(operand)) { return false; } if (aggressiveness == 0) { for (const HloInstruction* concat_operand : instruction->operands()) { if (!AggressiveConcatOperandShardingCanPassThrough(concat_operand)) { return false; } const auto& tile_assignment = concat_operand->sharding().tile_assignment(); for (int64_t i = 0; i < instruction->shape().rank(); ++i) { if (absl::c_linear_search(instruction->dimensions(), i) && tile_assignment.dim(i) > 1) { return false; } } } } return MaybeImproveInstructionSharding( operand->sharding(), instruction, may_combine_partial_sharding, ComputeNonRootUsers(instruction) == 1); } case HloOpcode::kConvolution: return InferConvolutionShardingFromOperands( instruction, call_graph, aggressiveness, may_combine_partial_sharding, is_spmd_); case HloOpcode::kTranspose: { const HloInstruction* input = instruction->operand(0); if (!hlo_sharding_util::IsSpatiallyPartitioned(input)) { return false; } HloSharding sharding = hlo_sharding_util::TransposeSharding( input->sharding(), instruction->dimensions()); return MaybeImproveInstructionSharding( std::move(sharding), instruction, may_combine_partial_sharding, ComputeNonRootUsers(instruction) == 1); } case HloOpcode::kReduceWindow: { auto* reduce_window = Cast<HloReduceWindowInstruction>(instruction); auto has_dilation = [](const WindowDimension& dimensions) { return dimensions.base_dilation() > 1 || dimensions.window_dilation() > 1; }; if (absl::c_any_of(instruction->window().dimensions(), has_dilation)) { VLOG(2) << "Not applying sharding to reduce window because dilatation " "isn't supported yet: " << reduce_window->ToString(); return false; } bool changed = false; for (HloInstruction* operand : reduce_window->inputs()) { if (!hlo_sharding_util::IsSpatiallyPartitioned(operand)) { continue; } changed |= MaybeImproveInstructionSharding( get_maybe_tuple_sharding(operand->sharding()), reduce_window, may_combine_partial_sharding, ComputeNonRootUsers(instruction) == 1); } return changed; } case HloOpcode::kSelectAndScatter: { const HloInstruction* lhs = instruction->operand(0); if (!hlo_sharding_util::IsSpatiallyPartitioned(lhs)) { return false; } auto has_base_dilation = [](const WindowDimension& dimensions) { return dimensions.base_dilation() > 1; }; if (absl::c_any_of(instruction->window().dimensions(), has_base_dilation)) { VLOG(2) << "Not applying sharding to select-and-scatter because " "base dilation isn't supported yet: " << instruction->ToString(); return false; } return MaybeImproveInstructionSharding( lhs->sharding(), instruction, may_combine_partial_sharding, ComputeNonRootUsers(instruction) == 1); } case HloOpcode::kReshape: { if (!hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(0))) { return false; } HloSharding new_sharding = hlo_sharding_util::PropagateShardingThroughReshape( instruction->operand(0)->shape(), instruction->shape(), instruction->operand(0)->sharding()); return MaybeImproveInstructionSharding( std::move(new_sharding), instruction, may_combine_partial_sharding, ComputeNonRootUsers(instruction) == 1); return 
false; } case HloOpcode::kReverse: { const HloInstruction* operand = instruction->operand(0); if (!hlo_sharding_util::IsSpatiallyPartitioned(operand)) { return false; } return MaybeImproveInstructionSharding( hlo_sharding_util::ReverseSharding(operand->sharding(), instruction->dimensions()), instruction, may_combine_partial_sharding, ComputeNonRootUsers(instruction) == 1); } case HloOpcode::kDot: { const auto& dnums = dot_as_convolution_util::ParseDotGeneralFromDot(instruction); return InferDotShardingFromOperands(instruction, call_graph, dnums, may_combine_partial_sharding, is_spmd_); } case HloOpcode::kParameter: { auto parent_it = computation_map.find(instruction->parent()); if (parent_it == computation_map.end()) { return false; } const HloInstruction* parent = parent_it->second; switch (parent->opcode()) { case HloOpcode::kConditional: { for (int64_t i = 1; i < parent->operand_count(); ++i) { if (parent->called_computations()[i - 1] == instruction->parent()) { if (parent->operand(i)->has_sharding()) { return MaybeImproveInstructionSharding( parent->operand(i)->sharding(), instruction, may_combine_partial_sharding); } return false; } } return false; } case HloOpcode::kCall: { int64_t i = instruction->parameter_number(); if (parent->operand(i)->has_sharding()) { return MaybeImproveInstructionSharding( parent->operand(i)->sharding(), instruction, may_combine_partial_sharding); } return false; } default: return false; } } case HloOpcode::kSort: { const HloInstruction* operand = PickRepresentativeOperand(instruction); if (!operand || !hlo_sharding_util::IsSpatiallyPartitioned(operand)) { return false; } HloSortInstruction* sort = DynCast<HloSortInstruction>(instruction); CHECK(sort); const int64_t sort_dim = sort->sort_dimension(); if (!operand->sharding().IsTileMaximal() && operand->sharding().tile_assignment().dim(sort_dim) != 1) { if (!hlo_sharding_util::IsSortOperandShardingMovable(operand, sort_dim)) return false; } if (instruction->shape().IsTuple()) { return MaybeImproveInstructionSharding( HloSharding::SingleTuple(instruction->shape(), operand->sharding()), instruction, may_combine_partial_sharding, ComputeNonRootUsers(instruction) == 1); } else { return MaybeImproveInstructionSharding( operand->sharding(), instruction, may_combine_partial_sharding, ComputeNonRootUsers(instruction) == 1); } } case HloOpcode::kDynamicSlice: { return InferDynamicUpdateSliceShardingFromOperand1( instruction, may_combine_partial_sharding); } case HloOpcode::kDynamicUpdateSlice: { bool changed = InferDynamicUpdateSliceShardingFromOperand1( instruction, may_combine_partial_sharding); changed |= InferDynamicUpdateSliceShardingFromOperand0( instruction, may_combine_partial_sharding); return changed; } case HloOpcode::kGather: { bool changed = false; const GatherDimensionNumbers& dnums = instruction->gather_dimension_numbers(); if (!dnums.operand_batching_dims().empty()) { hlo_sharding_util::GatherScatterParallelDims explict_batch_dims; explict_batch_dims.operand_parallel_dims.assign( dnums.operand_batching_dims().begin(), dnums.operand_batching_dims().end()); explict_batch_dims.indices_parallel_dims.assign( dnums.start_indices_batching_dims().begin(), dnums.start_indices_batching_dims().end()); changed |= InferGatherParallelShardingFromOperands( instruction, explict_batch_dims, may_combine_partial_sharding); } if (hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(1))) { HloSharding new_sharding = hlo_sharding_util:: GatherOutputShardingFromIndexIndexPassthroughDimensions( 
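// Gather inference mirrors scatter: the output sharding is inferred from the
// index-passthrough dimensions of the start indices, from explicit batching
// dimensions, and (under SPMD) from the operand's passthrough dimensions
// after partially replicating any operand-parallel dimensions.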
instruction->operand(1)->sharding(), instruction); changed |= MaybeImproveInstructionSharding( std::move(new_sharding), instruction, may_combine_partial_sharding); } if (is_spmd_) { auto gather_parallel_dims = hlo_sharding_util::GetGatherParallelBatchDims(*instruction, call_graph); if (gather_parallel_dims) { changed |= InferGatherParallelShardingFromOperands( instruction, *gather_parallel_dims, may_combine_partial_sharding); } if (hlo_sharding_util::IsSpatiallyPartitioned( instruction->operand(0))) { absl::Span<const int64_t> operand_parallel_dims; if (gather_parallel_dims) { operand_parallel_dims = absl::MakeConstSpan( gather_parallel_dims->operand_parallel_dims); } HloSharding filtered_operand_sharding = hlo_sharding_util::PartiallyReplicateTiledShardingOnDims( instruction->operand(0)->sharding(), operand_parallel_dims); auto maybe_from_data = hlo_sharding_util:: GatherOutputShardingFromOperandOperandPassthroughDimensions( filtered_operand_sharding, *instruction); if (maybe_from_data) { changed |= MaybeImproveInstructionSharding( std::move(*maybe_from_data), instruction, may_combine_partial_sharding); } } } return changed; } case HloOpcode::kScatter: { auto& scatter = *Cast<HloScatterInstruction>(instruction); bool changed = false; const ScatterDimensionNumbers& dnums = instruction->scatter_dimension_numbers(); if (!dnums.input_batching_dims().empty()) { hlo_sharding_util::GatherScatterParallelDims explict_batch_dims; explict_batch_dims.operand_parallel_dims.assign( dnums.input_batching_dims().begin(), dnums.input_batching_dims().end()); explict_batch_dims.indices_parallel_dims.assign( dnums.scatter_indices_batching_dims().begin(), dnums.scatter_indices_batching_dims().end()); changed |= InferScatterParallelShardingFromOperands( instruction, explict_batch_dims, may_combine_partial_sharding); } const int64_t operand_count = scatter.scatter_operand_count(); auto scatter_operands = scatter.scatter_operands(); auto scatter_indices = scatter.scatter_indices(); auto scatter_updates = scatter.scatter_updates(); if (is_spmd_) { for (int64_t i = 0; i != operand_count; ++i) { if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_operands[i])) { changed |= MaybeImproveInstructionSubSharding( scatter_operands[i]->sharding(), instruction, {i}, may_combine_partial_sharding); } } if (!hlo_sharding_util::IsSpatiallyPartitioned(scatter_indices) && absl::c_none_of(scatter_updates, [](const HloInstruction* update) { return hlo_sharding_util::IsSpatiallyPartitioned(update); })) { return changed; } if (auto scatter_parallel_dims = hlo_sharding_util::GetScatterParallelBatchDims(*instruction, call_graph)) { changed |= InferScatterParallelShardingFromOperands( instruction, *scatter_parallel_dims, may_combine_partial_sharding); } for (int64_t i = 0; i != operand_count; ++i) { if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_updates[i])) { auto maybe_from_update = hlo_sharding_util::ScatterOutputShardingFromUpdate( scatter_updates[i]->sharding(), scatter); if (maybe_from_update) { changed |= MaybeImproveInstructionSubSharding( std::move(*maybe_from_update), instruction, {i}, may_combine_partial_sharding); } } } } else { for (int64_t i = 0; i != operand_count; ++i) { changed |= MaybeImproveInstructionSubSharding( HloSharding::Replicate(), instruction, {i}, may_combine_partial_sharding); } } return changed; } case HloOpcode::kWhile: { if (!instruction->operand(0)->has_sharding()) { return false; } auto sharding = instruction->operand(0)->sharding(); if (instruction->has_sharding()) { 
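// A while loop's sharding must stay consistent across the loop operand, the
// body parameter and root, and the condition parameter. Any sharding already
// on the while is folded into the operand-provided candidate here, and
// MaybeComputationPropagation keeps the related instructions in sync.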
hlo_sharding_util::MergeSharding(instruction->sharding(), &sharding, may_combine_partial_sharding); } return MaybeImproveInstructionSharding(std::move(sharding), instruction, may_combine_partial_sharding); } case HloOpcode::kCustomCall: { HloSharding inferred_operand_sharding = HloSharding::Replicate(); if (auto* partitioner = GetCustomCallPartitioner(instruction->custom_call_target()); partitioner && partitioner->IsCustomCallShardable(instruction)) { if (auto sharding = partitioner->InferShardingFromOperands(instruction)) { inferred_operand_sharding = *sharding; } else { return false; } } else if (sharding_helper_->IsCustomCallShardable(instruction)) { if (auto sharding = sharding_helper_->InferShardingFromOperands(instruction)) { inferred_operand_sharding = *sharding; } else { return false; } } else { const HloInstruction* operand = PickRepresentativeOperand(instruction); if (!operand || !hlo_sharding_util::IsSpatiallyPartitioned(operand)) { return false; } inferred_operand_sharding = operand->sharding(); } return MaybeImproveInstructionSharding( inferred_operand_sharding, instruction, may_combine_partial_sharding, ComputeNonRootUsers(instruction) == 1); } default: { if (instruction->IsElementwise() && may_combine_partial_sharding) { bool changed = false; for (auto operand : instruction->operands()) { if (hlo_sharding_util::IsSpatiallyPartitioned(operand)) { if (instruction->opcode() == HloOpcode::kRng) { changed |= MaybeImproveInstructionSharding( hlo_sharding_util::ReplicateAllDataDims( operand->sharding(), instruction->shape().rank()), instruction, may_combine_partial_sharding, ComputeNonRootUsers(instruction) == 1); continue; } changed |= MaybeImproveInstructionSharding( operand->sharding(), instruction, may_combine_partial_sharding, instruction->operands().size() == 1 && ComputeNonRootUsers(instruction) == 1); } } return changed; } const HloInstruction* operand = PickRepresentativeOperand(instruction); if (!operand || !hlo_sharding_util::IsSpatiallyPartitioned(operand)) { return false; } return MaybeImproveInstructionSharding( operand->sharding(), instruction, may_combine_partial_sharding, ComputeNonRootUsers(instruction) == 1); } } return false; } bool ShardingPropagation::InferShardingFromUsers( HloInstruction* instruction, const ShardingPropagation::ComputationMap& computation_map, int64_t aggressiveness, bool is_spmd, const CustomCallShardingHelper* sharding_helper, const CallGraph& call_graph) { if (aggressiveness < 2 && instruction->opcode() == HloOpcode::kBroadcast) { return false; } if (instruction->has_sharding() && instruction->sharding().IsManual()) { return false; } if (!instruction->has_sharding() || instruction->sharding().IsTileMaximal()) { for (const HloInstruction* user : instruction->users()) { if (!user->has_sharding() || user->IsCustomCall("SPMDFullToShardShape")) continue; if (instruction->shape().IsArray() && user->sharding().IsManual()) { instruction->set_sharding( HloSharding::Manual(user->sharding().metadata())); return true; } else { std::optional<HloSharding> user_sharding = ShardingPropagation::GetShardingFromUser( *instruction, *user, aggressiveness, is_spmd, call_graph, sharding_helper); if (user_sharding && user_sharding->IsManual()) { instruction->set_sharding(std::move(*user_sharding)); return true; } } } } if (!SupportSpatialPartitioning( instruction, computation_map, is_spmd, false, allow_spmd_sharding_propagation_to_parameters_, sharding_helper)) { return false; } bool improved_sharding = false; const bool may_combine_partial_sharding = is_spmd && 
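// InferShardingFromUsers first handles manual shardings, which propagate
// eagerly and bypass the usual improvement check, and then merges every
// user-derived candidate into the instruction, letting a registered
// custom-call partitioner rewrite the candidate first when one applies.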
aggressiveness > 0; for (const HloInstruction* user : instruction->users()) { if (user->opcode() == HloOpcode::kRngBitGenerator) { instruction->set_sharding(HloSharding::Replicate()); return true; } std::optional<HloSharding> user_sharding = ShardingPropagation::GetShardingFromUser(*instruction, *user, aggressiveness, is_spmd, call_graph, sharding_helper); if (user_sharding && instruction->opcode() == HloOpcode::kCustomCall) { if (auto* partitioner = GetCustomCallPartitioner(instruction->custom_call_target())) { if (partitioner->IsCustomCallShardable(instruction)) { user_sharding = partitioner->PropagateUserSharding(instruction, user, *user_sharding); } } else if (sharding_helper->IsCustomCallShardable(instruction)) { user_sharding = sharding_helper->PropagateUserSharding( instruction, user, *user_sharding); } } if (user_sharding) { improved_sharding |= MaybeImproveInstructionSharding( std::move(*user_sharding), instruction, may_combine_partial_sharding); } } return improved_sharding; } void ShardingPropagation::MaybeComputationPropagation( const ComputationMap& computation_map, const absl::flat_hash_set<const HloInstruction*>& provided_shardings, HloInstruction* instruction, absl::flat_hash_set<HloInstruction*>* changed) { auto propagate_to_instruction = [&](HloInstruction* search_inst) { auto related_instructions = GetRelatedInstructions(search_inst, computation_map); if (absl::c_count(related_instructions, instruction)) { for (HloInstruction* inst : related_instructions) { if ((!inst->has_sharding() || inst->sharding() != instruction->sharding()) && !provided_shardings.contains(inst)) { VLOG(2) << "Add computation sharding: " << inst->name() << " " << instruction->sharding().ToString(); inst->copy_sharding(instruction); changed->insert(inst); MaybeComputationPropagation(computation_map, provided_shardings, inst, changed); } } } }; if (instruction->opcode() == HloOpcode::kConditional || instruction->opcode() == HloOpcode::kWhile || instruction->opcode() == HloOpcode::kCustomCall || instruction->opcode() == HloOpcode::kCall) { propagate_to_instruction(instruction); } if (instruction->opcode() == HloOpcode::kParameter || instruction->parent()->root_instruction() == instruction) { auto it = computation_map.find(instruction->parent()); if (it != computation_map.end()) { propagate_to_instruction(it->second); if (instruction->opcode() == HloOpcode::kParameter && (it->second->opcode() == HloOpcode::kConditional || it->second->opcode() == HloOpcode::kCall)) { propagate_to_instruction(instruction); } } } } absl::StatusOr<bool> ShardingPropagation::RunToFixPoint( int64_t aggressiveness, bool propagate_shard_group, const ComputationMap& computation_map, const absl::flat_hash_set<const HloInstruction*>& provided_shardings, const CallGraph& call_graph, HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads, absl::flat_hash_map<const HloInstruction*, std::vector<int64_t>>& unspecified_dims, absl::flat_hash_map<HloInstruction*, int64_t>& instruction_to_shard_group_id, absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>& shard_group_id_to_shard_as_group, absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>& shard_group_id_to_shard_like_group, int64_t& iterations) { bool changed = false; absl::flat_hash_set<const HloInstruction*> already_inferred_from_shard_group; absl::flat_hash_set<const HloInstruction*> already_inferred_from_operands; absl::flat_hash_set<const HloInstruction*> already_inferred_from_users; bool changed_last_iter = true; const 
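// RunToFixPoint sweeps every computation until no sharding changes: a
// forward pass infers from operands, a backward pass (reverse post-order)
// infers from users, and an optional shard-group pass aligns group members.
// The three `already_inferred_*` sets memoize instructions whose inference
// cannot change until a neighbor's sharding does; `clear_cache` below
// invalidates exactly those neighbors.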
bool may_merge_partial = is_spmd_ && aggressiveness > 0; while (changed_last_iter) { changed_last_iter = false; int64_t inferred_from_shard_group_counter = 0; int64_t inferred_from_operand_counter = 0; int64_t inferred_from_user_counter = 0; int64_t instruction_counter = 0; int64_t already_sharded_counter = 0; for (const HloComputation* computation : module->computations(execution_threads)) { VLOG(2) << "Consider computation: " << computation->name(); std::vector<HloInstruction*> instructions = computation->MakeInstructionPostOrder(); instruction_counter += instructions.size(); already_sharded_counter += absl::c_count_if( instructions, [](const HloInstruction* inst) { return inst->has_sharding(); }); auto clear_cache = [&](HloInstruction* hlo, HloInstruction* hlo_for_users = nullptr) { for (auto operand : hlo->operands()) { already_inferred_from_users.erase(operand); } if (hlo_for_users == nullptr) { hlo_for_users = hlo; } for (auto user : hlo_for_users->users()) { already_inferred_from_operands.erase(user); for (auto c : user->called_computations()) { for (auto parameter : c->parameter_instructions()) { already_inferred_from_operands.erase(parameter); } } } if (instruction_to_shard_group_id.contains(hlo)) { const int64_t shard_group_id = instruction_to_shard_group_id.at(hlo); const absl::flat_hash_set<HloInstruction*>& shard_group = shard_group_id_to_shard_as_group.contains(shard_group_id) ? shard_group_id_to_shard_as_group.at(shard_group_id) : shard_group_id_to_shard_like_group.at(shard_group_id); for (HloInstruction* member : shard_group) { if (member != hlo) { already_inferred_from_shard_group.erase(member); } } } }; if (propagate_shard_group) { for (HloInstruction* instruction : instructions) { if (already_inferred_from_shard_group.contains(instruction)) { continue; } if (!instruction_to_shard_group_id.contains(instruction)) { continue; } const int64_t shard_group_id = instruction_to_shard_group_id.at(instruction); const absl::flat_hash_set<HloInstruction*>& shard_group = shard_group_id_to_shard_as_group.contains(shard_group_id) ? 
shard_group_id_to_shard_as_group.at(shard_group_id) : shard_group_id_to_shard_like_group.at(shard_group_id); if (provided_shardings.contains(instruction)) { if (!may_merge_partial) { continue; } auto it = unspecified_dims.find(instruction); if (it != unspecified_dims.end() && InferUnspecifiedDimsFromShardGroup(instruction, it->second, shard_group)) { ++inferred_from_shard_group_counter; VLOG(2) << "Refined partial sharding (shard group): " << instruction->ToString(); clear_cache(instruction); already_inferred_from_shard_group.insert(instruction); changed_last_iter = true; } continue; } already_inferred_from_shard_group.insert(instruction); if (InferShardingFromShardGroup(instruction, aggressiveness, shard_group)) { ++inferred_from_shard_group_counter; changed = true; VLOG(2) << "Add sharding (shard group): " << instruction->ToString(); absl::flat_hash_set<HloInstruction*> changed_in_comp_prop; MaybeComputationPropagation(computation_map, provided_shardings, instruction, &changed_in_comp_prop); clear_cache(instruction); for (auto hlo : changed_in_comp_prop) { clear_cache(hlo); } changed_last_iter = true; } } } for (HloInstruction* instruction : instructions) { if (already_inferred_from_operands.contains(instruction)) { continue; } if (provided_shardings.contains(instruction)) { if (!may_merge_partial) { continue; } auto it = unspecified_dims.find(instruction); HloInstruction* man_conversion_op_after; if (it != unspecified_dims.end() && InferUnspecifiedDimsFromOperand(instruction, it->second, &man_conversion_op_after)) { ++inferred_from_operand_counter; VLOG(2) << "Refined partial sharding (forward-pass): " << instruction->ToString(); clear_cache(instruction, man_conversion_op_after); already_inferred_from_operands.insert(instruction); changed_last_iter = true; } continue; } already_inferred_from_operands.insert(instruction); if (InferShardingFromOperands(instruction, computation_map, aggressiveness, call_graph, execution_threads)) { ++inferred_from_operand_counter; changed = true; VLOG(2) << "Add sharding (forward-pass): " << instruction->ToString(); absl::flat_hash_set<HloInstruction*> changed_in_comp_prop; MaybeComputationPropagation(computation_map, provided_shardings, instruction, &changed_in_comp_prop); clear_cache(instruction); for (auto hlo : changed_in_comp_prop) { clear_cache(hlo); } changed_last_iter = true; } } for (auto it = instructions.rbegin(); it != instructions.rend(); ++it) { if ((*it)->IsCustomCall("SPMDFullToShardShape") || (*it)->IsCustomCall("SPMDShardToFullShape")) { if (!already_inferred_from_users.contains(*it)) { already_inferred_from_users.erase((*it)->operand(0)); } } if (already_inferred_from_users.contains(*it)) { continue; } if (provided_shardings.contains(*it)) { if (!may_merge_partial) { continue; } auto uit = unspecified_dims.find(*it); HloInstruction* man_conversion_op_after; if (uit != unspecified_dims.end() && InferUnspecifiedDimsFromUsers(*it, uit->second, aggressiveness, is_spmd_, &man_conversion_op_after, call_graph)) { ++inferred_from_user_counter; VLOG(2) << "Refined partial sharding (backward-pass): " << (*it)->ToString(); clear_cache(*it, man_conversion_op_after); already_inferred_from_users.insert(*it); if (man_conversion_op_after != nullptr) { already_inferred_from_users.insert(man_conversion_op_after); } changed_last_iter = true; } continue; } already_inferred_from_users.insert(*it); if (InferShardingFromUsers(*it, computation_map, aggressiveness, is_spmd_, sharding_helper_.get(), call_graph)) { ++inferred_from_user_counter; changed = true; 
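// Note the special case in the backward pass above: for
// SPMDFullToShardShape/SPMDShardToFullShape custom-calls, the operand's
// user-cache entry is dropped so manual-sharding conversions are revisited
// whenever the conversion op itself still is.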
VLOG(2) << "Add sharding (backward-pass): " << (*it)->ToString(); absl::flat_hash_set<HloInstruction*> changed_in_comp_prop; MaybeComputationPropagation(computation_map, provided_shardings, *it, &changed_in_comp_prop); clear_cache(*it); for (auto hlo : changed_in_comp_prop) { clear_cache(hlo); } changed_last_iter = true; } } } VLOG(1) << "Sharding propagation iteration " << iterations << ";" << "\n total instructions: " << instruction_counter << "\n instructions already sharded: " << already_sharded_counter << "\n shardings inferred from shard group: " << inferred_from_shard_group_counter << "\n shardings inferred from operands: " << inferred_from_operand_counter << "\n shardings inferred from users: " << inferred_from_user_counter << "\n aggressiveness: " << aggressiveness; ++iterations; } return changed; } std::vector<HloInstruction*> ShardingPropagation::GetRelatedInstructions( HloInstruction* inst, const ComputationMap& computation_map) { if (inst->opcode() == HloOpcode::kWhile) { return std::vector<HloInstruction*>{ inst, inst->while_body()->root_instruction(), inst->while_body()->parameter_instruction(0), inst->while_condition()->parameter_instruction(0)}; } else if (inst->opcode() == HloOpcode::kConditional) { const auto& called_computations = inst->called_computations(); std::vector<HloInstruction*> comps; comps.reserve(called_computations.size() + 1); comps.push_back(inst); for (HloComputation* c : called_computations) { comps.push_back(c->root_instruction()); } return comps; } else if (inst->opcode() == HloOpcode::kCustomCall) { if (sharding_helper_ && sharding_helper_->IsCustomCallShardable(inst)) { return sharding_helper_->GetRelatedInstructions(inst); } else { return std::vector<HloInstruction*>{}; } } else if (inst->opcode() == HloOpcode::kCall) { HloComputation* callee = inst->called_computations().front(); return std::vector<HloInstruction*>{inst, callee->root_instruction()}; } else if (inst->opcode() == HloOpcode::kParameter) { auto it = computation_map.find(inst->parent()); if (it != computation_map.end()) { if (it->second->opcode() == HloOpcode::kConditional) { HloInstruction* cond = it->second; for (int64_t i = 1; i < cond->operand_count(); ++i) { if (cond->called_computations()[i - 1] == inst->parent()) { return std::vector<HloInstruction*>{inst, cond->mutable_operand(i)}; } } } if (it->second->opcode() == HloOpcode::kCall) { HloInstruction* call = it->second; int64_t operand_index = inst->parameter_number(); CHECK_LT(operand_index, call->operand_count()); return std::vector<HloInstruction*>{ inst, call->mutable_operand(operand_index)}; } } return std::vector<HloInstruction*>{}; } else { CHECK(false); } }; absl::StatusOr<bool> ShardingPropagation::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { ABSL_CONST_INIT static absl::once_flag did_registration; absl::call_once(did_registration, [] { RegisterCustomCallPartitioner( spmd::kShardBarrierFrom, std::make_unique<spmd::ShardBarrierFromPartitioner>()); RegisterCustomCallPartitioner( spmd::kShardBarrierTo, std::make_unique<spmd::ShardBarrierToPartitioner>()); }); std::optional<absl::flat_hash_map<const HloInstruction*, HloSharding>> original_sharding; bool any_changed = false; if (cse_prevention_only_) { original_sharding.emplace(); for (auto computation : module->computations(execution_threads)) { for (auto instruction : computation->instructions()) { if (instruction->has_sharding()) { original_sharding->emplace(instruction, instruction->sharding()); } } } } else { for (auto 
computation : module->computations(execution_threads)) { for (auto instruction : computation->instructions()) { if (instruction->has_sharding() && IsCSEPreventionSharding(instruction->sharding())) { instruction->clear_sharding(); any_changed = true; } } } } any_changed |= propagate_metadata_ ? AssignShardingMetadata(module, execution_threads) : RemoveShardingMetadata(module, execution_threads); absl::flat_hash_map<const HloInstruction*, std::vector<int64_t>> unspecified_dims; std::vector<HloSharding> saved_root_shardings; absl::flat_hash_map<int64_t, HloSharding> saved_parameter_shardings; absl::flat_hash_map<HloInstruction*, int64_t> instruction_to_shard_group_id; absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>> shard_group_id_to_shard_as_group; absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>> shard_group_id_to_shard_like_group; TF_ASSIGN_OR_RETURN( bool changed, ProcessShardingInstruction( module, execution_threads, !cse_prevention_only_, &unspecified_dims, allow_spmd_sharding_propagation_to_output_ ? &saved_root_shardings : nullptr, allow_spmd_sharding_propagation_to_parameters_ ? &saved_parameter_shardings : nullptr, &instruction_to_shard_group_id, &shard_group_id_to_shard_as_group, &shard_group_id_to_shard_like_group, &allow_spmd_sharding_propagation_to_parameters_vector_)); any_changed |= changed; for (const auto& [shard_group_id, shard_as_group] : shard_group_id_to_shard_as_group) { VLOG(5) << "Shard-As group " << shard_group_id << " contains:"; for (auto instruction : shard_as_group) { VLOG(5) << " " << instruction->ToString(); } } for (const auto& [shard_group_id, shard_like_group] : shard_group_id_to_shard_like_group) { VLOG(5) << "Shard-Like group " << shard_group_id << " contains:"; for (auto instruction : shard_like_group) { VLOG(5) << " " << instruction->ToString(); } } if (allow_spmd_sharding_propagation_to_output_) { CHECK(!module->entry_computation()->root_instruction()->has_sharding() || allow_spmd_sharding_propagation_to_output_vector_.size() == 1 || module->entry_computation() ->root_instruction() ->sharding() .tuple_elements() .size() == allow_spmd_sharding_propagation_to_output_vector_.size()) << "allow-spmd-sharding-propagation-to-output-vector's size can be " "either 1 or the number of elements in the root tuple of entry " "computation."; } if (allow_spmd_sharding_propagation_to_parameters_) { auto is_same_sized_tuple = [](HloModule* module, int64_t size) { if (module->entry_computation()->num_parameters() != 1) { return false; } HloInstruction* param = module->entry_computation()->parameter_instruction(0); return param->shape().IsTuple() && size == param->shape().tuple_shapes_size(); }; auto size = allow_spmd_sharding_propagation_to_parameters_vector_.size(); CHECK(size == 1 || size == module->entry_computation()->num_parameters() || is_same_sized_tuple(module, size)) << "allow-spmd-sharding-propagation-to-parameters-vector's size can be " "either 1 or the number of parameters in the entry computation."; } ComputationMap computation_map; absl::flat_hash_set<const HloInstruction*> provided_shardings; for (auto computation : module->computations(execution_threads)) { for (auto instruction : computation->instructions()) { if (instruction->opcode() == HloOpcode::kWhile) { TF_RETURN_IF_ERROR( CheckAndUpdateDeviceAssignmentsInWhileBody(instruction)); } } } for (auto computation : module->computations(execution_threads)) { for (auto instruction : computation->instructions()) { if (instruction->opcode() == HloOpcode::kWhile || 
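// computation_map links each called computation (while body and condition,
// conditional branches, call targets) back to its calling instruction so
// that parameter and root shardings can cross the call boundary in both
// directions.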
instruction->opcode() == HloOpcode::kConditional || instruction->opcode() == HloOpcode::kCall) { const HloInstruction* sharded_inst = nullptr; auto related_instructions = GetRelatedInstructions(instruction, computation_map); for (auto inst : related_instructions) { if (inst->has_sharding()) { sharded_inst = inst; break; } } if (sharded_inst != nullptr) { for (auto inst : related_instructions) { inst->copy_sharding(sharded_inst); } } if (instruction->opcode() == HloOpcode::kWhile) { computation_map[instruction->while_body()] = instruction; computation_map[instruction->while_condition()] = instruction; } else { for (HloComputation* c : instruction->called_computations()) { computation_map[c] = instruction; } } } } } for (const HloComputation* computation : module->computations(execution_threads)) { for (const HloInstruction* inst : computation->instructions()) { if (inst->has_sharding() && inst != module->entry_computation()->root_instruction() && inst->opcode() != HloOpcode::kParameter && !inst->sharding().IsUnknown()) { provided_shardings.insert(inst); } } } HloInstruction* entry_root = module->entry_computation()->root_instruction(); if (!allow_spmd_sharding_propagation_to_output_ && (!entry_root->has_sharding() || !entry_root->sharding().IsUnknown())) { if (entry_root->opcode() == HloOpcode::kWhile) { HloInstruction* copy = module->entry_computation()->AddInstruction( HloInstruction::CreateUnary(entry_root->shape(), HloOpcode::kCopy, entry_root)); if (entry_root->has_sharding()) { copy->set_sharding(entry_root->sharding()); } module->entry_computation()->set_root_instruction(copy); entry_root = copy; any_changed = true; } provided_shardings.insert(entry_root); } if (!allow_spmd_sharding_propagation_to_parameters_) { for (auto param : module->entry_computation()->parameter_instructions()) { if (param->has_sharding() && !param->sharding().IsUnknown()) { provided_shardings.insert(param); } } } for (HloComputation* computation : module->computations(execution_threads)) { auto instructions = computation->MakeInstructionPostOrder(); for (auto it = instructions.rbegin(); it != instructions.rend(); ++it) { HloInstruction* instruction = *it; if (instruction->has_sharding() && instruction->sharding().IsUnknown()) { instruction->set_sharding( HloSharding::Replicate(instruction->sharding().metadata())); } } } int64_t iterations = 0; std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module); for (int64_t aggressiveness = 0; aggressiveness < 4; ++aggressiveness) { TF_ASSIGN_OR_RETURN( bool changed, RunToFixPoint(aggressiveness, true, computation_map, provided_shardings, *call_graph, module, execution_threads, unspecified_dims, instruction_to_shard_group_id, shard_group_id_to_shard_as_group, shard_group_id_to_shard_like_group, iterations)); any_changed = any_changed || changed; } for (const auto& [shard_as_group_id, shard_as_group] : shard_group_id_to_shard_as_group) { HloSharding default_sharding = HloSharding::Replicate(); std::vector<HloSharding> shardings; for (HloInstruction* instruction : shard_as_group) { if (instruction->has_sharding()) { shardings.push_back(instruction->sharding()); if (!instruction->IsCustomCall(spmd::kShardBarrierFrom) && default_sharding.IsReplicated()) { default_sharding = instruction->sharding(); } } } HloSharding common_sharding = shardings.empty() ? 
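// shard_as alignment: once propagation converges, each shard_as group is
// reconciled to a single sharding via FindCommonSharding, seeded by the
// first explicitly sharded non-barrier member; members carrying
// user-provided partial shardings also merge the consensus into their
// unspecified dims via PartiallyReplicateTiledShardingOnAllDimsExcept.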
default_sharding : hlo_sharding_util::FindCommonSharding( shardings, default_sharding); VLOG(2) << "Aligning shard group: " << shard_as_group_id << " to sharding:" << common_sharding.ToString(); for (HloInstruction* member : shard_as_group) { if (member->IsCustomCall(spmd::kShardBarrierTo)) { continue; } if (provided_shardings.contains(member)) { auto it = unspecified_dims.find(member); if (it != unspecified_dims.end()) { HloSharding partial_replicated = hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept( common_sharding, it->second); HloSharding sharding = member->sharding(); if (hlo_sharding_util::MergeShardingIfCompatible(partial_replicated, &sharding)) { member->set_sharding(sharding); } } } member->set_sharding(common_sharding); } } for (HloComputation* computation : module->computations(execution_threads)) { for (HloInstruction* instruction : computation->instructions()) { if (instruction->IsCustomCall(spmd::kShardBarrierFrom) && instruction_to_shard_group_id.contains(instruction) && shard_group_id_to_shard_as_group.contains( instruction_to_shard_group_id.at(instruction))) { HloSharding sharding = instruction->sharding(); hlo_sharding_util::MergeShardingIfCompatible( instruction->mutable_operand(0)->sharding(), sharding.NumTiles(), &sharding); instruction->mutable_operand(0)->set_sharding(std::move(sharding)); } } } { TF_ASSIGN_OR_RETURN( bool changed, RunToFixPoint(3, true, computation_map, provided_shardings, *call_graph, module, execution_threads, unspecified_dims, instruction_to_shard_group_id, shard_group_id_to_shard_as_group, shard_group_id_to_shard_like_group, iterations)); any_changed = any_changed || changed; } for (HloComputation* computation : module->computations(execution_threads)) { for (HloInstruction* instruction : computation->instructions()) { if (instruction->IsCustomCall(spmd::kShardBarrierFrom) && instruction_to_shard_group_id.contains(instruction) && shard_group_id_to_shard_as_group.contains( instruction_to_shard_group_id.at(instruction))) { HloSharding sharding = instruction->sharding(); hlo_sharding_util::MergeShardingIfCompatible( instruction->mutable_operand(0)->sharding(), sharding.NumTiles(), &sharding); instruction->mutable_operand(0)->set_sharding(std::move(sharding)); } if (instruction->IsCustomCall(spmd::kShardBarrierFrom) || instruction->IsCustomCall(spmd::kShardBarrierTo)) { TF_ASSIGN_OR_RETURN(std::ignore, computation->ReplaceInstruction( instruction, instruction->mutable_operand(0), false, false, false)); } } } if (cse_prevention_only_) { for (auto computation : module->computations(execution_threads)) { for (auto instruction : computation->instructions()) { if (!instruction->has_sharding()) { continue; } if (IsCSEPreventionTarget(instruction) && instruction->has_sharding()) { if (!(*original_sharding).contains(instruction)) { instruction->set_sharding( SetCSEPreventionSharding(instruction->sharding())); } continue; } auto it = (*original_sharding).find(instruction); if (it != (*original_sharding).end()) { instruction->set_sharding(it->second); } else { instruction->clear_sharding(); } } } } HloInstruction* root_instruction = module->entry_computation()->root_instruction(); if (saved_root_shardings.size() == allow_spmd_sharding_propagation_to_output_vector_.size() && root_instruction->has_sharding()) { HloSharding root_sharding = root_instruction->sharding(); for (int i = 0; i < saved_root_shardings.size(); ++i) { if (!allow_spmd_sharding_propagation_to_output_vector_[i] && !saved_root_shardings[i].IsUnknown()) { 
        root_sharding.tuple_elements()[i] = saved_root_shardings[i];
      }
    }
    root_instruction->set_sharding(std::move(root_sharding));
  }
  auto params = module->entry_computation()->parameter_instructions();
  if (allow_spmd_sharding_propagation_to_parameters_) {
    if (allow_spmd_sharding_propagation_to_parameters_vector_.size() ==
        params.size()) {
      for (int64_t i = 0; i < params.size(); ++i) {
        if (!allow_spmd_sharding_propagation_to_parameters_vector_[i]) {
          if (saved_parameter_shardings.contains(i) &&
              !saved_parameter_shardings.at(i).IsUnknown()) {
            params[i]->set_sharding(saved_parameter_shardings.at(i));
          } else {
            params[i]->clear_sharding();
          }
        }
      }
    } else if (params.size() == 1 && saved_parameter_shardings.size() == 1 &&
               params[0]->shape().IsTuple() &&
               params[0]->shape().tuple_shapes_size() ==
                   allow_spmd_sharding_propagation_to_parameters_vector_
                       .size()) {
      HloSharding param_sharding = params[0]->sharding();
      for (int64_t i = 0; i < params[0]->shape().tuple_shapes_size(); ++i) {
        HloSharding saved_subsharding =
            saved_parameter_shardings.at(0).GetSubSharding(params[0]->shape(),
                                                           {i});
        if (!allow_spmd_sharding_propagation_to_parameters_vector_[i] &&
            !saved_subsharding.IsUnknown()) {
          param_sharding.tuple_elements()[i] = saved_subsharding;
        }
      }
      params[0]->set_sharding(std::move(param_sharding));
    }
  }
  std::function<bool(const Shape&, const HloSharding&)> evenly_partitions =
      [&evenly_partitions](const Shape& shape,
                           const HloSharding& sharding) -> bool {
    if (!sharding.IsTiled()) {
      return true;
    }
    if (sharding.IsTileMaximal()) {
      return sharding.IsReplicated();
    }
    if (sharding.IsTuple()) {
      for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
        if (!evenly_partitions(ShapeUtil::GetTupleElementShape(shape, i),
                               sharding.GetSubSharding(shape, {i}))) {
          return false;
        }
      }
    }
    for (int64_t i = 0; i < shape.dimensions_size(); ++i) {
      if (shape.dimensions(i) % sharding.tile_assignment().dim(i) != 0) {
        return false;
      }
    }
    return true;
  };
  if (allow_spmd_sharding_propagation_to_output_ &&
      root_instruction->has_sharding()) {
    if (root_instruction->shape().IsTuple() &&
        allow_spmd_sharding_propagation_to_output_vector_.size() ==
            root_instruction->shape().tuple_shapes_size()) {
      HloSharding root_sharding = root_instruction->sharding();
      for (int64_t i = 0; i < root_instruction->shape().tuple_shapes_size();
           ++i) {
        if (allow_spmd_sharding_propagation_to_output_vector_[i] &&
            !evenly_partitions(root_instruction->shape().tuple_shapes(i),
                               root_sharding.tuple_elements()[i])) {
          root_sharding.tuple_elements()[i] = HloSharding::Replicate();
        }
      }
      root_instruction->set_sharding(std::move(root_sharding));
    } else if (!root_instruction->shape().IsTuple()) {
      if (!evenly_partitions(root_instruction->shape(),
                             root_instruction->sharding())) {
        root_instruction->set_sharding(HloSharding::Replicate());
      }
    }
  }
  if (allow_spmd_sharding_propagation_to_parameters_) {
    if (allow_spmd_sharding_propagation_to_parameters_vector_.size() ==
        params.size()) {
      for (int64_t i = 0; i < params.size(); ++i) {
        if (params[i]->has_sharding() &&
            allow_spmd_sharding_propagation_to_parameters_vector_[i] &&
            !evenly_partitions(params[i]->shape(), params[i]->sharding())) {
          params[i]->set_sharding(HloSharding::Replicate());
        }
      }
    } else if (params.size() == 1 && params[0]->shape().IsTuple() &&
               params[0]->has_sharding() &&
               params[0]->shape().tuple_shapes_size() ==
                   allow_spmd_sharding_propagation_to_parameters_vector_
                       .size()) {
      HloSharding param_sharding = params[0]->sharding();
      for (int64_t i = 0; i < params[0]->shape().tuple_shapes_size(); ++i) {
        if (allow_spmd_sharding_propagation_to_parameters_vector_[i] &&
            !evenly_partitions(
                ShapeUtil::GetSubshapeOneIndex(params[0]->shape(), i),
                params[0]->sharding().GetSubSharding(params[0]->shape(),
                                                     {i}))) {
          param_sharding.tuple_elements()[i] = HloSharding::Replicate();
        }
      }
      params[0]->set_sharding(std::move(param_sharding));
    }
  }
  TF_RETURN_IF_ERROR(
      hlo_sharding_util::CanonicalizeLayoutAfterShardingPropagation(
          module, allow_spmd_sharding_propagation_to_output_,
          allow_spmd_sharding_propagation_to_parameters_));
  VLOG(1) << "Sharding propagation completed after " << iterations
          << " iterations";
  return any_changed;
}
}
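// ---------------------------------------------------------------------------
// Usage sketch (illustrative only; not part of either source file). The tests
// below drive ShardingPropagation by parsing an HLO module and running the
// pass on it. A minimal invocation in a test-style context (the HLO text here
// is a made-up example, not taken from the tests) looks like:
//
//   const char* const kHlo = R"(
//     HloModule example
//     ENTRY %e {
//       %p0 = f32[8,8] parameter(0), sharding={devices=[2,2]0,1,2,3}
//       %p1 = f32[8,8] parameter(1)
//       %add = f32[8,8] add(%p0, %p1)
//       ROOT %copy = f32[8,8] copy(%add)
//     })";
//   TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
//   ShardingPropagation pass(/*is_spmd=*/false, /*propagate_metadata=*/true);
//   TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
//
// After the run, %add is expected to pick up {devices=[2,2]0,1,2,3} from %p0,
// mirroring the ElementwiseOperationForwardPass test below; the root keeps
// its sharding untouched unless propagation to the output is allowed.
// ---------------------------------------------------------------------------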
#include "xla/service/sharding_propagation.h" #include <ostream> #include <string> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_op_metadata.h" #include "xla/hlo/ir/hlo_sharding.h" #include "xla/hlo/transforms/hlo_constant_splitter.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/protobuf_util.h" #include "xla/service/hlo_dce.h" #include "xla/service/hlo_parser.h" #include "xla/tests/hlo_test_base.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/statusor.h" namespace op = xla::testing::opcode_matchers; namespace xla { namespace { using ShardingPropagationTest = HloTestBase; void ClearMetadata(HloModule* module) { for (HloComputation* computation : module->computations()) { for (HloInstruction* instruction : computation->instructions()) { if (instruction->metadata().ByteSizeLong() != 0) { instruction->set_metadata(OpMetadata()); } if (!instruction->has_sharding()) { continue; } instruction->set_sharding(instruction->sharding().WithoutMetadata()); } } } struct MetadataTestParameter { explicit MetadataTestParameter(bool propagate_metadata, bool clear_metadata) : propagate_metadata(propagate_metadata), clear_metadata(clear_metadata) {} bool propagate_metadata = false; bool clear_metadata = false; }; struct MetadataTestParameterWithOutput { explicit MetadataTestParameterWithOutput(bool propagate_metadata, bool clear_metadata, bool allow_root_sharding_propagation) : propagate_metadata(propagate_metadata), clear_metadata(clear_metadata), allow_root_sharding_propagation(allow_root_sharding_propagation) {} bool propagate_metadata = false; bool clear_metadata = false; bool allow_root_sharding_propagation = false; }; class ParameterizedMetadataTest : public HloTestBase, public ::testing::WithParamInterface<MetadataTestParameter> {}; class ParameterizedMetadataTestWithOutput : public HloTestBase, public ::testing::WithParamInterface<MetadataTestParameterWithOutput> {}; std::string OpMetadataListToString(absl::Span<const OpMetadata> metadata) { std::vector<std::string> metadata_strings; metadata_strings.reserve(metadata.size()); for (const OpMetadata& element : metadata) { metadata_strings.push_back( absl::StrCat("{", OpMetadataToString(element), "}")); } return absl::StrCat("{", absl::StrJoin(metadata_strings, ", "), "}"); } class HloShardingMetadataMatcher : public ::testing::MatcherInterface<const HloSharding&> { public: explicit HloShardingMetadataMatcher(absl::Span<const OpMetadata> metadata) : metadata_(metadata.begin(), metadata.end()) {} bool MatchAndExplain( const HloSharding& sharding, ::testing::MatchResultListener* listener) const override { if (sharding.metadata().size() != metadata_.size()) { *listener << sharding.ToString(true) << " has incorrect sharding metadata (expected: " << OpMetadataListToString(metadata_) << ")"; return false; } for (int i = 0, e = metadata_.size(); i < e; ++i) { if (!protobuf_util::ProtobufEquals(sharding.metadata()[i], metadata_[i])) { *listener << sharding.ToString(true) << " has incorrect sharding metadata (expected: " << OpMetadataListToString(metadata_) << ")"; return false; } } return true; } void DescribeTo(std::ostream* os) const override { *os << 
OpMetadataListToString(metadata_); } private: std::vector<OpMetadata> metadata_; }; ::testing::Matcher<const HloSharding&> ShardingMetadata( absl::Span<const OpMetadata> metadata) { return ::testing::MakeMatcher(new HloShardingMetadataMatcher(metadata)); } OpMetadata CreateMetadata(const std::string& op_name) { OpMetadata metadata; metadata.set_op_name(op_name); return metadata; } INSTANTIATE_TEST_SUITE_P( ShardingPropagation, ParameterizedMetadataTest, ::testing::Values(MetadataTestParameter(false, false), MetadataTestParameter(false, true), MetadataTestParameter(true, false), MetadataTestParameter(true, true)), [](const ::testing::TestParamInfo<MetadataTestParameter>& info) { return absl::StrCat(info.param.propagate_metadata ? "MetadataPropagation" : "NoMetadataPropagation", "_", info.param.clear_metadata ? "NoMetadataInModule" : "MetadataInModule"); }); INSTANTIATE_TEST_SUITE_P( ShardingPropagation, ParameterizedMetadataTestWithOutput, ::testing::Values(MetadataTestParameterWithOutput( false, false, false), MetadataTestParameterWithOutput( false, true, false), MetadataTestParameterWithOutput( true, false, false), MetadataTestParameterWithOutput( true, true, false), MetadataTestParameterWithOutput( false, false, true), MetadataTestParameterWithOutput( false, true, true), MetadataTestParameterWithOutput( true, false, true), MetadataTestParameterWithOutput( true, true, true)), [](const ::testing::TestParamInfo<MetadataTestParameterWithOutput>& info) { return absl::StrCat( info.param.propagate_metadata ? "MetadataPropagation" : "NoMetadataPropagation", "_", info.param.clear_metadata ? "NoMetadataInModule" : "MetadataInModule", "_", info.param.allow_root_sharding_propagation ? "PropagateToRoot" : "NoPropagateToRoot"); }); TEST_P(ParameterizedMetadataTest, ShardingMetadataFromInstruction) { const char* const hlo_string = R"( HloModule module ENTRY %elementwise { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={devices=[1,2,2,1]0,1,2,3}, metadata={op_name="test"} ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_EQ(changed, GetParam().propagate_metadata && !GetParam().clear_metadata); auto* instruction = FindInstruction(module.get(), "param0"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("test")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_F(ShardingPropagationTest, ShardingMetadataFromInstructionNoOverwrite) { const char* const hlo_string = R"( HloModule module ENTRY %elementwise { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="name"}}, metadata={op_name="test"} ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingPropagation(false, true) .Run(module.get())); EXPECT_FALSE(changed); auto* instruction = FindInstruction(module.get(), "param0"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); EXPECT_THAT(instruction->sharding(), 
ShardingMetadata({CreateMetadata("name")})); } TEST_F(ShardingPropagationTest, ShardingMetadataFromInstructionNoMetadata) { const char* const hlo_string = R"( HloModule module ENTRY %elementwise { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="name"}} ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingPropagation(false, true) .Run(module.get())); EXPECT_FALSE(changed); auto* instruction = FindInstruction(module.get(), "param0"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("name")})); } TEST_F(ShardingPropagationTest, ShardingNoMetadataAndInstructionNoMetadata) { const char* const hlo_string = R"( HloModule module ENTRY %elementwise { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={devices=[1,2,2,1]0,1,2,3} ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardingPropagation(false, true) .Run(module.get())); EXPECT_FALSE(changed); auto* instruction = FindInstruction(module.get(), "param0"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } TEST_P(ParameterizedMetadataTest, ElementwiseOperationForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %elementwise { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}} %param1 = f32[5,7,11,13]{3,2,1,0} parameter(1) %add = f32[5,7,11,13]{3,2,1,0} add(%param0, %param1) ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%add) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "add"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ElementwiseOperationBackwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %elementwise { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0) %param1 = f32[5,7,11,13]{3,2,1,0} parameter(1) %add = f32[5,7,11,13]{3,2,1,0} add(%param0, %param1) ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%add), sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "add"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); if (GetParam().propagate_metadata && 
!GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTestWithOutput, BroadcastForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %broadcast { %param0 = f32[3,2048,2048]{2,1,0} parameter(0), sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="a"}} %broadcast = f32[3,2048,2048,3]{3,2,1,0} broadcast(%param0), dimensions={0,1,2} ROOT %copy = f32[3,2048,2048,3]{3,2,1,0} copy(%broadcast) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata, {GetParam().allow_root_sharding_propagation}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "broadcast"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } if (GetParam().allow_root_sharding_propagation) { EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); } } TEST_P(ParameterizedMetadataTestWithOutput, BroadcastForwardPassWithBarrier) { const char* const hlo_string = R"( HloModule module ENTRY %broadcast { %param0 = f32[3,2048,2048]{2,1,0} parameter(0), sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="a"}} %shard-barrier-from = f32[3,2048,2048]{2,1,0} custom-call(%param0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true %broadcast = f32[3,2048,2048,3]{3,2,1,0} broadcast(%shard-barrier-from), dimensions={0,1,2} ROOT %copy = f32[3,2048,2048,3]{3,2,1,0} copy(%broadcast) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( std::ignore, ShardingPropagation(true, GetParam().propagate_metadata, {GetParam().allow_root_sharding_propagation}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); auto* instruction = FindInstruction(module.get(), "broadcast"); ASSERT_NE(instruction, nullptr); EXPECT_FALSE(instruction->has_sharding()); } TEST_P(ParameterizedMetadataTest, BroadcastBackwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %broadcast { %param0 = f32[13]{0} parameter(0) %broadcast = f32[5,7,11,13]{3,2,1,0} broadcast(%param0), dimensions={3} ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%broadcast), sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "broadcast"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { 
EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, BroadcastBackwardPassWithBarrier) { const char* const hlo_string = R"( HloModule module ENTRY %broadcast { %param0 = f32[13]{0} parameter(0) %param0_copy = f32[13]{0} copy(param0) %shard-barrier-to = f32[13]{0} custom-call(%param0_copy), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true %broadcast = f32[5,7,11,13]{3,2,1,0} broadcast(%shard-barrier-to), dimensions={3} ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%broadcast), sharding={devices=[1,1,2,2]0,1,2,3 metadata={op_name="a"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( std::ignore, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); auto* instruction = FindInstruction(module.get(), "param0_copy"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{replicated}")); } TEST_P(ParameterizedMetadataTest, Broadcast1DBackwardNoChange) { const char* const hlo_string = R"( HloModule module ENTRY %broadcast { %param0 = s32[128]{0} parameter(0) %constant0 = s32[] constant(0), sharding={replicated} %broadcast = s32[128]{0} broadcast(%constant0), dimensions={}, sharding={replicated} ROOT %compare = pred[128]{0} compare(s32[128]{0} %param0, s32[128]{0} %broadcast), direction=NE, sharding={devices=[4]0,1,2,3} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); EXPECT_FALSE(changed); auto* instruction = FindInstruction(module.get(), "broadcast"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{replicated}")); } TEST_P(ParameterizedMetadataTestWithOutput, BroadcastForwardPartial) { const char* const hlo_string = R"( HloModule module ENTRY %broadcast { %param0 = f32[3,2048]parameter(0), sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}} %broadcast = f32[3,2048,3] broadcast(%param0), dimensions={0,1} ROOT %copy = f32[3,2048,3] copy(%broadcast) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata, {GetParam().allow_root_sharding_propagation}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "broadcast"); ASSERT_NE(instruction, nullptr); EXPECT_THAT( instruction, op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } if (GetParam().allow_root_sharding_propagation) { EXPECT_THAT( module->entry_computation()->root_instruction(), op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}")); } } TEST_P(ParameterizedMetadataTest, BroadcastMerge) { const char* const hlo_string = R"( HloModule module ENTRY %broadcast { %param0 = f32[3,2048]parameter(0), sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}} %broadcast = f32[3,2048,3] 
broadcast(%param0), dimensions={0,1} ROOT %copy = f32[3,2048,3] copy(%broadcast), sharding={devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate metadata={op_name="b"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "broadcast"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a"), CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, BroadcastUser) { const char* const hlo_string = R"( HloModule module ENTRY %broadcast { %param0 = f32[24,8]{0,1} parameter(0) %copy = f32[24,8]{0,1} copy(%param0) ROOT %broadcast = f32[4,24,6,8]{3,2,1,0} broadcast(%copy), dimensions={1,3}, sharding={devices=[1,2,1,4]0,1,2,3,4,5,6,7 metadata={op_name="a"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "copy"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,4]0,1,2,3,4,5,6,7}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTestWithOutput, BroadcastUserPartial) { const char* const hlo_string = R"( HloModule module ENTRY %broadcast { %param0 = f32[24,8]{0,1} parameter(0) %copy = f32[24,8]{0,1} copy(%param0) ROOT %broadcast = f32[4,24,6,8] broadcast(%copy), dimensions={1,3}, sharding={devices=[4,2,1,1]0,1,2,3,4,5,6,7 metadata={op_name="a"}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata, {GetParam().allow_root_sharding_propagation}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "copy"); ASSERT_NE(instruction, nullptr); EXPECT_THAT( instruction, op::Sharding("{devices=[2,1,4]0,2,4,6,1,3,5,7 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } if (GetParam().allow_root_sharding_propagation) { EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{devices=[4,2,1,1]0,1,2,3,4,5,6,7}")); } } TEST_P(ParameterizedMetadataTest, MaximalReduceForwardPass) { const char* const hlo_string = R"( HloModule module %add { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %add = f32[] add(%lhs, %rhs) } ENTRY %reduce { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), 
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}} %init = f32[] parameter(1) %reduce = f32[5,7]{1,0} reduce(%param0, %init), dimensions={2,3}, to_apply=%add ROOT %copy = f32[5,7]{0,1} copy(%reduce) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "reduce"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{replicated}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_F(ShardingPropagationTest, ManualTupleReduceForwardPass) { const char* const hlo_string = R"( HloModule module %minmax_func { %lhs_value = f32[] parameter(0) %rhs_value = f32[] parameter(2) %compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT %select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value) %lhs_index = s32[] parameter(1) %rhs_index = s32[] parameter(3) %select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index) ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5) } ENTRY %reduce { get-tuple-element.416 = f32[2,1,128]{2,1,0} parameter(0), sharding={manual} get-tuple-element.417 = s32[2,1,128]{2,1,0} parameter(1), sharding={manual} constant.3793 = f32[] constant(0) constant.3795 = s32[] constant(0) reduce.418 = (f32[2,1]{1,0}, s32[2,1]{1,0}) reduce( get-tuple-element.416, get-tuple-element.417, constant.3793, constant.3795), dimensions={2}, to_apply=minmax_func ROOT %copy = (f32[2,1]{1,0}, s32[2,1]{1,0}) copy(%reduce.418) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "reduce.418"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{{manual}, {manual}}")); } TEST_P(ParameterizedMetadataTest, ShardedReduceForwardPass) { const char* const hlo_string = R"( HloModule module %add { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %add = f32[] add(%lhs, %rhs) } ENTRY %reduce { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}} %init = f32[] parameter(1) %reduce = f32[7,11]{1,0} reduce(%param0, %init), dimensions={0,3}, to_apply=%add ROOT %copy = f32[7,11]{0,1} copy(f32[7,11]{1,0} %reduce) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "reduce"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ReduceForwardPassWithBarrier) { const 
char* const hlo_string = R"( HloModule module %add { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %add = f32[] add(%lhs, %rhs) } ENTRY %reduce { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}} %init = f32[] parameter(1) %shard-barrier-from = f32[5,7,11,13]{3,2,1,0} custom-call(%param0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true %reduce = f32[7,11]{1,0} reduce(%shard-barrier-from, %init), dimensions={0,3}, to_apply=%add ROOT %copy = f32[7,11]{0,1} copy(f32[7,11]{1,0} %reduce) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( std::ignore, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); auto* instruction = FindInstruction(module.get(), "reduce"); ASSERT_NE(instruction, nullptr); EXPECT_FALSE(instruction->has_sharding()); } TEST_P(ParameterizedMetadataTest, ReducePartiallyOnTiledDims) { const char* const hlo_string = R"( HloModule module %add { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %add = f32[] add(%lhs, %rhs) } ENTRY %reduce { %param0 = f32[8,8] parameter(0), sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}} %init = f32[] parameter(1) %reduce = f32[8] reduce(%param0, %init), dimensions={0}, to_apply=%add ROOT %copy = f32[8] copy(%reduce) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "reduce"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,2,1,3 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ReducePartiallyOnTiledDims2) { const char* const hlo_string = R"( HloModule module %add { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %add = f32[] add(%lhs, %rhs) } ENTRY %reduce { %param0 = f32[8,8] parameter(0), sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}} %init = f32[] parameter(1) %reduce = f32[8] reduce(%param0, %init), dimensions={0}, to_apply=%add ROOT %copy = f32[8] copy(%reduce) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "reduce"); ASSERT_NE(instruction, nullptr); EXPECT_THAT( instruction, op::Sharding("{devices=[2,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ReducePartiallyBackward) { const char* const hlo_string = R"( HloModule module %add { %lhs = 
f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %add = f32[] add(%lhs, %rhs) } ENTRY %reduce { %param0 = f32[8,8] parameter(0) %input = f32[8,8] copy(%param0) %init = f32[] parameter(1) %reduce = f32[8] reduce(%input, %init), dimensions={0}, to_apply=%add, sharding={devices=[2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}} ROOT %copy = f32[8] copy(%reduce) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "input"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ReduceBackwardWithBarrier) { const char* const hlo_string = R"( HloModule module %add { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %add = f32[] add(%lhs, %rhs) } ENTRY %reduce { %param0 = f32[8,8] parameter(0) %input = f32[8,8] copy(%param0) %init = f32[] parameter(1) %shard-barrier-to = f32[8,8] custom-call(%input), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true %reduce = f32[8] reduce(%shard-barrier-to, %init), dimensions={0}, to_apply=%add, sharding={devices=[2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}} ROOT %copy = f32[8] copy(%reduce) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( std::ignore, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); auto* instruction = FindInstruction(module.get(), "input"); ASSERT_NE(instruction, nullptr); EXPECT_FALSE(instruction->has_sharding()); } TEST_P(ParameterizedMetadataTestWithOutput, ShardedOnNonReduceDimTupleReduceForwardAndBackwardPass) { const char* const hlo_string = R"( HloModule module %minmax_func { %lhs_value = f32[] parameter(0) %rhs_value = f32[] parameter(2) %compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT %select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value) %lhs_index = s32[] parameter(1) %rhs_index = s32[] parameter(3) %select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index) ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5) } ENTRY %main { %param0 = f32[28,10] parameter(0) %param1 = s32[28,10] parameter(1), sharding={devices=[2,1]0,1 metadata={op_name="a"}} %copy_param0 = f32[28,10] copy(%param0) %init0 = f32[] parameter(2) %init1 = s32[] parameter(3) %reduce = (f32[28], s32[28]) reduce(%copy_param0, %param1, %init0, %init1), dimensions={1}, to_apply=%minmax_func %gte0 = f32[28] get-tuple-element(%reduce), index=0 %gte1 = s32[28] get-tuple-element(%reduce), index=1 %copy0 = f32[28] copy(%gte0) %copy1 = s32[28] copy(%gte1) ROOT %tuple = (f32[28], s32[28]) tuple(%copy0, %copy1) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata, 
{GetParam().allow_root_sharding_propagation}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* reduce = FindInstruction(module.get(), "reduce"); ASSERT_NE(reduce, nullptr); EXPECT_THAT(reduce, op::Sharding("{{devices=[2]0,1},{devices=[2]0,1}}")); auto* copy_param0 = FindInstruction(module.get(), "copy_param0"); ASSERT_NE(copy_param0, nullptr); EXPECT_THAT(copy_param0, op::Sharding("{devices=[2,1]0,1}")); for (const HloSharding& sharding : {copy_param0->sharding(), reduce->sharding().tuple_elements()[0], reduce->sharding().tuple_elements()[1]}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(sharding, ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(sharding, ShardingMetadata({})); } } if (GetParam().allow_root_sharding_propagation) { EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{{devices=[2]0,1},{devices=[2]0,1}}")); } } TEST_P(ParameterizedMetadataTestWithOutput, ShardedOnReduceDimTupleReduceForwardAndBackwardPass) { const char* const hlo_string = R"( HloModule module %minmax_func { %lhs_value = f32[] parameter(0) %rhs_value = f32[] parameter(2) %compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT %select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value) %lhs_index = s32[] parameter(1) %rhs_index = s32[] parameter(3) %select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index) ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5) } ENTRY %main { %param0 = f32[28,10] parameter(0) %param1 = s32[28,10] parameter(1), sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}} %copy_param0 = f32[28,10] copy(%param0) %init0 = f32[] parameter(2) %init1 = s32[] parameter(3) %reduce = (f32[28], s32[28]) reduce(%copy_param0, %param1, %init0, %init1), dimensions={1}, to_apply=%minmax_func %gte0 = f32[28] get-tuple-element(%reduce), index=0 %gte1 = s32[28] get-tuple-element(%reduce), index=1 %copy0 = f32[28] copy(%gte0) %copy1 = s32[28] copy(%gte1) ROOT %tuple = (f32[28], s32[28]) tuple(%copy0, %copy1) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata, {GetParam().allow_root_sharding_propagation}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* reduce = FindInstruction(module.get(), "reduce"); ASSERT_NE(reduce, nullptr); EXPECT_THAT(reduce, op::Sharding("{{devices=[2,2]0,1,2,3 " "last_tile_dim_replicate},{devices=[2,2]0,1," "2,3 last_tile_dim_replicate}}")); auto* copy_param0 = FindInstruction(module.get(), "copy_param0"); ASSERT_NE(copy_param0, nullptr); EXPECT_THAT(copy_param0, op::Sharding("{devices=[2,2]0,1,2,3}")); for (const HloSharding& sharding : {copy_param0->sharding(), reduce->sharding().tuple_elements()[0], reduce->sharding().tuple_elements()[1]}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(sharding, ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(sharding, ShardingMetadata({})); } } if (GetParam().allow_root_sharding_propagation) { EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{{devices=[2,2]0,1,2,3 " "last_tile_dim_replicate},{devices=[2,2]0,1,2,3 " "last_tile_dim_replicate}}")); } } TEST_P(ParameterizedMetadataTestWithOutput, GetTupleElementForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %gte { %param0 = 
f32[5,7,11,13]{3,2,1,0} parameter(0) %tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple( %param0, %param0) %tuple.1 = (f32[5,7,11,13]{3,2,1,0}, (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0})) tuple( %param0, %tuple), sharding={{devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}, {replicated metadata={op_name="b"}}, {devices=[1,2,2,1]0,1,2,3 metadata={op_name="c"}}} %gte = f32[5,7,11,13]{3,2,1,0} get-tuple-element(%tuple.1), index=0 %gte.1 = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) get-tuple-element( %tuple.1), index=1 %gte.2 = f32[5,7,11,13]{3,2,1,0} get-tuple-element(%gte.1), index=0 ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%gte.2) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata, {GetParam().allow_root_sharding_propagation}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* gte = FindInstruction(module.get(), "gte"); ASSERT_NE(gte, nullptr); EXPECT_THAT(gte, op::Sharding("{devices=[1,2,2,1]0,1,2,3}")); auto* gte1 = FindInstruction(module.get(), "gte.1"); ASSERT_NE(gte1, nullptr); EXPECT_THAT(gte1, op::Sharding("{{replicated}, {devices=[1,2,2,1]0,1,2,3}}")); auto* gte2 = FindInstruction(module.get(), "gte.2"); ASSERT_NE(gte2, nullptr); EXPECT_THAT(gte2, op::Sharding("{replicated}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(gte->sharding(), ShardingMetadata({CreateMetadata("a")})); EXPECT_THAT(gte1->sharding().tuple_elements()[0], ShardingMetadata({CreateMetadata("b")})); EXPECT_THAT(gte1->sharding().tuple_elements()[1], ShardingMetadata({CreateMetadata("c")})); EXPECT_THAT(gte2->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { for (const HloSharding& sharding : {gte->sharding(), gte1->sharding().tuple_elements()[0], gte1->sharding().tuple_elements()[1], gte2->sharding()}) { EXPECT_THAT(sharding, ShardingMetadata({})); } } if (GetParam().allow_root_sharding_propagation) { EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{replicated}")); } } TEST_P(ParameterizedMetadataTestWithOutput, GetTupleElementForwardPassWithBarrier) { const char* const hlo_string = R"( HloModule module ENTRY %gte { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0) %tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple( %param0, %param0), sharding={{devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}, {replicated metadata={op_name="b"}}} %shard-barrier-from = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) custom-call(%tuple), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true %gte = f32[5,7,11,13]{3,2,1,0} get-tuple-element(%shard-barrier-from), index=0 ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%gte) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( std::ignore, ShardingPropagation(false, GetParam().propagate_metadata, {GetParam().allow_root_sharding_propagation}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); auto* gte = FindInstruction(module.get(), "gte"); ASSERT_NE(gte, nullptr); EXPECT_FALSE(gte->has_sharding()); } TEST_P(ParameterizedMetadataTest, TupleForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %tuple { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={replicated 
metadata={op_name="a"}} %param1 = f32[5,7,11,13]{3,2,1,0} parameter(1), sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="b"}} %param2 = f32[5,7,11,13]{3,2,1,0} parameter(2) %tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple( %param1, %param2) %tuple.1 = (f32[5,7,11,13]{3,2,1,0}, (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0})) tuple( %param0, %tuple) ROOT %copy = (f32[5,7,11,13]{3,2,1,0}, (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0})) copy( %tuple.1) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* tuple = FindInstruction(module.get(), "tuple"); ASSERT_NE(tuple, nullptr); EXPECT_THAT(tuple, op::Sharding("{{devices=[1,2,2,1]0,1,2,3}," " {replicated}}")); auto* tuple1 = FindInstruction(module.get(), "tuple.1"); ASSERT_NE(tuple1, nullptr); EXPECT_THAT(tuple1, op::Sharding("{{replicated}," " {devices=[1,2,2,1]0,1,2,3}," " {replicated}}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(tuple->sharding().tuple_elements()[0], ShardingMetadata({CreateMetadata("b")})); EXPECT_THAT(tuple->sharding().tuple_elements()[1], ShardingMetadata({})); EXPECT_THAT(tuple1->sharding().tuple_elements()[0], ShardingMetadata({CreateMetadata("a")})); EXPECT_THAT(tuple1->sharding().tuple_elements()[1], ShardingMetadata({CreateMetadata("b")})); EXPECT_THAT(tuple1->sharding().tuple_elements()[2], ShardingMetadata({})); } else { for (const HloSharding& tuple_sharding : {tuple->sharding(), tuple1->sharding()}) { for (const HloSharding& sub_sharding : tuple_sharding.tuple_elements()) { EXPECT_THAT(sub_sharding, ShardingMetadata({})); } } } } TEST_P(ParameterizedMetadataTest, TupleForwardPass_SplatBug) { const char* const hlo_string = R"( HloModule module ENTRY %tuple { %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={replicated metadata={op_name="a"}} %param1 = f32[5,7,11,13]{3,2,1,0} parameter(1), sharding={devices=[1,2,2,1,2]0,1,2,3,4,5,6,7 last_tile_dims={manual} metadata={op_name="b"}} %param2 = f32[5,7,11,13]{3,2,1,0} parameter(2) %tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple( %param1, %param2) ROOT %copy = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) copy(%tuple) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* tuple = FindInstruction(module.get(), "tuple"); ASSERT_NE(tuple, nullptr); EXPECT_THAT(tuple, op::Sharding("{{devices=[1,2,2,1,2]0,1,2,3,4,5,6,7 " "last_tile_dims={manual}}, {replicated}}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(tuple->sharding().tuple_elements()[0], ShardingMetadata({CreateMetadata("b")})); EXPECT_THAT(tuple->sharding().tuple_elements()[1], ShardingMetadata({})); } else { for (const HloSharding& sub_sharding : tuple->sharding().tuple_elements()) { EXPECT_THAT(sub_sharding, ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, TupleForwardPassAndBackWardPass) { const char* const hlo_string = R"( HloModule module ENTRY %tuple { %param0 = f32[256,2]{1,0} parameter(0), sharding={manual 
metadata={op_name="a"}} %param1 = f32[256,2]{1,0} parameter(1), sharding={devices=[1,2]0,1 metadata={op_name="b"}} %constant = s32[1,2]{1,0} constant({{0,1}}) %gather = f32[1,32,2]{2,1,0} gather(param0, constant), offset_dims={1,2}, collapsed_slice_dims={}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={32,2} %tuple = (f32[1,32,2]{2,1,0}, f32[256,2]{1,0}) tuple( %gather, %param1) ROOT %copy = (f32[1,32,2]{2,1,0}, f32[256,2]{1,0}) copy(%tuple) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* tuple = FindInstruction(module.get(), "tuple"); ASSERT_NE(tuple, nullptr); EXPECT_THAT(tuple, op::Sharding("{{manual}, {devices=[1,2]0,1}}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(tuple->sharding().tuple_elements()[0], ShardingMetadata({CreateMetadata("a")})); EXPECT_THAT(tuple->sharding().tuple_elements()[1], ShardingMetadata({CreateMetadata("b")})); } else { for (const HloSharding& sub_sharding : tuple->sharding().tuple_elements()) { EXPECT_THAT(sub_sharding, ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, TupleShapedBackWardPass) { const char* const hlo_string = R"( HloModule module %cond { %vars.cond = (u32[], f32[]) parameter(0) %count.cond = u32[] get-tuple-element(%vars.cond), index=0 %limit = u32[] constant(10) ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT } %body { %param = (u32[], f32[]) parameter(0) %count = u32[] get-tuple-element(%param), index=0 %after-all = token[] after-all() %recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1 %recv-done = (f32[], token[]) recv-done(%recv), channel_id=1 %data = f32[] get-tuple-element(%recv-done), index=0 ROOT %tuple = (u32[], f32[]) tuple(%count, %data) } ENTRY %entry { %zero = u32[] constant(0), sharding={replicated metadata={op_name="a"}} %p0 = f32[] parameter(0), sharding={manual metadata={op_name="b"}} %tuple = (u32[], f32[]) tuple(%zero, %p0) %while = (u32[], f32[]) while(%tuple), body=%body, condition=%cond, sharding={{manual metadata={op_name="c"}}, {manual metadata={op_name="d"}}} ROOT %result = f32[] get-tuple-element(%while), index=1 })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* tuple = FindInstruction(module.get(), "tuple"); ASSERT_NE(tuple, nullptr); EXPECT_THAT(tuple, op::Sharding("{{manual}, {manual}}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(tuple->sharding().tuple_elements()[0], ShardingMetadata({CreateMetadata("c")})); EXPECT_THAT(tuple->sharding().tuple_elements()[1], ShardingMetadata({CreateMetadata("d")})); } else { for (const HloSharding& sub_sharding : tuple->sharding().tuple_elements()) { EXPECT_THAT(sub_sharding, ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, PartiallyManualTupleWithRepeatedOperandsBackWardPass) { const char* const hlo_string = R"( HloModule module %cond { %vars.cond = (s32[], s32[], s32[]) parameter(0) %count.cond = s32[] get-tuple-element(%vars.cond), index=0 %limit = s32[] constant(10) ROOT %lt = pred[] 
compare(%count.cond, %limit), direction=LT } %body { %param = (s32[], s32[], s32[]) parameter(0) %count = s32[] get-tuple-element(%param), index=0 %lhs = s32[] get-tuple-element(%param), index=1 %rhs = s32[] get-tuple-element(%param), index=2 %add = s32[] add(%lhs, %rhs) ROOT %tuple = (s32[], s32[], s32[]) tuple(%count, %lhs, %add) } ENTRY %entry { %zero = s32[] constant(0) %p0 = s32[] parameter(0), sharding={manual metadata={op_name="a"}} %tuple = (s32[], s32[], s32[]) tuple(%zero, %zero, %p0) %while = (s32[], s32[], s32[]) while(%tuple), body=%body, condition=%cond ROOT %copy = (s32[], s32[], s32[]) copy(%while) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* tuple = module->entry_computation()->root_instruction()->operand(0); ASSERT_NE(tuple, nullptr); EXPECT_THAT(tuple, op::Sharding("{{manual}, {manual}, {manual}}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(tuple->sharding().tuple_elements()[0], ShardingMetadata({CreateMetadata("a")})); EXPECT_THAT(tuple->sharding().tuple_elements()[1], ShardingMetadata({CreateMetadata("a")})); EXPECT_THAT(tuple->sharding().tuple_elements()[2], ShardingMetadata({CreateMetadata("a")})); } else { for (const HloSharding& sub_sharding : tuple->sharding().tuple_elements()) { EXPECT_THAT(sub_sharding, ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, ForwardConvolutionForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %conv { %lhs = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={devices=[2,2,2,1]0,1,2,3,4,5,6,7 metadata={op_name="a"}} %rhs = f32[3,3,13,17]{3,2,1,0} parameter(1) %convolution = f32[5,7,11,17]{3,2,1,0} convolution(%lhs, %rhs), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f ROOT %copy = f32[5,7,11,17]{3,2,1,0} copy(%convolution) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "convolution"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,2,1]0,1,2,3,4,5,6,7}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ForwardConvolutionLargeDilationForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %conv { %lhs = f32[8,64,2]{2,1,0} parameter(0), sharding={devices=[1,4,1]0,1,2,3 metadata={op_name="a"}} %rhs = f32[3,2,2]{2,1,0} parameter(1) %convolution = f32[8,32,2]{2,1,0} convolution(%lhs, %rhs), window={size=3 rhs_dilate=16}, dim_labels=b0f_0io->b0f ROOT %copy = f32[8,32,2]{2,1,0} copy(%convolution) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); 
EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "convolution"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,4,1]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ForwardConvolution3DSmallKernel) { const char* const hlo_string = R"( HloModule module ENTRY %conv { %lhs = bf16[32,32,8,7,128]{4,3,2,1,0} parameter(0), sharding={devices=[1,4,1,1,1]0,1,2,3 metadata={op_name="a"}} %rhs = bf16[3,3,3,128,256]{4,3,2,1,0} parameter(1) %convolution = bf16[16,16,8,3,256]{4,3,2,1,0} convolution(bf16[32,32,8,7,128]{4,3,2,1,0} %lhs, bf16[3,3,3,128,256]{4,3,2,1,0} %rhs), window={size=3x3x3 stride=2x2x2 pad=1_1x1_1x0_0}, dim_labels=01b2f_012io->01b2f ROOT %copy = bf16[16,16,8,3,256]{4,3,2,1,0} copy(%convolution) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "convolution"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,4,1,1,1]0,1,2,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, TransposeForwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %transpose { %param = f32[7,11,13]{2,1,0} parameter(0), sharding={devices=[2,1,2]0,1,2,3 metadata={op_name="a"}} %transpose = f32[11,13,7]{2,1,0} transpose(%param), dimensions={1,2,0} ROOT %copy = f32[11,13,7]{2,1,0} copy(%transpose) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "transpose"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,2,1,3}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, TransposeForwardPassWithBarrier) { const char* const hlo_string = R"( HloModule module ENTRY %transpose { %param = f32[7,11,13]{2,1,0} parameter(0), sharding={devices=[2,1,2]0,1,2,3 metadata={op_name="a"}} %shard-barrier-from = f32[7,11,13]{2,1,0} custom-call(%param), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true %transpose = f32[11,13,7]{2,1,0} transpose(%shard-barrier-from), dimensions={1,2,0} ROOT %copy = f32[11,13,7]{2,1,0} copy(%transpose) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( std::ignore, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, 
TEST_P(ParameterizedMetadataTest, TransposeForwardPassWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %transpose {
  %param = f32[7,11,13]{2,1,0} parameter(0), sharding={devices=[2,1,2]0,1,2,3 metadata={op_name="a"}}
  %shard-barrier-from = f32[7,11,13]{2,1,0} custom-call(%param), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %transpose = f32[11,13,7]{2,1,0} transpose(%shard-barrier-from), dimensions={1,2,0}
  ROOT %copy = f32[11,13,7]{2,1,0} copy(%transpose)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore, ShardingPropagation(false, GetParam().propagate_metadata)
                       .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "transpose");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}

TEST_P(ParameterizedMetadataTest, TransposeBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %transpose {
  %param = f32[7,11,13]{2,1,0} parameter(0)
  %copy = f32[7,11,13]{2,1,0} copy(%param)
  ROOT %transpose = f32[11,13,7]{2,1,0} transpose(%copy), dimensions={1,2,0}, sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,2]0,2,1,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, TransposeBackwardPassWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %transpose {
  %param = f32[7,11,13]{2,1,0} parameter(0)
  %copy = f32[7,11,13]{2,1,0} copy(%param)
  %shard-barrier-to = f32[7,11,13]{2,1,0} custom-call(%copy), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
  ROOT %transpose = f32[11,13,7]{2,1,0} transpose(%shard-barrier-to), dimensions={1,2,0}, sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore, ShardingPropagation(false, GetParam().propagate_metadata)
                       .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}

TEST_P(ParameterizedMetadataTest, ReshapeForwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[1430,1]{1,0} parameter(0), sharding={devices=[2,1]0,1 metadata={op_name="a"}}
  %reshape = f32[10,11,13]{2,1,0} reshape(%param0)
  ROOT %copy = f32[10,11,13]{2,1,0} copy(%reshape)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "reshape");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ReshapeForwardPassWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[1430,1]{1,0} parameter(0), sharding={devices=[2,1]0,1 metadata={op_name="a"}}
  %shard-barrier-from = f32[1430,1]{1,0} custom-call(%param0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %reshape = f32[10,11,13]{2,1,0} reshape(%shard-barrier-from)
  ROOT %copy = f32[10,11,13]{2,1,0} copy(%reshape)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore, ShardingPropagation(false, GetParam().propagate_metadata)
                       .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "reshape");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}

TEST_P(ParameterizedMetadataTest, ReshapeForwardPassPartialMatch) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[14,32] parameter(0), sharding={devices=[4,4]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 metadata={op_name="a"}}
  %reshape = f32[7,2,2,16] reshape(%param0)
  ROOT %copy = f32[7,2,2,16] copy(%reshape)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "reshape");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[1,1,2,2,4]0,4,8,12,1,5,9,13,2,6,10,14,3,"
                           "7,11,15 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ReshapeForwardPassPartialMatch2) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[12,8] parameter(0), sharding={devices=[2,4]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
  %reshape = f32[8,12] reshape(%param0)
  ROOT %copy = f32[8,12] copy(%reshape)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "reshape");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding("{devices=[2,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

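// The next test uses the iota tile-assignment syntax ("<=[12]" enumerates
// devices 0..11) and checks how one operand sharding maps onto several
// differently shaped reshapes, including shapes where only part of the
// tiling can be preserved and one where nothing survives but replication.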
TEST_P(ParameterizedMetadataTest, ReshapeForwardPassTranspose) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[6,4,5] parameter(0), sharding={devices=[6,2,1]<=[12] metadata={op_name="a"}}
  %reshape.1 = f32[2,3,20] reshape(%param0)
  %reshape.2 = f32[2,4,3,5] reshape(%param0)
  %reshape.3 = f32[20,6] reshape(%param0)
  %reshape.4 = f32[3,5,8] reshape(%param0)
  %reshape.5 = f32[10,4,3] reshape(%param0)
  %reshape.6 = f32[5,8,3] reshape(%param0)
  ROOT %tuple = tuple(%reshape.1, %reshape.2, %reshape.3, %reshape.4, %reshape.5, %reshape.6)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  std::vector<std::pair<std::string, std::string>> instruction_and_sharding = {
      {"reshape.1", "{devices=[2,3,2]<=[12]}"},
      {"reshape.2", "{devices=[2,1,1,1,6]<=[12] last_tile_dim_replicate}"},
      {"reshape.3", "{devices=[2,1,6]<=[12] last_tile_dim_replicate}"},
      {"reshape.4", "{devices=[3,1,1,4]<=[12] last_tile_dim_replicate}"},
      {"reshape.5", "{devices=[2,1,1,6]<=[12] last_tile_dim_replicate}"},
      {"reshape.6", "{replicated}"}};
  for (const auto& [name, sharding] : instruction_and_sharding) {
    auto* instruction = FindInstruction(module.get(), name);
    ASSERT_NE(instruction, nullptr);
    EXPECT_THAT(instruction, op::Sharding(sharding));
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, ReshapeBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[2002,1]{1,0} parameter(0)
  %copy = f32[2002,1]{1,0} copy(f32[2002,1]{1,0} %param0)
  ROOT %reshape = f32[14,11,13]{2,1,0} reshape(%copy), sharding={devices=[2,1,1]0,1 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ReshapeBackwardPassWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[2002,1]{1,0} parameter(0)
  %copy = f32[2002,1]{1,0} copy(f32[2002,1]{1,0} %param0)
  %shard-barrier-to = f32[2002,1]{1,0} custom-call(%copy), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
  ROOT %reshape = f32[14,11,13]{2,1,0} reshape(%shard-barrier-to), sharding={devices=[2,1,1]0,1 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore, ShardingPropagation(false, GetParam().propagate_metadata)
                       .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}

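// Pad carries the operand tiling over to its result (and back): the pad
// value operand plays no role in the propagated sharding.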
TEST_P(ParameterizedMetadataTest, PadForwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %pad {
  %input = f32[11,17]{1,0} parameter(0), sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}}
  %pad_value = f32[] parameter(1)
  %pad = f32[27,51]{1,0} pad(%input, %pad_value), padding=2_4_1x1_1_2
  ROOT %copy = f32[27,51]{1,0} copy(%pad)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "pad");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, PadBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %pad {
  %input = f32[11,17]{1,0} parameter(0)
  %copy = f32[11,17]{1,0} copy(%input)
  %pad_value = f32[] parameter(1)
  %pad = f32[27,51]{1,0} pad(%copy, %pad_value), padding=2_4_1x1_1_2, sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}}
  ROOT %result = f32[27,51]{1,0} copy(%pad)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, PartialReplicatedPadForwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %pad {
  %input = f32[11,17]{1,0} parameter(0), sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}}
  %pad_value = f32[] parameter(1)
  %pad = f32[27,51]{1,0} pad(%input, %pad_value), padding=2_4_1x1_1_2
  ROOT %copy = f32[27,51]{1,0} copy(%pad)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "pad");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding("{devices=[2,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

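// When one operand is replicated and the other is tiled, the tiled sharding
// (and its metadata, "b" here) is preferred on the elementwise add and on
// both operand copies.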
TEST_P(ParameterizedMetadataTest, ShardedPreferredOverReplicated) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %replicated {
  %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={replicated metadata={op_name="a"}}
  %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0)
  %param1 = f32[5,7,11,13]{3,2,1,0} parameter(1), sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="b"}}
  %copy.1 = f32[5,7,11,13]{3,2,1,0} copy(%param1)
  %add = f32[5,7,11,13]{3,2,1,0} add(%copy, %copy.1)
  ROOT %copy.2 = f32[5,7,11,13]{3,2,1,0} copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* copy = FindInstruction(module.get(), "copy");
  ASSERT_NE(copy, nullptr);
  EXPECT_THAT(copy, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
  auto* copy1 = FindInstruction(module.get(), "copy.1");
  ASSERT_NE(copy1, nullptr);
  EXPECT_THAT(copy1, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
  auto* add = FindInstruction(module.get(), "add");
  ASSERT_NE(add, nullptr);
  EXPECT_THAT(add, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
  for (const HloSharding& sharding :
       {copy->sharding(), copy1->sharding(), add->sharding()}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(sharding, ShardingMetadata({CreateMetadata("b")}));
    } else {
      EXPECT_THAT(sharding, ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, PartialReplicateReshapeForwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[1430,1]{1,0} parameter(0), sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
  %reshape = f32[10,11,13]{2,1,0} reshape(%param0)
  ROOT %copy = f32[10,11,13]{2,1,0} copy(%reshape)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "reshape");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding("{devices=[2,1,1,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, PartialReplicateReshapeBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
  %param0 = f32[2002,1]{1,0} parameter(0)
  %copy = f32[2002,1]{1,0} copy(f32[2002,1]{1,0} %param0)
  ROOT %reshape = f32[14,11,13]{2,1,0} reshape(%copy), sharding={devices=[2,1,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

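// Tuples whose operands all carry maximal (single-device) shardings should
// not receive a tuple sharding of their own; the `changed` flag in this test
// only reflects metadata handling on the existing annotations.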
TEST_P(ParameterizedMetadataTest, DontShardTuplesIfAllInputIsMaximal) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %tuple {
  %param0 = f32[5,7,11,13]{3,2,1,0} parameter(0), sharding={maximal device=0 metadata={op_name="a"}}
  %param1 = f32[5,7,11,13]{3,2,1,0} parameter(1), sharding={maximal device=1 metadata={op_name="b"}}
  %tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple(%param0, %param1)
  ROOT %copy = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) copy(%tuple)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  EXPECT_EQ(changed,
            !GetParam().propagate_metadata && !GetParam().clear_metadata);
  auto* instruction = FindInstruction(module.get(), "tuple");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::NoSharding());
}

TEST_P(ParameterizedMetadataTest, ValidConvolution) {
  const char* const hlo_string = R"(
HloModule module
ENTRY conv {
  %lhs = f32[13,17,19]{2,1,0} parameter(0), sharding={devices=[1,2,1]0,1 metadata={op_name="a"}}
  %rhs = f32[19,5,19]{2,1,0} parameter(1)
  %conv = f32[13,13,19]{2,1,0} convolution(%lhs, %rhs), window={size=5}, dim_labels=b0f_i0o->b0f
  ROOT %tuple = (f32[13,13,19]{2,1,0}) tuple(%conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "conv");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, StridedSlice) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %slice {
  %param = f32[17,13]{1,0} parameter(0), sharding={devices=[2,1]0,1 metadata={op_name="a"}}
  %slice = f32[7,5]{1,0} slice(%param), slice={[1:15:2], [5:10:1]}
  ROOT %tuple = (f32[7,5]{1,0}) tuple(%slice)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "slice");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, PartialReplicatedStridedSlice) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %slice {
  %param = f32[17,13]{1,0} parameter(0), sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
  %slice = f32[7,5]{1,0} slice(%param), slice={[1:15:2], [5:10:1]}
  ROOT %tuple = (f32[7,5]{1,0}) tuple(%slice)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "slice");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

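// Backward pass through reduce-window: the sharding annotated on the
// reduce-window result is expected to flow back into the operand copy.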
TEST_P(ParameterizedMetadataTest, ReduceWindowBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
%add (lhs: f32[], rhs: f32[]) -> f32[] {
  %lhs = f32[] parameter(0)
  %rhs = f32[] parameter(1)
  ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce_window {
  %param = f32[13,17]{1,0} parameter(0)
  %param.copy = f32[13,17]{1,0} copy(%param)
  %init = f32[] parameter(1)
  ROOT %reduce-window = f32[7,17]{1,0} reduce-window(%param.copy, %init), window={size=3x2 stride=2x1 pad=1_1x0_1}, to_apply=%add, sharding={devices=[2,1]0,1 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* param_copy = FindInstruction(module.get(), "param.copy");
  ASSERT_NE(param_copy, nullptr);
  EXPECT_THAT(param_copy, op::Sharding("{devices=[2,1]0,1}"));
  auto* reduce_window = FindInstruction(module.get(), "reduce-window");
  ASSERT_NE(reduce_window, nullptr);
  EXPECT_THAT(reduce_window, op::Sharding("{devices=[2,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(param_copy->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(reduce_window->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(param_copy->sharding(), ShardingMetadata({}));
    EXPECT_THAT(reduce_window->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ReduceWindowBackwardPassWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
%add (lhs: f32[], rhs: f32[]) -> f32[] {
  %lhs = f32[] parameter(0)
  %rhs = f32[] parameter(1)
  ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce_window {
  %param = f32[13,17]{1,0} parameter(0)
  %param.copy = f32[13,17]{1,0} copy(%param)
  %init = f32[] parameter(1)
  %shard-barrier-to = f32[13,17]{1,0} custom-call(%param.copy), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
  ROOT %reduce-window = f32[7,17]{1,0} reduce-window(%shard-barrier-to, %init), window={size=3x2 stride=2x1 pad=1_1x0_1}, to_apply=%add, sharding={devices=[2,1]0,1 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore, ShardingPropagation(false, GetParam().propagate_metadata)
                       .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* param_copy = FindInstruction(module.get(), "param.copy");
  ASSERT_NE(param_copy, nullptr);
  EXPECT_FALSE(param_copy->has_sharding());
}

})"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* param_0_copy = FindInstruction(module.get(), "param.0.copy"); ASSERT_NE(param_0_copy, nullptr); EXPECT_THAT(param_0_copy, op::Sharding("{devices=[2,1]0,1}")); auto* param_1_copy = FindInstruction(module.get(), "param.1.copy"); ASSERT_NE(param_1_copy, nullptr); EXPECT_THAT(param_1_copy, op::Sharding("{devices=[2,1]0,1}")); auto* reduce_window = FindInstruction(module.get(), "reduce-window"); ASSERT_NE(reduce_window, nullptr); EXPECT_THAT(reduce_window, op::Sharding("{{devices=[2,1]0,1}, {devices=[2,1]0,1}}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(param_0_copy->sharding(), ShardingMetadata({CreateMetadata("a")})); EXPECT_THAT(param_1_copy->sharding(), ShardingMetadata({CreateMetadata("b")})); EXPECT_THAT(reduce_window->sharding().tuple_elements()[0], ShardingMetadata({CreateMetadata("a")})); EXPECT_THAT(reduce_window->sharding().tuple_elements()[1], ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(param_0_copy->sharding(), ShardingMetadata({})); EXPECT_THAT(param_1_copy->sharding(), ShardingMetadata({})); EXPECT_THAT(reduce_window->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ReplicatedConvolutionLhs) { const char* const hlo_string = R"( HloModule module ENTRY conv { %lhs = f32[3,2,3]{2,1,0} parameter(0), sharding={replicated metadata={op_name="a"}} %rhs = f32[2,2,1]{2,1,0} parameter(1) %conv = f32[3,2,3]{2,1,0} convolution(%lhs, %rhs), window={size=1}, dim_labels=bf0_oi0->bf0 ROOT %tuple = (f32[3,2,3]{2,1,0}) tuple(%conv) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* lhs = FindInstruction(module.get(), "lhs"); ASSERT_NE(lhs, nullptr); EXPECT_THAT(lhs, op::Sharding("{replicated}")); auto* conv = FindInstruction(module.get(), "conv"); ASSERT_NE(conv, nullptr); EXPECT_THAT(conv, op::Sharding("{replicated}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(lhs->sharding(), ShardingMetadata({CreateMetadata("a")})); EXPECT_THAT(conv->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(lhs->sharding(), ShardingMetadata({})); EXPECT_THAT(conv->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ConvolutionShardedFeature) { const char* const hlo_string = R"( HloModule module ENTRY conv { %lhs = f32[3,2,3]{2,1,0} parameter(0), sharding={devices=[1,2,1]0,1 metadata={op_name="a"}} %rhs = f32[2,2,1]{2,1,0} parameter(1) %conv = f32[3,2,3]{2,1,0} convolution(%lhs, %rhs), window={size=1}, dim_labels=bf0_oi0->bf0 ROOT %tuple = (f32[3,2,3]{2,1,0}) tuple(%conv) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "conv"); 
TEST_P(ParameterizedMetadataTest, ConvolutionShardedFeature) {
  const char* const hlo_string = R"(
HloModule module
ENTRY conv {
  %lhs = f32[3,2,3]{2,1,0} parameter(0), sharding={devices=[1,2,1]0,1 metadata={op_name="a"}}
  %rhs = f32[2,2,1]{2,1,0} parameter(1)
  %conv = f32[3,2,3]{2,1,0} convolution(%lhs, %rhs), window={size=1}, dim_labels=bf0_oi0->bf0
  ROOT %tuple = (f32[3,2,3]{2,1,0}) tuple(%conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "conv");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{replicated}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ConvolutionDifferentDimensionNumbers) {
  const char* const hlo_string = R"(
HloModule module
ENTRY conv {
  %lhs = f32[8,16,512] parameter(0), sharding={devices=[1,2,1]0,1 metadata={op_name="a"}}
  %rhs = f32[8,2,512] parameter(1)
  %conv = f32[3,512,512] convolution(%lhs, %rhs), window={size=2 stride=5}, dim_labels=f0b_i0o->0bf
  ROOT %tuple = (f32[3,512,512]) tuple(%conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "conv");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, Concatenate) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %concat {
  %param.0 = f32[5,7] parameter(0), sharding={devices=[2,1]0,1 metadata={op_name="a"}}
  %param.1 = f32[5,9] parameter(1), sharding={devices=[2,1]0,1 metadata={op_name="b"}}
  %concat = f32[5,16] concatenate(%param.0, %param.1), dimensions={1}
  ROOT %tuple = (f32[5,16]) tuple(%concat)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "concat");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ConcatenateForwardWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %concat {
  %param.0 = f32[5,7] parameter(0), sharding={devices=[2,1]0,1 metadata={op_name="a"}}
  %param.1 = f32[5,9] parameter(1), sharding={devices=[2,1]0,1 metadata={op_name="b"}}
  %shard-barrier-from.0 = f32[5,7] custom-call(%param.0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %shard-barrier-from.1 = f32[5,9] custom-call(%param.1), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %concat = f32[5,16] concatenate(%shard-barrier-from.0, %shard-barrier-from.1), dimensions={1}
  ROOT %tuple = (f32[5,16]) tuple(%concat)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore, ShardingPropagation(false, GetParam().propagate_metadata)
                       .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "concat");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}

TEST_P(ParameterizedMetadataTest, ConcatenateBackwardWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %concat {
  %param.0 = f32[5,7] parameter(0)
  %copy.0 = f32[5,7] copy(%param.0)
  %param.1 = f32[5,9] parameter(1)
  %copy.1 = f32[5,9] copy(%param.1)
  %shard-barrier-to = f32[5,9] custom-call(%copy.1), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
  %concat = f32[5,16] concatenate(%copy.0, %shard-barrier-to), dimensions={1}, sharding={devices=[2,1]0,1 metadata={op_name="a"}}
  ROOT %tuple = (f32[5,16]) tuple(%concat)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore, ShardingPropagation(false, GetParam().propagate_metadata)
                       .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "copy.1");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}

TEST_P(ParameterizedMetadataTest, TupleBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %tuple {
  %param.0 = f32[1] parameter(0)
  %param.1 = f32[3] parameter(1)
  %copy.0 = f32[1] copy(%param.0)
  %copy.1 = f32[3] copy(%param.1)
  ROOT %tuple = (f32[1], f32[3]) tuple(%copy.0, %copy.1), sharding={{replicated metadata={op_name="a"}}, {devices=[2]0,1 metadata={op_name="b"}}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* copy0 = FindInstruction(module.get(), "copy.0");
  ASSERT_NE(copy0, nullptr);
  EXPECT_THAT(copy0, op::Sharding("{replicated}"));
  auto* copy1 = FindInstruction(module.get(), "copy.1");
  ASSERT_NE(copy1, nullptr);
  EXPECT_THAT(copy1, op::Sharding("{devices=[2]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(copy0->sharding(), ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(copy1->sharding(), ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(copy0->sharding(), ShardingMetadata({}));
    EXPECT_THAT(copy1->sharding(), ShardingMetadata({}));
  }
}

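// All-reduce keeps the operand tiling in the forward direction (with or
// without a channel_id) and accepts a replicated sharding from its user in
// the backward direction.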
TEST_P(ParameterizedMetadataTest, AllReduce) {
  const char* const hlo_string = R"(
HloModule module
%add (lhs: f32[], rhs: f32[]) -> f32[] {
  %add_lhs = f32[] parameter(0)
  %add_rhs = f32[] parameter(1)
  ROOT %add = f32[] add(f32[] %add_lhs, f32[] %add_rhs)
}
ENTRY %entry {
  %param.0 = f32[3] parameter(0)
  %param.1 = f32[3] parameter(1)
  %copy_f_t = f32[3] copy(%param.1), sharding={devices=[2]0,1 metadata={op_name="a"}}
  %crs_f.tiled = f32[3] all-reduce(%copy_f_t), to_apply=%add
  %crs_f.none = f32[3] all-reduce(%copy_f_t), to_apply=%add, channel_id=1
  %crs_b.replicated = f32[3] all-reduce(%param.0), to_apply=%add
  %copy_b_r = f32[3] copy(%crs_b.replicated), sharding={replicated metadata={op_name="b"}}
  ROOT %tuple = (f32[3], f32[3], f32[3]) tuple(%crs_f.tiled, %crs_f.none, %copy_b_r)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* crs_f_tiled = FindInstruction(module.get(), "crs_f.tiled");
  ASSERT_NE(crs_f_tiled, nullptr);
  EXPECT_THAT(crs_f_tiled, op::Sharding("{devices=[2]0,1}"));
  auto* crs_f_none = FindInstruction(module.get(), "crs_f.none");
  ASSERT_NE(crs_f_none, nullptr);
  EXPECT_THAT(crs_f_none, op::Sharding("{devices=[2]0,1}"));
  auto* crs_b_replicated = FindInstruction(module.get(), "crs_b.replicated");
  ASSERT_NE(crs_b_replicated, nullptr);
  EXPECT_THAT(crs_b_replicated, op::Sharding("{replicated}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(crs_f_tiled->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(crs_b_replicated->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(crs_f_tiled->sharding(), ShardingMetadata({}));
    EXPECT_THAT(crs_b_replicated->sharding(), ShardingMetadata({}));
  }
}

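// The `while_is_sharded` helper below runs propagation and then checks that
// the while op, the body root, and both the body and condition parameters
// all agree on a single tuple sharding, including its per-element metadata.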
metadata={op_name=\"b\"}}, " "{devices=[2,1]0,1 metadata={op_name=\"c\"}}}") .value(); body_root->set_sharding(sharding); while_is_sharded(module.get(), sharding.WithoutMetadata(), {{CreateMetadata("b")}, {CreateMetadata("c")}}); } { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); auto acc_1 = FindInstruction(module.get(), "acc.1"); EXPECT_NE(nullptr, acc_1); acc_1->set_sharding( ParseSharding("{devices=[2,1]0,1 metadata={op_name=\"b\"}}").value()); while_is_sharded( module.get(), ParseSharding("{{replicated}, {devices=[2,1]0,1}}").value(), {{}, {CreateMetadata("b")}}); } { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); auto acc_1 = FindInstruction(module.get(), "acc.1"); EXPECT_NE(nullptr, acc_1); acc_1->set_sharding( ParseSharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate " "metadata={op_name=\"b\"}}") .value()); auto p0 = FindInstruction(module.get(), "p0"); p0->set_sharding( ParseSharding("{devices=[1,2,2]0,2,1,3 last_tile_dim_replicate " "metadata={op_name=\"c\"}}") .value()); while_is_sharded(module.get(), ParseSharding("{{replicated}, " "{devices=[2,2]0,1,2,3}}") .value(), {{}, {CreateMetadata("c"), CreateMetadata("b")}}); } } TEST_F(ShardingPropagationTest, PropagateShardingInWhileCondition) { const char* const hlo_string = R"( HloModule module %cond { %vars.cond = (u32[], f32[]) parameter(0) %count.cond = u32[] get-tuple-element(%vars.cond), index=0 %limit = u32[] constant(10) ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT } %body { %vars = (u32[], f32[]) parameter(0) %count = u32[] get-tuple-element(%vars), index=0 %acc = f32[] get-tuple-element(%vars), index=1 %one = u32[] constant(1) %count.1 = u32[] add(u32[] %count, u32[] %one) %acc.1 = f32[] add(f32[] %acc, f32[] %acc) ROOT %tuple = (u32[], f32[]) tuple(%count.1, %acc.1) } ENTRY %entry { %p0 = f32[] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}} %zero = u32[] constant(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}} %init = (u32[], f32[]) tuple(%zero, %p0) ROOT %while = (u32[], f32[]) while(%init), body=%body, condition=%cond })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, false, {true}) .Run(module.get())); EXPECT_TRUE(changed); HloSharding single_sharding = ParseSharding("{devices=[2,2]<=[4] last_tile_dims={manual, replicated}}") .value(); HloSharding tuple_sharding = HloSharding::SingleTuple( module->entry_computation()->root_instruction()->shape(), single_sharding); for (const HloComputation* computation : module->computations()) { for (const HloInstruction* instruction : computation->instructions()) { EXPECT_TRUE(instruction->has_sharding()); EXPECT_EQ(instruction->sharding(), instruction->shape().IsTuple() ? 
TEST_F(ShardingPropagationTest, PropagateShardingInWhileCondition) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], f32[]) parameter(0)
  %count.cond = u32[] get-tuple-element(%vars.cond), index=0
  %limit = u32[] constant(10)
  ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
  %vars = (u32[], f32[]) parameter(0)
  %count = u32[] get-tuple-element(%vars), index=0
  %acc = f32[] get-tuple-element(%vars), index=1
  %one = u32[] constant(1)
  %count.1 = u32[] add(u32[] %count, u32[] %one)
  %acc.1 = f32[] add(f32[] %acc, f32[] %acc)
  ROOT %tuple = (u32[], f32[]) tuple(%count.1, %acc.1)
}
ENTRY %entry {
  %p0 = f32[] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
  %zero = u32[] constant(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
  %init = (u32[], f32[]) tuple(%zero, %p0)
  ROOT %while = (u32[], f32[]) while(%init), body=%body, condition=%cond
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, false, {true})
                        .Run(module.get()));
  EXPECT_TRUE(changed);
  HloSharding single_sharding =
      ParseSharding("{devices=[2,2]<=[4] last_tile_dims={manual, replicated}}")
          .value();
  HloSharding tuple_sharding = HloSharding::SingleTuple(
      module->entry_computation()->root_instruction()->shape(),
      single_sharding);
  for (const HloComputation* computation : module->computations()) {
    for (const HloInstruction* instruction : computation->instructions()) {
      EXPECT_TRUE(instruction->has_sharding());
      EXPECT_EQ(instruction->sharding(), instruction->shape().IsTuple()
                                             ? tuple_sharding
                                             : single_sharding);
    }
  }
}

TEST_P(ParameterizedMetadataTest, WhileGetShardingFromRecvInBody) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], f32[]) parameter(0)
  %count.cond = u32[] get-tuple-element(%vars.cond), index=0
  %limit = u32[] constant(10)
  ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
  %param = (u32[], f32[]) parameter(0)
  %count = u32[] get-tuple-element(%param), index=0
  %after-all = token[] after-all()
  %recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1, sharding={{maximal device=1 metadata={op_name="a"}}, {maximal device=1}, {maximal device=1}}
  %recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
  %data = f32[] get-tuple-element(%recv-done), index=0
  ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
  %p0 = f32[] parameter(0)
  %zero = u32[] constant(0)
  %init = (u32[], f32[]) tuple(%zero, %p0)
  %while = (u32[], f32[]) while(%init), body=%body, condition=%cond
  ROOT %result = f32[] get-tuple-element(%while), index=1
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  EXPECT_EQ(changed,
            !GetParam().propagate_metadata && !GetParam().clear_metadata);
  auto sharding =
      ParseSharding("{{maximal device=1}, {maximal device=1}}").value();
  auto while_instr = FindInstruction(module.get(), "while");
  ASSERT_NE(nullptr, while_instr);
  std::vector<const HloInstruction*> instructions{
      while_instr, while_instr->while_body()->root_instruction(),
      while_instr->while_body()->parameter_instruction(0),
      while_instr->while_condition()->parameter_instruction(0)};
  for (auto instr : instructions) {
    ASSERT_TRUE(instr->has_sharding());
    EXPECT_EQ(sharding, instr->sharding());
    for (const HloSharding& sub_sharding : instr->sharding().tuple_elements()) {
      EXPECT_THAT(sub_sharding, ShardingMetadata({}));
    }
  }
}

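// The next three tests pin an instruction to device 0 while the recv channel
// requires device 1; propagation must fail with a descriptive error rather
// than produce an inconsistent device assignment.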
TEST_P(ParameterizedMetadataTest, WhileConflictingShardingInBodyBeforeRecv) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], f32[]) parameter(0)
  %count.cond = u32[] get-tuple-element(%vars.cond), index=0
  %limit = u32[] constant(10)
  ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
  %param = (u32[], f32[]) parameter(0)
  %count = u32[] get-tuple-element(%param), index=0, sharding={maximal device=0 metadata={op_name="a"}}
  %after-all = token[] after-all()
  %recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1, sharding={{maximal device=1 metadata={op_name="b"}}, {maximal device=1}, {maximal device=1}}
  %recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
  %data = f32[] get-tuple-element(%recv-done), index=0
  ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
  %p0 = f32[] parameter(0)
  %zero = u32[] constant(0)
  %init = (u32[], f32[]) tuple(%zero, %p0)
  %while = (u32[], f32[]) while(%init), body=%body, condition=%cond
  ROOT %result = f32[] get-tuple-element(%while), index=1
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  auto result = ShardingPropagation(false, GetParam().propagate_metadata)
                    .Run(module.get());
  EXPECT_THAT(result.status().message(),
              ::testing::HasSubstr(
                  "Instruction: count is on device: 0, which conflicts with "
                  "device: 1 of channel instruction: recv"));
}

TEST_P(ParameterizedMetadataTest, WhileConflictingShardingInBodyAfterRecv) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], f32[]) parameter(0)
  %count.cond = u32[] get-tuple-element(%vars.cond), index=0
  %limit = u32[] constant(10)
  ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
  %param = (u32[], f32[]) parameter(0)
  %count = u32[] get-tuple-element(%param), index=0
  %after-all = token[] after-all()
  %recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1, sharding={{maximal device=1 metadata={op_name="a"}}, {maximal device=1}, {maximal device=1}}
  %recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
  %data = f32[] get-tuple-element(%recv-done), index=0, sharding={maximal device=0 metadata={op_name="b"}}
  ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
  %p0 = f32[] parameter(0)
  %zero = u32[] constant(0)
  %init = (u32[], f32[]) tuple(%zero, %p0)
  %while = (u32[], f32[]) while(%init), body=%body, condition=%cond
  ROOT %result = f32[] get-tuple-element(%while), index=1
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  auto result = ShardingPropagation(false, GetParam().propagate_metadata)
                    .Run(module.get());
  EXPECT_THAT(result.status().message(),
              ::testing::HasSubstr(
                  "Instruction: data is on device: 0, which conflicts with "
                  "device: 1 of channel instruction: recv"));
}

TEST_P(ParameterizedMetadataTest, WhileConflictingShardingOnWhileInstruction) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], f32[]) parameter(0)
  %count.cond = u32[] get-tuple-element(%vars.cond), index=0
  %limit = u32[] constant(10)
  ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
  %param = (u32[], f32[]) parameter(0)
  %count = u32[] get-tuple-element(%param), index=0
  %after-all = token[] after-all()
  %recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1, sharding={{maximal device=1 metadata={op_name="a"}}, {maximal device=1}, {maximal device=1}}
  %recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
  %data = f32[] get-tuple-element(%recv-done), index=0
  ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
  %p0 = f32[] parameter(0)
  %zero = u32[] constant(0)
  %init = (u32[], f32[]) tuple(%zero, %p0)
  %while = (u32[], f32[]) while(%init), body=%body, condition=%cond, sharding={{maximal device=0 metadata={op_name="b"}},{maximal device=0}}
  ROOT %result = f32[] get-tuple-element(%while), index=1
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  auto result = ShardingPropagation(false, GetParam().propagate_metadata)
                    .Run(module.get());
  EXPECT_THAT(result.status().message(),
              ::testing::HasSubstr(
                  "Instruction: while is on device: 0, which conflicts with "
                  "device: 1 of channel instruction: recv"));
}

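// A convolution inside a while body: the kernel is carried in the loop state,
// so the entry sharding of %p0 has to travel through concatenate,
// dynamic-slice, and reshape to reach %kernel inside the body.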
TEST_P(ParameterizedMetadataTest, WhileConv) {
  const char* const hlo_string = R"(
HloModule module
%cond {
  %vars.cond = (u32[], bf16[2, 2048, 768], bf16[128,512,2048], bf16[128,512,768], s32[]) parameter(0)
  %count.cond = u32[] get-tuple-element(%vars.cond), index=0
  %limit = u32[] constant(2)
  ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
  %param = (u32[], bf16[2, 2048, 768], bf16[128,512,2048], bf16[128,512,768], s32[]) parameter(0)
  %i0 = s32[] constant(0)
  %count = u32[] get-tuple-element(%param), index=0
  %gte0 = bf16[2,2048,768]{2,1,0} get-tuple-element(%param), index=1
  %index = s32[] get-tuple-element(%param), index=4
  %dys = bf16[1,2048,768]{2,1,0} dynamic-slice(%gte0, s32[] %index, s32[] %i0, s32[] %i0), dynamic_slice_sizes={1,2048,768}
  %kernel = bf16[2048, 768]{1,0} reshape(%dys)
  %lhs = bf16[128,512,2048]{2,1,0} get-tuple-element(%param), index=2, sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
  %reshape = bf16[2048,768,1]{2,1,0} reshape(bf16[2048,768]{1,0} %kernel)
  %convolution = bf16[128,512,768]{2,1,0} convolution(bf16[128,512,2048]{2,1,0} %lhs, bf16[2048,768,1]{2,1,0} %reshape), window={size=1}, dim_labels=0bf_io0->0bf, sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
  ROOT %tuple = (u32[], bf16[2,2048,768], bf16[128,512,2048], bf16[128,512,768], s32[]) tuple(%count, %gte0, %lhs, %convolution, %index)
}
ENTRY %entry {
  %p0 = bf16[2048,768] parameter(0), sharding={devices=[2,1,8]0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15 last_tile_dim_replicate}
  %p1 = bf16[128,512,2048] parameter(1), sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
  %p2 = bf16[128,512,768] parameter(2)
  %reshape0 = bf16[1,2048,768] reshape(%p0)
  %concat0 = bf16[2,2048,768] concatenate(%reshape0, %reshape0), dimensions={0}
  %zero = u32[] constant(0)
  %p3 = s32[] parameter(3)
  %init = (u32[], bf16[2, 2048, 768], bf16[128,512,2048], bf16[128,512,768], s32[]) tuple(%zero, %concat0, %p1, %p2, %p3)
  %while = (u32[], bf16[2, 2048, 768], bf16[128,512,2048], bf16[128,512,768], s32[]) while(%init), body=%body, condition=%cond
  ROOT %result = bf16[128,512,768] get-tuple-element(%while), index=3, sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* kernel = FindInstruction(module.get(), "kernel");
  ASSERT_NE(kernel, nullptr);
  EXPECT_THAT(kernel, op::Sharding("{devices=[2,1,8]0,2,4,6,8,10,12,14,1,3,5,"
                                   "7,9,11,13,15 last_tile_dim_replicate}"));
}

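// "AtFirstIteration" tests: aggressive propagation should not push the
// weaker, partially replicated operand sharding through the concatenate (or
// the dynamic-slice below) when the stronger sharding coming back from the
// add is available to win instead.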
TEST_P(ParameterizedMetadataTest, DoNotPassThroughConcatAtFirstIteration) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = bf16[16,2048,768] parameter(0), sharding={devices=[2,1,1,8]0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15 last_tile_dim_replicate}
  %concat = bf16[32,2048,768] concatenate(%p0, %p0), dimensions={0}
  %add = bf16[32,2048,768] add(%concat, %concat), sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
  ROOT %result = bf16[32,2048,768] copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* kernel = FindInstruction(module.get(), "concat");
  ASSERT_NE(kernel, nullptr);
  EXPECT_THAT(kernel, op::Sharding("{devices=[8,1,2]0,1,2,3,4,5,6,7,8,"
                                   "9,10,11,12,13,14,15}"));
}

TEST_P(ParameterizedMetadataTest, DoNotPassThroughConcatAtFirstIteration2) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = bf16[16,2048,768] parameter(0), sharding={devices=[1,2,1,8]0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15 last_tile_dim_replicate}
  %concat = bf16[32,2048,768] concatenate(%p0, %p0), dimensions={0}
  %add = bf16[32,2048,768] add(%concat, %concat), sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
  ROOT %result = bf16[32,2048,768] copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* kernel = FindInstruction(module.get(), "concat");
  ASSERT_NE(kernel, nullptr);
  EXPECT_THAT(kernel, op::Sharding("{devices=[8,1,2]0,1,2,3,4,5,6,7,8,"
                                   "9,10,11,12,13,14,15}"));
}

TEST_P(ParameterizedMetadataTest,
       DoNotPassThroughDynamicSliceAtFirstIteration) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = bf16[64,2048,768] parameter(0), sharding={devices=[2,1,1,8]0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15 last_tile_dim_replicate}
  %p1 = s32[] parameter(1)
  %i0 = s32[] constant(0)
  %dys = bf16[32,2048,768] dynamic-slice(%p0, s32[] %p1, s32[] %i0, s32[] %i0), dynamic_slice_sizes={32,2048,768}
  %add = bf16[32,2048,768] add(%dys, %dys), sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
  ROOT %result = bf16[32,2048,768] copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* kernel = FindInstruction(module.get(), "dys");
  ASSERT_NE(kernel, nullptr);
  EXPECT_THAT(kernel, op::Sharding("{devices=[8,1,2]0,1,2,3,4,5,6,7,8,"
                                   "9,10,11,12,13,14,15}"));
}

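// Dot propagation in both directions: forward from each annotated operand
// onto the matching non-contracting dimensions of the dot, and backward from
// %copy_back_prop_rhs into both operand copies.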
TEST_P(ParameterizedMetadataTest, Dot) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %param.0 = f32[8,256,128] parameter(0)
  %param.1 = f32[8,128,512] parameter(1)
  %param.2 = f32[8,128] parameter(2)
  %p0_copy_0 = f32[8,256,128] copy(%param.0), sharding={devices=[1,4,1]0,1,2,3 metadata={op_name="a"}}
  %p1_copy_0 = f32[8,128,512] copy(%param.1), sharding={devices=[1,1,4]0,1,2,3 metadata={op_name="b"}}
  %p2_copy = f32[8,128] copy(%param.2)
  %dot_prop_rhs = f32[8,256,512] dot(%p0_copy_0, %p1_copy_0), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}
  %dot_prop_lhs = f32[8,512,256] dot(%p1_copy_0, %p0_copy_0), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_contracting_dims={2}
  %dot_mat_vec = f32[8,256] dot(%p0_copy_0, %p2_copy), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}
  %p0_copy_1 = f32[8,256,128] copy(%param.0)
  %p1_copy_1 = f32[8,128,512] copy(%param.1)
  %dot_back_prop_rhs = f32[8,256,512] dot(%p0_copy_1, %p1_copy_1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}
  %copy_back_prop_rhs = f32[8,256,512] copy(%dot_back_prop_rhs), sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="c"}}
  ROOT %tuple = (f32[8,512,256], f32[8,256,512], f32[8,256], f32[8,256,512]) tuple(%dot_prop_lhs, %dot_prop_rhs, %dot_mat_vec, %copy_back_prop_rhs)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* dot_prop_rhs = FindInstruction(module.get(), "dot_prop_rhs");
  ASSERT_NE(dot_prop_rhs, nullptr);
  EXPECT_THAT(dot_prop_rhs, op::Sharding("{devices=[1,1,4]0,1,2,3}"));
  auto* dot_prop_lhs = FindInstruction(module.get(), "dot_prop_lhs");
  ASSERT_NE(dot_prop_lhs, nullptr);
  EXPECT_THAT(dot_prop_lhs, op::Sharding("{devices=[1,4,1]0,1,2,3}"));
  auto* dot_mat_vec = FindInstruction(module.get(), "dot_mat_vec");
  ASSERT_NE(dot_mat_vec, nullptr);
  EXPECT_THAT(dot_mat_vec, op::Sharding("{devices=[1,4]0,1,2,3}"));
  auto* p0_copy_1 = FindInstruction(module.get(), "p0_copy_1");
  ASSERT_NE(p0_copy_1, nullptr);
  EXPECT_THAT(
      p0_copy_1,
      op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}"));
  auto* p1_copy_1 = FindInstruction(module.get(), "p1_copy_1");
  ASSERT_NE(p1_copy_1, nullptr);
  EXPECT_THAT(
      p1_copy_1,
      op::Sharding("{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"));
  auto* dot_back_prop_rhs = FindInstruction(module.get(), "dot_back_prop_rhs");
  ASSERT_NE(dot_back_prop_rhs, nullptr);
  EXPECT_THAT(dot_back_prop_rhs, op::Sharding("{devices=[1,2,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(dot_prop_rhs->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
    EXPECT_THAT(dot_prop_lhs->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
    EXPECT_THAT(dot_mat_vec->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(p0_copy_1->sharding(),
                ShardingMetadata({CreateMetadata("c")}));
    EXPECT_THAT(p1_copy_1->sharding(),
                ShardingMetadata({CreateMetadata("c")}));
    EXPECT_THAT(dot_back_prop_rhs->sharding(),
                ShardingMetadata({CreateMetadata("c")}));
  } else {
    for (HloInstruction* instruction :
         {dot_prop_rhs, dot_prop_lhs, dot_mat_vec, p0_copy_1, p1_copy_1,
          dot_back_prop_rhs}) {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, DotTiledBatchDim) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[8,256,512] parameter(0)
  %p1 = f32[8,512,128] parameter(1)
  %add = f32[8,256,512] add(%p0, %p0)
  %dot = f32[8,256,128] dot(%add, %p1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}
  %res = f32[8,32768] reshape(%dot), sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}}
  ROOT %tuple = (f32[8,32768]) tuple(%res)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "add");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,1]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

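// The DotMergeOperands tests combine two compatible operand shardings into a
// single, finer sharding on the dot; when the merged result draws on both
// operands, the metadata of both origins is kept ("b" before "a" here).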
TEST_P(ParameterizedMetadataTest, DotMergeOperands) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[8,256,512] parameter(0), sharding={devices=[2,2,1,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}}
  %p1 = f32[8,128,512] parameter(1), sharding={devices=[2,2,1,2]0,2,1,3,4,6,5,7 last_tile_dim_replicate metadata={op_name="b"}}
  %dot = f32[8,256,128] dot(%p0, %p1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={2}
  ROOT %copy = f32[8,256,128] copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "dot");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,2]0,1,2,3,4,5,6,7}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, DotMergeOperands2) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[8,256,512] parameter(0), sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
  %p1 = f32[8,128,512] parameter(1), sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="b"}}
  %dot = f32[8,256,128] dot(%p0, %p1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={2}
  ROOT %copy = f32[8,256,128] copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "dot");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding(
          "{devices=[2,2,1,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, DotMergeOperands3) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[256,512] parameter(0), sharding={devices=[2,4]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
  %p1 = f32[128,512] parameter(1), sharding={devices=[4,2]0,4,2,6,3,7,1,5 metadata={op_name="b"}}
  %dot = f32[256,128] dot(%p0, %p1), lhs_contracting_dims={1}, rhs_contracting_dims={1}
  ROOT %copy = f32[256,128] copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "dot");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,4]0,2,3,1,4,6,7,5}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
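// The ShardBarrierFrom custom call should stop forward propagation out of its
// operand, so the dot below is expected to end up with no sharding at all.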
TEST_P(ParameterizedMetadataTest, ForwardDotWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[8,256,512] parameter(0), sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
  %p1 = f32[8,128,512] parameter(1)
  %shard-barrier-from = f32[8,256,512] custom-call(%p0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %dot = f32[8,256,128] dot(%shard-barrier-from, %p1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={2}
  ROOT %copy = f32[8,256,128] copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore, ShardingPropagation(true, GetParam().propagate_metadata)
                       .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "dot");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}

TEST_P(ParameterizedMetadataTest, BackwardDotWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[8,256,512] parameter(0), sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
  %p1 = f32[8,128,512] parameter(1)
  %copy1 = f32[8,128,512] copy(%p1)
  %shard-barrier-to = f32[8,128,512] custom-call(%copy1), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
  %dot = f32[8,256,128] dot(%p0, %shard-barrier-to), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={2}, sharding={devices=[2,1,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="b"}}
  ROOT %copy = f32[8,256,128] copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore, ShardingPropagation(true, GetParam().propagate_metadata)
                       .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "copy1");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{replicated}"));
}

TEST_P(ParameterizedMetadataTest, BackwardDotFromContracting) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[8,256,512] parameter(0), sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
  %p1 = f32[8,128,512] parameter(1)
  %copy1 = f32[8,128,512] copy(%p1)
  %dot = f32[8,256,128] dot(%p0, %copy1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={2}, sharding={devices=[2,1,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="b"}}
  ROOT %copy = f32[8,256,128] copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "copy1");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,2]0,1,2,3,4,5,6,7}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a"), CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
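// Backward propagation through the contracting dimension should preserve the
// manual subgroup in the resulting operand sharding.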
TEST_P(ParameterizedMetadataTest, BackwardDotFromContractingWithManual) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %dot {
  %p0 = f32[8,512] parameter(0), sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
  %p1 = f32[512,128] parameter(1)
  %copy1 = f32[512,128] copy(%p1)
  %dot = f32[8,128] dot(%p0, %copy1), lhs_batch_dims={}, rhs_batch_dims={}, lhs_contracting_dims={1}, rhs_contracting_dims={0}, sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dims={replicated, manual} metadata={op_name="b"}}
  ROOT %copy = f32[8,128] copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "copy1");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dims={manual}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ConvAsDotOnTrivialDimsForward) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %lhs = f32[128,1,1,1001] parameter(0), sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}}
  %rhs = f32[1,1,1024,1001] parameter(1), sharding={devices=[1,2,1,1]0,1 metadata={op_name="b"}}
  %convolution = f32[128,1,1,1024] convolution(%lhs, %rhs), window={size=1x1 rhs_reversal=1x1}, dim_labels=b01f_01oi->b01f
  ROOT %copy = f32[128,1,1,1024] copy(%convolution)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "convolution");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ConvAsDotForwardWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %lhs = f32[128,1,1,1001] parameter(0), sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}}
  %rhs = f32[1,1,1024,1001] parameter(1), sharding={devices=[1,2,1,1]0,1 metadata={op_name="b"}}
  %shard-barrier-from = f32[1,1,1024,1001] custom-call(%rhs), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %convolution = f32[128,1,1,1024] convolution(%lhs, %shard-barrier-from), window={size=1x1 rhs_reversal=1x1}, dim_labels=b01f_01oi->b01f
  ROOT %copy = f32[128,1,1,1024] copy(%convolution)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore, ShardingPropagation(true, GetParam().propagate_metadata)
                       .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "convolution");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{replicated}"));
}
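// Backward pass for a convolution that degenerates into a dot over trivial
// spatial dimensions: both operand copies are expected to come out replicated.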
TEST_P(ParameterizedMetadataTest, ConvAsDotOnTrivialDimsBackward) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[128,5,5,128] parameter(0)
  %lhs = f32[128,5,5,128] copy(%p0)
  %p1 = f32[5,5,128,768] parameter(1)
  %rhs = f32[5,5,128,768] copy(%p1)
  %convolution = f32[128,1,1,768] convolution(%lhs, %rhs), window={size=5x5}, dim_labels=b01f_01io->b01f, sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}}
  ROOT %copy = f32[128,1,1,768] copy(%convolution)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* lhs = FindInstruction(module.get(), "lhs");
  ASSERT_NE(lhs, nullptr);
  auto* rhs = FindInstruction(module.get(), "rhs");
  ASSERT_NE(rhs, nullptr);
  for (HloInstruction* instruction : {lhs, rhs}) {
    EXPECT_THAT(instruction, op::Sharding("{replicated}"));
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, ConvAsDotBackwardWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[128,5,5,128] parameter(0)
  %lhs = f32[128,5,5,128] copy(%p0)
  %p1 = f32[5,5,128,768] parameter(1)
  %rhs = f32[5,5,128,768] copy(%p1)
  %shard-barrier-from = f32[128,5,5,128] custom-call(%lhs), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %convolution = f32[128,1,1,768] convolution(%shard-barrier-from, %rhs), window={size=5x5}, dim_labels=b01f_01io->b01f, sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}}
  ROOT %copy = f32[128,1,1,768] copy(%convolution)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* lhs = FindInstruction(module.get(), "lhs");
  ASSERT_NE(lhs, nullptr);
  EXPECT_THAT(lhs, op::Sharding("{replicated}"));
}

TEST_P(ParameterizedMetadataTest,
       ConvolutionFilterIFOFPartitionedInputPartialReplicate) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[128,112,112,12] parameter(0)
  %lhs.copy = f32[128,112,112,12] copy(f32[128,112,112,12] %lhs), sharding={devices=[1,1,1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
  %rhs = f32[7,7,12,64] parameter(1)
  %rhs.copy = f32[7,7,12,64] copy(f32[7,7,12,64] %rhs), sharding={devices=[1,1,2,2]0,1,2,3 metadata={op_name="b"}}
  %conv = f32[128,56,56,64] convolution(f32[128,112,112,12] %lhs.copy, f32[7,7,12,64] %rhs.copy), window={size=7x7 stride=2x2 pad=3_3x3_3}, dim_labels=b01f_01io->b01f
  ROOT %copy = f32[128,56,56,64] copy(conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "conv");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding("{devices=[1,1,1,2,2]0,2,1,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
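// For this grouped convolution (feature_group_count=512), only the batch
// dimension of the LHS sharding is expected to survive onto the output, with
// the remaining mesh dimensions collapsed into replication.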
TEST_P(ParameterizedMetadataTest, ConvolutionDataParallelism) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
  p0 = f32[256,512,16,32] parameter(0), sharding={devices=[2,2,2,2]<=[16] metadata={op_name="lhs_sharding"}}
  p1 = f32[512,1,12,28] parameter(1), sharding={replicated metadata={op_name="rhs_sharding"}}
  conv = f32[256,512,5,5] convolution(p0, p1), window={size=12x28}, dim_labels=bf01_oi01->bf01, feature_group_count=512
  ROOT copy = f32[256,512,5,5] copy(conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "conv");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding("{devices=[2,1,1,1,8]<=[16] last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("lhs_sharding")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ConcatFromUserUnshardedDim) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[8,128] parameter(0)
  %p1 = f32[8,128] parameter(1)
  %c0 = f32[8,128] copy(%p0)
  %c1 = f32[8,128] copy(%p1)
  %concat = f32[16,128] concatenate(%c0, %c1), dimensions={0}, sharding={devices=[1,2]0,1 metadata={op_name="a"}}
  ROOT %tuple = (f32[16,128]) tuple(%concat)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* c0 = FindInstruction(module.get(), "c0");
  ASSERT_NE(c0, nullptr);
  auto* c1 = FindInstruction(module.get(), "c1");
  ASSERT_NE(c1, nullptr);
  for (HloInstruction* instruction : {c0, c1}) {
    EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, ConcatFromUserShardedDim) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[8,128] parameter(0)
  %p1 = f32[8,128] parameter(1)
  %c0 = f32[8,128] copy(%p0)
  %c1 = f32[8,128] copy(%p1)
  %concat = f32[16,128] concatenate(%c0, %c1), dimensions={0}, sharding={devices=[3,1]0,1,2 metadata={op_name="a"}}
  ROOT %tuple = (f32[16,128]) tuple(%concat)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* c0 = FindInstruction(module.get(), "c0");
  EXPECT_THAT(c0, op::Sharding("{devices=[2,1]0,1}"));
  ASSERT_NE(c0, nullptr);
  auto* c1 = FindInstruction(module.get(), "c1");
  ASSERT_NE(c1, nullptr);
  EXPECT_THAT(c1, op::Sharding("{devices=[2,1]1,2}"));
  for (HloInstruction* instruction : {c0, c1}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
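// When the concatenate's tiling does not line up with an operand, that operand
// may receive no sharding at all (c0 here) or a smaller tile assignment over a
// subset of the devices (c1).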
TEST_P(ParameterizedMetadataTest, ConcatFromUserShardedDimMaximalOperand) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
  %p0 = f32[8,128] parameter(0)
  %p1 = f32[24,128] parameter(1)
  %c0 = f32[8,128] copy(%p0)
  %c1 = f32[24,128] copy(%p1)
  %concat = f32[32,128] concatenate(%c0, %c1), dimensions={0}, sharding={devices=[4,1]0,1,2,3 metadata={op_name="a"}}
  ROOT %tuple = (f32[32,128]) tuple(%concat)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* c0 = FindInstruction(module.get(), "c0");
  ASSERT_NE(c0, nullptr);
  EXPECT_THAT(c0, op::NoSharding());
  auto* c1 = FindInstruction(module.get(), "c1");
  ASSERT_NE(c1, nullptr);
  EXPECT_THAT(c1, op::Sharding("{devices=[3,1]1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(c1->sharding(), ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(c1->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ReplicatedToSideEffecting) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry_computation {
  %const.0 = s32[] constant(0), sharding={replicated metadata={op_name="a"}}
  %const.1 = s32[] constant(2147483647), sharding={replicated metadata={op_name="b"}}
  %rng = s32[4]{0} rng(%const.0, %const.1), distribution=rng_uniform
  ROOT %root = (s32[4]{0}) tuple(%rng)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  EXPECT_EQ(changed,
            !GetParam().propagate_metadata && !GetParam().clear_metadata);
  auto* instruction = FindInstruction(module.get(), "rng");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::NoSharding());
}

TEST_P(ParameterizedMetadataTest, PartReplicatedTupleUser) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry_computation {
  %param.0 = f32[5] parameter(0)
  %param.1 = f32[7] parameter(1)
  %param.2 = f32[9] parameter(2)
  %tuple.0 = (f32[5], f32[7]) tuple(%param.0, %param.1)
  ROOT %tuple.1 = ((f32[5], f32[7]), f32[9]) tuple(%tuple.0, %param.2), sharding={{maximal device=0 metadata={op_name="a"}}, {replicated metadata={op_name="b"}}, {maximal device=1 metadata={op_name="c"}}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "tuple.0");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{{maximal device=0}, {replicated}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding().tuple_elements()[0],
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(instruction->sharding().tuple_elements()[1],
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    for (const HloSharding& sub_sharding :
         instruction->sharding().tuple_elements()) {
      EXPECT_THAT(sub_sharding, ShardingMetadata({}));
    }
  }
}
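// Conditional: the parameter shardings ("a", "b") and the constant sharding
// ("c") should propagate into both branch computations, through the called
// add/mul computations, and onto the conditional's result tuple.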
TEST_P(ParameterizedMetadataTest, Conditional) {
  const char* const hlo_string = R"(
HloModule module
%add-call {
  %x = f32[4,4] parameter(0)
  ROOT %add = f32[4,4] add(%x, %x)
}
%true_comp {
  %tp = (f32[3,5], f32[4,4]) parameter(0)
  %tgte.0 = f32[3,5] get-tuple-element(%tp), index=0
  %ttr = f32[5,3] transpose(%tgte.0), dimensions={1,0}
  %tgte.1 = f32[4,4] get-tuple-element(%tp), index=1
  %tadd = f32[4,4] call(%tgte.1), to_apply=%add-call
  ROOT %tr = (f32[5,3], f32[4,4]) tuple(%ttr, %tadd)
}
%mul-call {
  %y = f32[4,4] parameter(0)
  ROOT %mul = f32[4,4] multiply(%y, %y)
}
%false_comp {
  %fp = (f32[5,3], f32[4,4]) parameter(0)
  %fgte.0 = f32[5,3] get-tuple-element(%fp), index=0
  %fgte.1 = f32[4,4] get-tuple-element(%fp), index=1
  %fmul = f32[4,4] call(%fgte.1), to_apply=%mul-call
  ROOT %fr = (f32[5,3], f32[4,4]) tuple(%fgte.0, %fmul)
}
ENTRY entry {
  %cond = pred[] parameter(0)
  %tp.0 = f32[3,5] parameter(1), sharding={devices=[1,2]0,1 metadata={op_name="a"}}
  %fp.0 = f32[5,3] parameter(2), sharding={devices=[1,3]0,1,2 metadata={op_name="b"}}
  %constant = f32[4] constant({1,2,3,4}), sharding={devices=[4]0,1,2,3 metadata={op_name="c"}}
  %broadcast = f32[4,4] broadcast(%constant), dimensions={1}
  %add = f32[4,4] add(%broadcast, %broadcast)
  %true_param = (f32[3,5], f32[4,4]) tuple(%tp.0, %add)
  %false_param = (f32[5,3], f32[4,4]) tuple(%fp.0, %add)
  %conditional = (f32[5,3], f32[4,4]) conditional(%cond, %true_param, %false_param), true_computation=%true_comp, false_computation=%false_comp
  ROOT %root = f32[5,3] get-tuple-element(%conditional), index=0
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* tp = FindInstruction(module.get(), "tp");
  auto* tgte_0 = FindInstruction(module.get(), "tgte.0");
  auto* ttr = FindInstruction(module.get(), "ttr");
  auto* tgte_1 = FindInstruction(module.get(), "tgte.1");
  auto* tadd = FindInstruction(module.get(), "tadd");
  auto* tr = FindInstruction(module.get(), "tr");
  auto* fp = FindInstruction(module.get(), "fp");
  auto* fgte_0 = FindInstruction(module.get(), "fgte.0");
  auto* fgte_1 = FindInstruction(module.get(), "fgte.1");
  auto* fmul = FindInstruction(module.get(), "fmul");
  auto* fr = FindInstruction(module.get(), "fr");
  auto* x = FindInstruction(module.get(), "x");
  auto* add = FindInstruction(module.get(), "add");
  auto* y = FindInstruction(module.get(), "y");
  auto* mul = FindInstruction(module.get(), "mul");
  auto* conditional = FindInstruction(module.get(), "conditional");
  const std::vector<HloInstruction*> instructions(
      {tp, tgte_0, ttr, tgte_1, tadd, tr, fp, fgte_0, fgte_1, fmul, fr, x, add,
       y, mul, conditional});
  for (HloInstruction* instruction : instructions) {
    EXPECT_NE(instruction, nullptr);
    EXPECT_TRUE(instruction->has_sharding());
  }
  for (HloInstruction* instruction :
       {tgte_1, tadd, fgte_1, fmul, x, add, y, mul}) {
    EXPECT_THAT(instruction, op::Sharding("{devices=[1,4]0,1,2,3}"));
  }
  for (HloInstruction* instruction : {tr, fr, conditional, fp}) {
    EXPECT_THAT(instruction,
                op::Sharding("{{devices=[1,3]0,1,2}, {devices=[1,4]0,1,2,3}}"));
  }
  EXPECT_THAT(tp, op::Sharding("{{devices=[1,2]0,1}, {devices=[1,4]0,1,2,3}}"));
  EXPECT_THAT(tgte_0, op::Sharding("{devices=[1,2]0,1}"));
  EXPECT_THAT(ttr, op::Sharding("{devices=[2,1]0,1}"));
  EXPECT_THAT(fgte_0, op::Sharding("{devices=[1,3]0,1,2}"));
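  // With metadata propagation enabled, each propagated sharding should carry
  // the op_name of the annotation it originated from.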
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    for (HloInstruction* instruction :
         {tgte_1, tadd, fgte_1, fmul, x, add, y, mul}) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("c")}));
    }
    for (HloInstruction* instruction : {tr, fr, conditional, fp}) {
      const std::vector<HloSharding>& shardings =
          instruction->sharding().tuple_elements();
      EXPECT_THAT(shardings[0], ShardingMetadata({CreateMetadata("b")}));
      EXPECT_THAT(shardings[1], ShardingMetadata({CreateMetadata("c")}));
    }
    for (HloInstruction* instruction : {tgte_0, ttr}) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    }
    EXPECT_THAT(fgte_0->sharding(), ShardingMetadata({CreateMetadata("b")}));
  } else {
    for (HloInstruction* instruction : instructions) {
      if (instruction->sharding().IsTuple()) {
        for (const HloSharding& tuple_element :
             instruction->sharding().tuple_elements()) {
          EXPECT_THAT(tuple_element, ShardingMetadata({}));
        }
      } else {
        EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
      }
    }
  }
}

TEST_P(ParameterizedMetadataTest, TupleFromUser) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[13] parameter(0)
  %p1 = f32[15] parameter(1)
  %p2 = f32[17] parameter(2)
  %t0 = (f32[13], f32[15]) tuple(%p0, %p1)
  %t1 = ((f32[13], f32[15]), f32[17]) tuple(%t0, %p2)
  %gte.0 = (f32[13], f32[15]) get-tuple-element(%t1), index=0
  %gte.1 = f32[13] get-tuple-element(%gte.0), index=0
  %gte.2 = f32[15] get-tuple-element(%gte.0), index=1
  %gte.3 = f32[17] get-tuple-element(%t1), index=1
  ROOT %t2 = (f32[13], f32[15], f32[17]) tuple(%gte.1, %gte.2, %gte.3), sharding={{replicated metadata={op_name="a"}}, {devices=[2]0,1 metadata={op_name="b"}}, {devices=[3]1,2,3 metadata={op_name="c"}}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* t0 = FindInstruction(module.get(), "t0");
  ASSERT_NE(t0, nullptr);
  EXPECT_THAT(t0, op::Sharding("{{replicated}, {devices=[2]0,1}}"));
  auto* t1 = FindInstruction(module.get(), "t1");
  ASSERT_NE(t1, nullptr);
  EXPECT_THAT(
      t1, op::Sharding("{{replicated}, {devices=[2]0,1}, {devices=[3]1,2,3}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(t0->sharding().tuple_elements()[0],
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(t0->sharding().tuple_elements()[1],
                ShardingMetadata({CreateMetadata("b")}));
    EXPECT_THAT(t1->sharding().tuple_elements()[0],
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(t1->sharding().tuple_elements()[1],
                ShardingMetadata({CreateMetadata("b")}));
    EXPECT_THAT(t1->sharding().tuple_elements()[2],
                ShardingMetadata({CreateMetadata("c")}));
  } else {
    for (HloInstruction* instruction : {t0, t1}) {
      for (const HloSharding& sub_sharding :
           instruction->sharding().tuple_elements()) {
        EXPECT_THAT(sub_sharding, ShardingMetadata({}));
      }
    }
  }
}
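// Dynamic-slice tests: forward and backward propagation through dynamic-slice,
// including ShardBarrierFrom/ShardBarrierTo custom calls blocking each
// direction.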
TEST_P(ParameterizedMetadataTest, DynamicSliceForwardPassWithBarrier) {
  const char* hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[11,13,15] parameter(0)
  %c0 = f32[11,13,15] copy(%p0), sharding={devices=[1,1,2]0,1 metadata={op_name="a"}}
  %p1 = s32[] parameter(1)
  %i0 = s32[] constant(0)
  %shard-barrier-from = f32[11,13,15] custom-call(%c0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %ds = f32[11,1,15] dynamic-slice(%shard-barrier-from, %i0, %p1, %i0), dynamic_slice_sizes={11,1,15}
  ROOT %root = (f32[11,1,15]) tuple(%ds)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore, ShardingPropagation(false, GetParam().propagate_metadata)
                       .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "ds");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}

TEST_P(ParameterizedMetadataTest, DynamicSliceForwardPass) {
  const char* hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[11,13,15] parameter(0)
  %c0 = f32[11,13,15] copy(%p0), sharding={devices=[2,2,2]<=[8] metadata={op_name="a"}}
  %p1 = s32[] parameter(1)
  %i0 = s32[] constant(0)
  %ds = f32[11,1,15] dynamic-slice(%c0, %i0, %p1, %i0), dynamic_slice_sizes={11,1,15}
  ROOT %root = (f32[11,1,15]) tuple(%ds)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "ds");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding(
          "{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, DynamicSliceBackwardPass) {
  const char* hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[11,13,15] parameter(0)
  %c0 = f32[11,13,15] copy(%p0)
  %p1 = s32[] parameter(1)
  %i0 = s32[] constant(0)
  %ds = f32[11,1,15] dynamic-slice(%c0, %i0, %p1, %i0), dynamic_slice_sizes={11,1,15}, sharding={devices=[2,2,2]<=[8] metadata={op_name="a"}}
  ROOT %root = (f32[11,1,15]) tuple(%ds)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "c0");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding(
          "{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, DynamicSliceBackwardPassWithBarrier) {
  const char* hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[11,13,15] parameter(0)
  %c0 = f32[11,13,15] copy(%p0)
  %p1 = s32[] parameter(1)
  %i0 = s32[] constant(0)
  %shard-barrier-to = f32[11,13,15] custom-call(%c0), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
  %ds = f32[11,1,15] dynamic-slice(%shard-barrier-to, %i0, %p1, %i0), dynamic_slice_sizes={11,1,15}, sharding={devices=[1,1,2]0,1 metadata={op_name="a"}}
  ROOT %root = (f32[11,1,15]) tuple(%ds)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore, ShardingPropagation(false, GetParam().propagate_metadata)
                       .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "c0");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}
TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceForwardPassBase) {
  const char* hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[11,13,15] parameter(0)
  %c0 = f32[11,13,15] copy(%p0), sharding={devices=[2,2,2]<=[8] metadata={op_name="a"}}
  %p1 = f32[11,1,15] parameter(1)
  %c1 = f32[11,1,15] copy(%p1)
  %p2 = s32[] parameter(2)
  %i0 = s32[] constant(0)
  %dus = f32[11,13,15] dynamic-update-slice(%c0, %c1, %i0, %p2, %i0)
  ROOT %root = (f32[11,13,15]) tuple(%dus)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* dus = FindInstruction(module.get(), "dus");
  ASSERT_NE(dus, nullptr);
  EXPECT_THAT(dus, op::Sharding("{devices=[2,2,2]<=[8]}"));
  auto* c1 = FindInstruction(module.get(), "c1");
  ASSERT_NE(c1, nullptr);
  EXPECT_THAT(
      c1, op::Sharding(
              "{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
  for (HloInstruction* instruction : {dus, c1}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceForwardPassWithBarrier) {
  const char* hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[11,13,15] parameter(0)
  %c0 = f32[11,13,15] copy(%p0), sharding={devices=[1,1,2]0,1 metadata={op_name="a"}}
  %p1 = f32[11,1,15] parameter(1)
  %c1 = f32[11,1,15] copy(%p1)
  %p2 = s32[] parameter(2)
  %i0 = s32[] constant(0)
  %shard-barrier-from = f32[11,13,15] custom-call(%c0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %dus = f32[11,13,15] dynamic-update-slice(%shard-barrier-from, %c1, %i0, %p2, %i0)
  ROOT %root = (f32[11,13,15]) tuple(%dus)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore, ShardingPropagation(false, GetParam().propagate_metadata)
                       .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* dus = FindInstruction(module.get(), "dus");
  ASSERT_NE(dus, nullptr);
  EXPECT_FALSE(dus->has_sharding());
}
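// The sharding can also originate from the update operand; the updated
// dimension again ends up replicated on the dynamic-update-slice and on the
// base operand's copy.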
TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceForwardPassUpdate) {
  const char* hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[11,13,15] parameter(0)
  %c0 = f32[11,13,15] copy(%p0)
  %p1 = f32[11,1,15] parameter(1)
  %c1 = f32[11,1,15] copy(%p1), sharding={devices=[2,2,2]<=[8] metadata={op_name="a"}}
  %p2 = s32[] parameter(2)
  %i0 = s32[] constant(0)
  %dus = f32[11,13,15] dynamic-update-slice(%c0, %c1, %i0, %p2, %i0)
  ROOT %root = (f32[11,13,15]) tuple(%dus)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* dus = FindInstruction(module.get(), "dus");
  ASSERT_NE(dus, nullptr);
  EXPECT_THAT(
      dus, op::Sharding(
               "{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
  auto* c0 = FindInstruction(module.get(), "c0");
  ASSERT_NE(c0, nullptr);
  EXPECT_THAT(
      c0, op::Sharding(
              "{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
  for (HloInstruction* instruction : {dus, c0}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceBackwardPass) {
  const char* hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[11,13,15] parameter(0)
  %c0 = f32[11,13,15] copy(%p0)
  %p1 = f32[11,1,15] parameter(1)
  %c1 = f32[11,1,15] copy(%p1)
  %p2 = s32[] parameter(2)
  %i0 = s32[] constant(0)
  %dus = f32[11,13,15] dynamic-update-slice(%c0, %c1, %i0, %p2, %i0), sharding={devices=[2,2,2]<=[8] metadata={op_name="a"}}
  ROOT %root = (f32[11,13,15]) tuple(%dus)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* c0 = FindInstruction(module.get(), "c0");
  ASSERT_NE(c0, nullptr);
  EXPECT_THAT(c0, op::Sharding("{devices=[2,2,2]<=[8]}"));
  auto* c1 = FindInstruction(module.get(), "c1");
  ASSERT_NE(c1, nullptr);
  EXPECT_THAT(
      c1, op::Sharding(
              "{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
  for (HloInstruction* instruction : {c0, c1}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceBackwardPassWithBarrier) {
  const char* hlo_string = R"(
HloModule module
ENTRY %entry {
  %p0 = f32[11,13,15] parameter(0)
  %c0 = f32[11,13,15] copy(%p0)
  %p1 = f32[11,1,15] parameter(1)
  %c1 = f32[11,1,15] copy(%p1)
  %p2 = s32[] parameter(2)
  %i0 = s32[] constant(0)
  %shard-barrier-to = f32[11,13,15] custom-call(%c0), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
  %dus = f32[11,13,15] dynamic-update-slice(%shard-barrier-to, %c1, %i0, %p2, %i0), sharding={devices=[1,1,2]0,1 metadata={op_name="a"}}
  ROOT %root = (f32[11,13,15]) tuple(%dus)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore, ShardingPropagation(false, GetParam().propagate_metadata)
                       .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* c0 = FindInstruction(module.get(), "c0");
  ASSERT_NE(c0, nullptr);
  EXPECT_FALSE(c0->has_sharding());
}
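// Einsum-style convolutions: these tests use convolution ops whose dim_labels
// make them equivalent to einsums and check which operand's sharding wins.
// The WithOutput variant additionally exercises the
// allow_root_sharding_propagation option.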
TEST_P(ParameterizedMetadataTestWithOutput, EinsumLHSBatchPartitioned) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64] parameter(0)
  %lhs.copy = f32[32,24,64] copy(%lhs), sharding={devices=[2,1,1]0,1 metadata={op_name="a"}}
  %rhs = f32[32,39296,64] parameter(1)
  %rhs.copy = f32[32,39296,64] copy(%rhs)
  %conv = f32[32,24,39296] convolution(%lhs.copy, %rhs.copy), dim_labels=0bf_0oi->0bf, window={size=32 stride=31 lhs_dilate=32}
  ROOT %copy = f32[32,24,39296] copy(%conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata,
                          {GetParam().allow_root_sharding_propagation})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* rhs_copy = FindInstruction(module.get(), "rhs.copy");
  ASSERT_NE(rhs_copy, nullptr);
  EXPECT_THAT(rhs_copy, op::Sharding("{devices=[2,1,1]0,1}"));
  auto* conv = FindInstruction(module.get(), "conv");
  ASSERT_NE(conv, nullptr);
  EXPECT_THAT(conv, op::Sharding("{devices=[2,1,1]0,1}"));
  for (HloInstruction* instruction : {rhs_copy, conv}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
  if (GetParam().allow_root_sharding_propagation) {
    EXPECT_THAT(module->entry_computation()->root_instruction(),
                op::Sharding("{devices=[2,1,1]0,1}"));
  }
}

TEST_P(ParameterizedMetadataTest, EinsumOutputBatchPartitioned) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64] parameter(0)
  %lhs.copy = f32[32,24,64] copy(%lhs)
  %rhs = f32[32,39296,64] parameter(1)
  %rhs.copy = f32[32,39296,64] copy(%rhs)
  %conv = f32[32,24,39296] convolution(%lhs.copy, %rhs.copy), dim_labels=0bf_0oi->0bf, window={size=32 stride=31 lhs_dilate=32}, sharding={devices=[2,1,1]0,1 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* lhs_copy = FindInstruction(module.get(), "lhs.copy");
  ASSERT_NE(lhs_copy, nullptr);
  EXPECT_THAT(lhs_copy, op::Sharding("{devices=[2,1,1]0,1}"));
  auto* rhs_copy = FindInstruction(module.get(), "rhs.copy");
  ASSERT_NE(rhs_copy, nullptr);
  EXPECT_THAT(rhs_copy, op::Sharding("{devices=[2,1,1]0,1}"));
  for (HloInstruction* instruction : {lhs_copy, rhs_copy}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, EinsumLHSNonContractingPartitioned) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64,128] parameter(0)
  %lhs.copy = f32[32,24,64,128] copy(%lhs), sharding={devices=[1,2,1,2]0,1,2,3 metadata={op_name="a"}}
  %rhs = f32[32,39296,64,1] parameter(1)
  %rhs.copy = f32[32,39296,64,1] copy(%rhs)
  %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy), dim_labels=0bf1_0oi1->0bf1, window={size=32x1 stride=31x1 lhs_dilate=32x1}
  ROOT %copy = f32[32,24,39296,128] copy(%conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "conv");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
TEST_P(ParameterizedMetadataTest, EinsumOutputLHSNonContractingPartitioned) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64,128] parameter(0)
  %lhs.copy = f32[32,24,64,128] copy(%lhs)
  %rhs = f32[32,39296,64,1] parameter(1)
  %rhs.copy = f32[32,39296,64,1] copy(%rhs)
  ROOT %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy), dim_labels=0bf1_0oi1->0bf1, window={size=32x1 stride=31x1 lhs_dilate=32x1}, sharding={devices=[1,2,1,2]0,1,2,3 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "lhs.copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, EinsumRHSNonContractingPartitioned) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64,1] parameter(0)
  %lhs.copy = f32[32,24,64,1] copy(%lhs)
  %rhs = f32[32,39296,64,128] parameter(1)
  %rhs.copy = f32[32,39296,64,128] copy(%rhs), sharding={devices=[1,2,1,2]0,1,2,3 metadata={op_name="a"}}
  %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy), dim_labels=0bf1_0oi1->0bf1, window={size=32x128 stride=31x1 pad=0_0x127_127 lhs_dilate=32x1 rhs_reversal=0x1}
  ROOT %copy = f32[32,24,39296,128] copy(%conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "conv");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, EinsumOutputRHSNonContractingPartitioned) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64,1] parameter(0)
  %lhs.copy = f32[32,24,64,1] copy(%lhs)
  %rhs = f32[32,39296,64,128] parameter(1)
  %rhs.copy = f32[32,39296,64,128] copy(%rhs)
  ROOT %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy), dim_labels=0bf1_0oi1->0bf1, window={size=32x128 stride=31x1 pad=0_0x127_127 lhs_dilate=32x1 rhs_reversal=0x1}, sharding={devices=[1,1,2,2]0,1,2,3 metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "rhs.copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
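// When both operands carry annotations, propagation should prefer the
// sharding of the larger operand (here "b" on the much larger RHS), and
// batch-dimension shardings should win over non-batch ones.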
TEST_P(ParameterizedMetadataTest, EinsumChooseLargerOperand) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64,1] parameter(0)
  %lhs.copy = f32[32,24,64,1] copy(%lhs), sharding={devices=[1,4,1,1]0,1,2,3 metadata={op_name="a"}}
  %rhs = f32[32,39296,64,128] parameter(1)
  %rhs.copy = f32[32,39296,64,128] copy(%rhs), sharding={devices=[1,2,1,2]0,1,2,3 metadata={op_name="b"}}
  %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy), dim_labels=0bf1_0oi1->0bf1, window={size=32x128 stride=31x1 pad=0_0x127_127 lhs_dilate=32x1 rhs_reversal=0x1}
  ROOT %copy = f32[32,24,39296,128] copy(%conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "conv");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, EinsumChooseBatchFirst) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %lhs = f32[32,24,64,1] parameter(0)
  %lhs.copy = f32[32,24,64,1] copy(%lhs), sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}}
  %rhs = f32[32,39296,64,128] parameter(1)
  %rhs.copy = f32[32,39296,64,128] copy(%rhs), sharding={devices=[2,1,1,1]0,1 metadata={op_name="b"}}
  %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy), dim_labels=0bf1_0oi1->0bf1, window={size=32x128 stride=31x1 pad=0_0x127_127 lhs_dilate=32x1 rhs_reversal=0x1}
  ROOT %copy = f32[32,24,39296,128] copy(%conv)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "conv");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,1,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
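// Gather tests: shardings can propagate into a gather from either the index
// operand or the data operand, and back out of the gather in both directions,
// with and without partial replication.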
op::Sharding("{devices=[2,1,1]0,1}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, GatherFromIndex_PartialReplicate) { const char* hlo_string = R"( HloModule module ENTRY entry { %input = f32[2,9] parameter(0), sharding={replicated metadata={op_name="a"}} %indices = s32[3] parameter(1), sharding={devices=[2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}} %gather = f32[3,9] gather(%input, %indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,9} ROOT %copy = f32[3,9] copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(false, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "gather"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, GatherFromDataOperand) { const char* hlo_string = R"( HloModule module ENTRY entry { %input = f32[2,9] parameter(0), sharding={devices=[1,2]0,1 metadata={op_name="a"}} %indices = s32[3] parameter(1), sharding={replicated metadata={op_name="b"}} %gather = f32[3,9] gather(%input, %indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,9} ROOT %copy = f32[3,9] copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "gather"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, GatherFromDataOperand_PartialReplicate) { const char* hlo_string = R"( HloModule module ENTRY entry { %input = f32[2,9] parameter(0), sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}} %indices = s32[3] parameter(1), sharding={replicated metadata={op_name="b"}} %gather = f32[3,9] gather(%input, %indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,9} ROOT %copy = f32[3,9] copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "gather"); 
TEST_P(ParameterizedMetadataTest, GatherFromDataOperand_PartialReplicate) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[2,9] parameter(0), sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
  %indices = s32[3] parameter(1), sharding={replicated metadata={op_name="b"}}
  %gather = f32[3,9] gather(%input, %indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,9}
  ROOT %copy = f32[3,9] copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "gather");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, GatherToIndex) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[2,9] parameter(0), sharding={replicated metadata={op_name="a"}}
  %p1 = s32[3] parameter(1)
  %indices = s32[3] copy(%p1)
  ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,9}, sharding={devices=[2,1]0,1 metadata={op_name="b"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "indices");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, GatherToIndex_PartialReplicate) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[2,9] parameter(0), sharding={replicated metadata={op_name="a"}}
  %p1 = s32[3] parameter(1)
  %indices = s32[3] copy(%p1)
  ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,9}, sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "indices");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[2,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
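// Higher-rank index operands: the gather output sharding should map back onto
// the index dimensions according to index_vector_dim.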
TEST_P(ParameterizedMetadataTest, GatherToIndex2) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %input = bf16[2,4819,4] parameter(0), sharding={replicated metadata={op_name="a"}}
  %p1 = s32[2,1000,2] parameter(1)
  %indices = s32[2,1000,2] copy(%p1)
  ROOT %gather = bf16[2,1000,4] gather(bf16[2,4819,4] %input, s32[2,1000,2] %indices), offset_dims={2}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=2, slice_sizes={1,1,4}, sharding={devices=[1,2,1]0,1 metadata={op_name="b"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "indices");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, GatherToIndex2_PartialReplicate) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %input = bf16[2,4819,4] parameter(0), sharding={replicated metadata={op_name="a"}}
  %p1 = s32[2,1000,2] parameter(1)
  %indices = s32[2,1000,2] copy(%p1)
  ROOT %gather = bf16[2,1000,4] gather(bf16[2,4819,4] %input, s32[2,1000,2] %indices), offset_dims={2}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=2, slice_sizes={1,1,4}, sharding={devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "indices");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, GatherToIndex3) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %input = bf16[2,4819,4] parameter(0), sharding={replicated metadata={op_name="a"}}
  %p1 = s32[2,2,1000] parameter(1)
  %indices = s32[2,2,1000] copy(%p1)
  ROOT %gather = bf16[2,1000,4] gather(bf16[2,4819,4] %input, s32[2,2,1000] %indices), offset_dims={2}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={1,1,4}, sharding={devices=[1,2,1]0,1 metadata={op_name="b"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "indices");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}
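// Backward propagation from a gather's output sharding into its data operand,
// followed by the scatter analogues of the data-operand tests.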
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "input");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, GatherToDataOperand_PartialReplicate) {
  const char* hlo_string = R"(
HloModule module
ENTRY entry {
  %p0 = f32[2,9] parameter(0)
  %input = f32[2,9] copy(%p0)
  %indices = s32[3] parameter(1),
    sharding={replicated metadata={op_name="a"}}
  ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
    collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
    slice_sizes={1,9},
    sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "input");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, DataOperandToScatter) {
  const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
  %input = f32[2,9] parameter(0),
    sharding={devices=[1,2]0,1 metadata={op_name="a"}}
  %indices = s32[3] parameter(1),
    sharding={replicated metadata={op_name="b"}}
  %updates = f32[3,9] parameter(2),
    sharding={replicated metadata={op_name="c"}}
  %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add,
    update_window_dims={1}, inserted_window_dims={0},
    scatter_dims_to_operand_dims={0}, index_vector_dim=1
  ROOT %copy = f32[2,9] copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "scatter");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, DataOperandToScatter_PartialReplicate) {
  const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
  %input = f32[2,9] parameter(0),
    sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
  %indices = s32[3] parameter(1),
    sharding={replicated metadata={op_name="b"}}
  %updates = f32[3,9] parameter(2),
    sharding={replicated metadata={op_name="c"}}
  %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add,
    update_window_dims={1}, inserted_window_dims={0},
    scatter_dims_to_operand_dims={0}, index_vector_dim=1
  ROOT %copy = f32[2,9] copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "scatter");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, DataOperandToScatter_Variadic) {
  const char* const hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
  lhs.0 = f32[] parameter(0)
  lhs.1 = f32[] parameter(1)
  rhs.0 = f32[] parameter(2)
  rhs.1 = f32[] parameter(3)
  sum.0 = f32[] add(lhs.0, rhs.0)
  sum.1 = f32[] add(lhs.1, rhs.1)
  ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
  %input.0 = f32[2,9] parameter(0),
    sharding={devices=[1,4]0,1,2,3 metadata={op_name="a"}}
  %input.1 = f32[2,9] parameter(1),
    sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
  %indices = s32[3] parameter(2),
    sharding={replicated metadata={op_name="c"}}
  %updates.0 = f32[3,9] parameter(3),
    sharding={replicated metadata={op_name="d"}}
  %updates.1 = f32[3,9] parameter(4),
    sharding={replicated metadata={op_name="e"}}
  %scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices,
    %updates.0, %updates.1), to_apply=add, update_window_dims={1},
    inserted_window_dims={0}, scatter_dims_to_operand_dims={0},
    index_vector_dim=1
  ROOT %copy = (f32[2,9],f32[2,9]) copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "scatter");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{{devices=[1,4]0,1,2,3}, {devices=[1,2,2]0,1,2,3 "
                           "last_tile_dim_replicate}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding().tuple_elements()[0],
                ShardingMetadata({CreateMetadata("a")}));
    EXPECT_THAT(instruction->sharding().tuple_elements()[1],
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, UpdateOperandToScatter) {
  const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
  %input = f32[2,9] parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %indices = s32[3] parameter(1),
    sharding={replicated metadata={op_name="b"}}
  %updates = f32[3,9] parameter(2), sharding={devices=[1,2]0,1
    metadata={op_name="c"}}
  %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add,
    update_window_dims={1}, inserted_window_dims={0},
    scatter_dims_to_operand_dims={0}, index_vector_dim=1
  ROOT %copy = f32[2,9] copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "scatter");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("c")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, UpdateOperandToScatter_PartialReplicate) {
  const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
  %input = f32[2,9] parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %indices = s32[3] parameter(1),
    sharding={replicated metadata={op_name="b"}}
  %updates = f32[3,9] parameter(2),
    sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="c"}}
  %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add,
    update_window_dims={1}, inserted_window_dims={0},
    scatter_dims_to_operand_dims={0}, index_vector_dim=1
  ROOT %copy = f32[2,9] copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "scatter");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("c")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, UpdateOperandToScatter_Variadic) {
  const char* const hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
  lhs.0 = f32[] parameter(0)
  lhs.1 = f32[] parameter(1)
  rhs.0 = f32[] parameter(2)
  rhs.1 = f32[] parameter(3)
  sum.0 = f32[] add(lhs.0, rhs.0)
  sum.1 = f32[] add(lhs.1, rhs.1)
  ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
  %input.0 = f32[2,9] parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %input.1 = f32[2,9] parameter(1),
    sharding={replicated metadata={op_name="b"}}
  %indices = s32[3] parameter(2),
    sharding={replicated metadata={op_name="c"}}
  %updates.0 = f32[3,9] parameter(3),
    sharding={devices=[1,4]0,1,2,3 metadata={op_name="d"}}
  %updates.1 = f32[3,9] parameter(4),
    sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="e"}}
  %scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices,
    %updates.0, %updates.1), to_apply=add, update_window_dims={1},
    inserted_window_dims={0}, scatter_dims_to_operand_dims={0},
    index_vector_dim=1
  ROOT %copy = (f32[2,9],f32[2,9]) copy(%scatter)
})";
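  // For a variadic scatter, each update operand's sharding should propagate
  // to the corresponding tuple element of the scatter result, which is what
  // the tuple-sharding assertion below checks.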
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "scatter");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{{devices=[1,4]0,1,2,3}, {devices=[1,2,2]0,1,2,3 "
                           "last_tile_dim_replicate}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding().tuple_elements()[0],
                ShardingMetadata({CreateMetadata("d")}));
    EXPECT_THAT(instruction->sharding().tuple_elements()[1],
                ShardingMetadata({CreateMetadata("e")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ScatterToDataOperand_PartialReplicate) {
  const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
  %p0 = f32[2,9] parameter(0)
  %input = f32[2,9] copy(%p0)
  %indices = s32[3] parameter(1),
    sharding={replicated metadata={op_name="a"}}
  %updates = f32[3,9] parameter(2),
    sharding={replicated metadata={op_name="b"}}
  ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add,
    update_window_dims={1}, inserted_window_dims={0},
    scatter_dims_to_operand_dims={0}, index_vector_dim=1,
    sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="c"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "input");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("c")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ScatterToDataOperand) {
  const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
  %p0 = f32[2,9] parameter(0)
  %input = f32[2,9] copy(%p0)
  %indices = s32[3] parameter(1),
    sharding={replicated metadata={op_name="a"}}
  %updates = f32[3,9] parameter(2),
    sharding={replicated metadata={op_name="b"}}
  ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add,
    update_window_dims={1}, inserted_window_dims={0},
    scatter_dims_to_operand_dims={0}, index_vector_dim=1,
    sharding={devices=[1,2]0,1 metadata={op_name="c"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "input");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
  if (GetParam().propagate_metadata
      && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("c")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ScatterToDataOperand_Variadic) {
  const char* const hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
  lhs.0 = f32[] parameter(0)
  lhs.1 = f32[] parameter(1)
  rhs.0 = f32[] parameter(2)
  rhs.1 = f32[] parameter(3)
  sum.0 = f32[] add(lhs.0, rhs.0)
  sum.1 = f32[] add(lhs.1, rhs.1)
  ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
  %p0 = f32[2,9] parameter(0)
  %input.0 = f32[2,9] copy(%p0)
  %p1 = f32[2,9] parameter(1)
  %input.1 = f32[2,9] copy(%p1)
  %indices = s32[3] parameter(2),
    sharding={replicated metadata={op_name="a"}}
  %updates.0 = f32[3,9] parameter(3),
    sharding={replicated metadata={op_name="b"}}
  %updates.1 = f32[3,9] parameter(4),
    sharding={replicated metadata={op_name="c"}}
  ROOT %scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices,
    %updates.0, %updates.1), to_apply=add, update_window_dims={1},
    inserted_window_dims={0}, scatter_dims_to_operand_dims={0},
    index_vector_dim=1,
    sharding={{devices=[1,4]0,1,2,3 metadata={op_name="d"}},
              {devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="e"}}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "input.0");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,4]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("d")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
  instruction = FindInstruction(module.get(), "input.1");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("e")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ScatterToUpdateOperand_PartialReplicate) {
  const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
  %input = f32[2,9] parameter(0)
  %indices = s32[3] parameter(1),
    sharding={replicated metadata={op_name="a"}}
  %p2 = f32[3,9] parameter(2)
  %updates = f32[3,9] copy(%p2)
  ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add,
    update_window_dims={1}, inserted_window_dims={0},
    scatter_dims_to_operand_dims={0}, index_vector_dim=1,
    sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "updates");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ScatterToUpdateOperand) {
  const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
  %input = f32[2,9] parameter(0)
  %indices = s32[3] parameter(1),
    sharding={replicated metadata={op_name="a"}}
  %p2 = f32[3,9] parameter(2)
  %updates = f32[3,9] copy(%p2)
  ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add,
    update_window_dims={1}, inserted_window_dims={0},
    scatter_dims_to_operand_dims={0}, index_vector_dim=1,
    sharding={devices=[1,2]0,1 metadata={op_name="b"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "updates");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ScatterToUpdateOperand_Variadic) {
  const char* const hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
  lhs.0 = f32[] parameter(0)
  lhs.1 = f32[] parameter(1)
  rhs.0 = f32[] parameter(2)
  rhs.1 = f32[] parameter(3)
  sum.0 = f32[] add(lhs.0, rhs.0)
  sum.1 = f32[] add(lhs.1, rhs.1)
  ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
  %input.0 = f32[2,9] parameter(0)
  %input.1 = f32[2,9] parameter(1)
  %indices = s32[3] parameter(2),
    sharding={replicated metadata={op_name="a"}}
  %p3 = f32[3,9] parameter(3)
  %updates.0 = f32[3,9] copy(%p3)
  %p4 = f32[3,9] parameter(4)
  %updates.1 = f32[3,9] copy(%p4)
  ROOT %scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices,
    %updates.0, %updates.1), to_apply=add, update_window_dims={1},
    inserted_window_dims={0}, scatter_dims_to_operand_dims={0},
    index_vector_dim=1,
    sharding={{devices=[1,4]0,1,2,3 metadata={op_name="b"}},
              {devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="c"}}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "updates.0");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,4]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
  instruction = FindInstruction(module.get(), "updates.1");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[1,2,2]0,1,2,3 "
                           "last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("c")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex) {
  const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
  %input = f32[2,9] parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %p1 = s32[3] parameter(1),
    sharding={replicated metadata={op_name="b"}}
  %indices = s32[3] copy(%p1)
  %updates = f32[3,9] parameter(2),
    sharding={devices=[2,1]0,1 metadata={op_name="c"}}
  ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add,
    update_window_dims={1}, inserted_window_dims={0},
    scatter_dims_to_operand_dims={0}, index_vector_dim=1,
    sharding={replicated metadata={op_name="d"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "indices");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("c")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex2) {
  const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
  %input = f32[2,9] parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %p1 = s32[1,3] parameter(1),
    sharding={replicated metadata={op_name="b"}}
  %indices = s32[1,3] copy(%p1)
  %updates = f32[3,9] parameter(2),
    sharding={devices=[2,1]0,1 metadata={op_name="c"}}
  ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add,
    update_window_dims={1}, inserted_window_dims={0},
    scatter_dims_to_operand_dims={0}, index_vector_dim=0,
    sharding={replicated metadata={op_name="d"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "indices");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("c")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex_PartialReplicate) {
  const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
  %input = f32[2,9] parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %p1 = s32[3] parameter(1),
    sharding={replicated metadata={op_name="b"}}
  %indices =
    s32[3] copy(%p1)
  %updates = f32[3,9] parameter(2),
    sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="c"}}
  ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add,
    update_window_dims={1}, inserted_window_dims={0},
    scatter_dims_to_operand_dims={0}, index_vector_dim=1,
    sharding={replicated metadata={op_name="d"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "indices");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[2,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("c")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex_RankMismatch) {
  const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
  %input = f32[1,24,24,24,3,3] parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %p1 = s32[1,24,24,24,5] parameter(1),
    sharding={replicated metadata={op_name="b"}}
  %indices = s32[1,24,24,24,5] copy(%p1)
  %updates = f32[1,24,24,24,3] parameter(2),
    sharding={devices=[1,2,2,2,1]0,1,2,3,4,5,6,7 metadata={op_name="c"}}
  %scatter = f32[1,24,24,24,3,3] scatter(%input, %indices, %updates),
    to_apply=add, update_window_dims={4},
    inserted_window_dims={0,1,2,3,4},
    scatter_dims_to_operand_dims={0,1,2,3,4}, index_vector_dim=4,
    sharding={replicated metadata={op_name="d"}}
  ROOT %copy = f32[1,24,24,24,3,3] copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "indices");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[1,2,2,2,1]0,1,2,3,4,5,6,7}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("c")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex_Variadic) {
  const char* const hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
  lhs.0 = f32[] parameter(0)
  lhs.1 = f32[] parameter(1)
  rhs.0 = f32[] parameter(2)
  rhs.1 = f32[] parameter(3)
  sum.0 = f32[] add(lhs.0, rhs.0)
  sum.1 = f32[] add(lhs.1, rhs.1)
  ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
  %input.0 = f32[2,9] parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %input.1 = f32[2,9] parameter(1),
    sharding={replicated metadata={op_name="b"}}
  %p2 = s32[3,3] parameter(2),
    sharding={replicated metadata={op_name="c"}}
  %indices = s32[3,3] copy(%p2)
  %updates.0 = f32[3,3,9] parameter(3),
    sharding={devices=[2,1,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="d"}}
  %updates.1 = f32[3,3,9] parameter(4),
    sharding={devices=[1,2,1,2]0,2,1,3 last_tile_dim_replicate metadata={op_name="e"}}
  ROOT %scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices,
    %updates.0, %updates.1), to_apply=add, update_window_dims={2},
    inserted_window_dims={0}, scatter_dims_to_operand_dims={0},
    index_vector_dim=2,
    sharding={{replicated metadata={op_name="d"}},
              {replicated metadata={op_name="e"}}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "indices");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("d"), CreateMetadata("e")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ScatterIndexToUpdate) {
  const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
  %input = f32[2,9] parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %indices = s32[3] parameter(1),
    sharding={devices=[2]0,1 metadata={op_name="b"}}
  %p2 = f32[3,9] parameter(2),
    sharding={replicated metadata={op_name="c"}}
  %updates = f32[3,9] copy(%p2)
  ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add,
    update_window_dims={1}, inserted_window_dims={0},
    scatter_dims_to_operand_dims={0}, index_vector_dim=1,
    sharding={replicated metadata={op_name="d"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "updates");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,1]0,1}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ScatterIndexToUpdate_PartialReplicate) {
  const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
  %input = f32[2,9] parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %indices = s32[3] parameter(1),
    sharding={devices=[2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
  %p2 = f32[3,9] parameter(2),
    sharding={replicated metadata={op_name="c"}}
  %updates = f32[3,9] copy(%p2)
  ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add,
    update_window_dims={1}, inserted_window_dims={0},
    scatter_dims_to_operand_dims={0}, index_vector_dim=1,
    sharding={replicated metadata={op_name="d"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false,
          GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "updates");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction,
              op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ScatterIndexToUpdate2_PartialReplicate) {
  const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
  %input = bf16[15,8] parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %indices = s32[8,1,1] parameter(1),
    sharding={devices=[2,1,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="b"}}
  %p2 = bf16[8,1,8] parameter(2),
    sharding={replicated metadata={op_name="c"}}
  %updates = bf16[8,1,8] copy(%p2)
  ROOT %scatter = bf16[15,8]{1,0} scatter(bf16[15,8] %input,
    s32[8,1,1] %indices, bf16[8,1,8] %updates), update_window_dims={2},
    inserted_window_dims={0}, scatter_dims_to_operand_dims={0},
    index_vector_dim=2, to_apply=%add,
    sharding={replicated metadata={op_name="d"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "updates");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding(
          "{devices=[2,1,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("b")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ScatterIndexToUpdate_Variadic) {
  const char* const hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
  lhs.0 = f32[] parameter(0)
  lhs.1 = f32[] parameter(1)
  rhs.0 = f32[] parameter(2)
  rhs.1 = f32[] parameter(3)
  sum.0 = f32[] add(lhs.0, rhs.0)
  sum.1 = f32[] add(lhs.1, rhs.1)
  ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
  %input.0 = f32[2,9] parameter(0),
    sharding={replicated metadata={op_name="a"}}
  %input.1 = f32[2,9] parameter(1),
    sharding={replicated metadata={op_name="b"}}
  %indices = s32[3,3] parameter(2),
    sharding={devices=[2,2]0,1,2,3 metadata={op_name="c"}}
  %p3 = f32[3,3,9] parameter(3),
    sharding={replicated metadata={op_name="d"}}
  %updates.0 = f32[3,3,9] copy(%p3)
  %p4 = f32[3,3,9] parameter(4),
    sharding={replicated metadata={op_name="e"}}
  %updates.1 = f32[3,3,9] copy(%p4)
  ROOT %scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices,
    %updates.0, %updates.1), to_apply=add, update_window_dims={2},
    inserted_window_dims={0}, scatter_dims_to_operand_dims={0},
    index_vector_dim=2,
    sharding={replicated metadata={op_name="d"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(false, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1,
                 module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "updates.0");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,1]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("c")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
  instruction = FindInstruction(module.get(), "updates.1");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,1]0,1,2,3}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("c")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, PartialShardingOnElementwise) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
  %p0 = f32[2,9] parameter(0),
    sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
  %p1 = f32[2,9] parameter(1),
    sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate metadata={op_name="b"}}
  %lhs = f32[2,9] copy(%p0)
  %rhs = f32[2,9] copy(%p1)
  %add = f32[2,9] add(%lhs, %rhs)
  ROOT %copy = f32[2,9] copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* lhs = FindInstruction(module.get(), "lhs");
  ASSERT_NE(lhs, nullptr);
  EXPECT_THAT(lhs, op::Sharding("{devices=[2,2]0,2,1,3}"));
  auto* rhs = FindInstruction(module.get(), "rhs");
  ASSERT_NE(rhs, nullptr);
  EXPECT_THAT(rhs, op::Sharding("{devices=[2,2]0,2,1,3}"));
  auto* add = FindInstruction(module.get(), "add");
  ASSERT_NE(add, nullptr);
  EXPECT_THAT(add, op::Sharding("{devices=[2,2]0,2,1,3}"));
  for (HloInstruction* instruction : {lhs, rhs, add}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, PartialShardingOnElementwise2) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
  %p0 = f32[2,9] parameter(0),
    sharding={devices=[1,2,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}}
  %p1 = f32[2,9] parameter(1),
    sharding={devices=[2,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="b"}}
  %lhs = f32[2,9] copy(%p0)
  %rhs = f32[2,9] copy(%p1)
  %add = f32[2,9] add(%lhs, %rhs)
  ROOT %copy = f32[2,9] copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* lhs = FindInstruction(module.get(), "lhs");
  ASSERT_NE(lhs, nullptr);
  EXPECT_THAT(
      lhs,
      op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
  auto* rhs = FindInstruction(module.get(), "rhs");
  ASSERT_NE(rhs, nullptr);
  EXPECT_THAT(
      rhs,
      op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
  auto* add = FindInstruction(module.get(), "add");
  ASSERT_NE(add, nullptr);
  EXPECT_THAT(
      add,
      op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(lhs->sharding(),
                ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")}));
    EXPECT_THAT(rhs->sharding(),
                ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")}));
    EXPECT_THAT(add->sharding(),
                ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")}));
  } else {
    for (HloInstruction* instruction : {lhs, rhs}) {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, PartialShardingTransposeForwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %transpose {
  %param = f32[7,11,13]{2,1,0} parameter(0),
    sharding={devices=[2,1,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}}
  %transpose = f32[11,13,7]{2,1,0} transpose(%param), dimensions={1,2,0}
  ROOT %copy = f32[11,13,7]{2,1,0} copy(%transpose)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "transpose");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding(
          "{devices=[1,2,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, PartialShardingTransposeBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %transpose {
  %param = f32[7,11,13]{2,1,0} parameter(0)
  %copy = f32[7,11,13]{2,1,0} copy(%param)
  ROOT %transpose = f32[11,13,7]{2,1,0} transpose(%copy), dimensions={1,2,0},
    sharding={devices=[1,2,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding(
          "{devices=[2,1,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, GatherForwardPassWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0}
  %shard-barrier-from.0 = s32[8,4,2,2]{3,2,1,0} custom-call(%parameter.0),
    custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
  %shard-barrier-from.1 = s32[2,8,4]{2,1,0} custom-call(%concatenate.19),
    custom_call_target="ShardBarrierFrom",
    custom_call_has_side_effect=true
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %shard-barrier-from.0,
    s32[2,8,4]{2,1,0} %shard-barrier-from.1), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      std::ignore,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  auto* instruction = FindInstruction(module.get(), "gather");
  ASSERT_NE(instruction, nullptr);
  EXPECT_FALSE(instruction->has_sharding());
}

TEST_P(ParameterizedMetadataTest, GatherBackwardPassWithBarrier) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %copy.p = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
  %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0}
  %shard-barrier-to = s32[8,4,2,2]{3,2,1,0} custom-call(%copy.p),
    custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %shard-barrier-to,
    s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2},
    sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* concatenate = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(concatenate, nullptr);
  EXPECT_THAT(concatenate, op::Sharding("{devices=[1,8,1]0,1,4,5,2,3,6,7}"));
  auto* copy_p = FindInstruction(module.get(), "copy.p");
  ASSERT_NE(copy_p, nullptr);
  EXPECT_THAT(copy_p, op::Sharding("{replicated}"));
}

TEST_F(ShardingPropagationTest, GatherExplicitBatchDimsFromOperandToResult) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[10,3,14,4] parameter(0), sharding={devices=[2,2,2,2]<=[16]}
  %indices = s32[14,10,6,2] parameter(1)
  ROOT %gather = f32[14,10,6,4] gather(%input, %indices), offset_dims={3},
    collapsed_slice_dims={1}, operand_batching_dims={0,2},
    start_indices_batching_dims={1,0}, start_index_map={1,3},
    index_vector_dim=3, slice_sizes={1,1,1,4}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, true, {true}).Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Sharding("{devices=[2,2,1,2,2]<=[2,2,2,2]T(2,0,3,1) "
                           "last_tile_dim_replicate}"));
}

TEST_F(ShardingPropagationTest, GatherExplicitBatchDimsFromIndicesToResult) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[10,3,14,4] parameter(0)
  %indices = s32[14,10,6,2] parameter(1), sharding={devices=[2,2,2,2]<=[16]}
  ROOT %gather = f32[14,10,6,4] gather(%input, %indices), offset_dims={3},
    collapsed_slice_dims={1}, operand_batching_dims={0,2},
    start_indices_batching_dims={1,0}, start_index_map={1,3},
    index_vector_dim=3, slice_sizes={1,1,1,4}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, true, {true}).Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Sharding("{devices=[2,2,2,1,2]<=[16] last_tile_dim_replicate}"));
}

TEST_F(ShardingPropagationTest, GatherBackwardWithExplicitBatchDims) {
  const char* const hlo_string = R"(
HloModule module
ENTRY entry {
  %input = f32[10,3,14,4] parameter(0)
  %indices = s32[14,10,6,2] parameter(1)
  ROOT %gather = f32[14,10,6,4] gather(%input, %indices), offset_dims={3},
    collapsed_slice_dims={1}, operand_batching_dims={0,2},
    start_indices_batching_dims={1,0}, start_index_map={1,3},
    index_vector_dim=3, slice_sizes={1,1,1,4},
    sharding={devices=[2,2,2,2]<=[16]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, true, {true}, {true, true})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{devices=[2,1,2,2,2]<=[2,2,2,2]T(1,0,3,2) "
                           "last_tile_dim_replicate}"));
  EXPECT_THAT(
      module->entry_computation()->parameter_instruction(1),
      op::Sharding("{devices=[2,2,2,1,2]<=[16] last_tile_dim_replicate}"));
}

TEST_F(ShardingPropagationTest, ScatterExplicitBatchDimsFromOperandToResult) {
  const char* const hlo_string = R"(
HloModule module
min (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT min = f32[] minimum(lhs, rhs)
}
ENTRY entry {
  %input = f32[10,6,14,4] parameter(0), sharding={devices=[2,2,2,2]<=[16]}
  %indices = s32[14,10,6,2] parameter(1)
  %updates = f32[14,10,6,2] parameter(2)
  ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),
    to_apply=min, update_window_dims={3}, inserted_window_dims={1},
    scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},
    scatter_indices_batching_dims={1,0}, index_vector_dim=3
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, true, {true}).Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Sharding("{devices=[2,2,2,2]<=[16]}"));
}

TEST_F(ShardingPropagationTest, ScatterExplicitBatchDimsFromIndicesToResult) {
  const char* const hlo_string = R"(
HloModule module
min (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT min = f32[] minimum(lhs, rhs)
}
ENTRY entry {
  %input = f32[10,6,14,4] parameter(0)
  %indices = s32[14,10,6,2] parameter(1), sharding={devices=[2,2,2,2]<=[16]}
  %updates = f32[14,10,6,2] parameter(2)
  ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),
    to_apply=min, update_window_dims={3}, inserted_window_dims={1},
    scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},
    scatter_indices_batching_dims={1,0}, index_vector_dim=3
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, true, {true}).Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Sharding(
          "{devices=[2,1,2,1,4]<=[2,2,4]T(1,0,2) last_tile_dim_replicate}"));
}

TEST_F(ShardingPropagationTest, ScatterExplicitBatchDimsFromUpdatesToResult) {
  const char* const hlo_string = R"(
HloModule module
min (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT min = f32[] minimum(lhs, rhs)
}
ENTRY entry {
  %input = f32[10,6,14,4] parameter(0)
  %indices = s32[14,10,6,2] parameter(1)
  %updates = f32[14,10,6,4] parameter(2), sharding={devices=[2,2,2,2]<=[16]}
  ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),
    to_apply=min, update_window_dims={3}, inserted_window_dims={1},
    scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},
    scatter_indices_batching_dims={1,0}, index_vector_dim=3
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, true, {true}).Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Sharding("{devices=[2,1,2,2,2]<=[2,2,2,2]T(1,0,3,2) "
                           "last_tile_dim_replicate}"));
}

TEST_F(ShardingPropagationTest, ScatterBackwardWithExplicitBatchDims) {
  const char* const hlo_string = R"(
HloModule module
min (lhs: f32[], rhs: f32[]) -> f32[] {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT min = f32[] minimum(lhs, rhs)
}
ENTRY entry {
  %input = f32[10,6,14,4] parameter(0)
  %indices = s32[14,10,6,2] parameter(1)
  %updates = f32[14,10,6,4] parameter(2)
  ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),
    to_apply=min, update_window_dims={3}, inserted_window_dims={1},
    scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},
    scatter_indices_batching_dims={1,0}, index_vector_dim=3,
    sharding={devices=[2,2,2,2]<=[16]}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, true, {true}, {true, true, true})
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{devices=[2,2,2,2]<=[16]}"));
  EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
              op::Sharding("{devices=[2,2,1,1,4]<=[2,2,2,2]T(2,0,1,3) "
                           "last_tile_dim_replicate}"));
  EXPECT_THAT(module->entry_computation()->parameter_instruction(2),
              op::Sharding("{devices=[2,2,1,2,2]<=[2,2,2,2]T(2,0,3,1) "
                           "last_tile_dim_replicate}"));
}

TEST_P(ParameterizedMetadataTest, ParallelGatherFromOperandForwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0}
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true,
          GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "gather");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ParallelGatherFromIndexForwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
    sharding={devices=[1,8,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0}
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "gather");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ParallelGatherBackwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %copy.p = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
  %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0}
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %copy.p,
    s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2},
    sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* concatenate = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(concatenate, nullptr);
  EXPECT_THAT(concatenate, op::Sharding("{devices=[1,8,1]0,1,4,5,2,3,6,7}"));
  auto* copy_p = FindInstruction(module.get(), "copy.p");
  ASSERT_NE(copy_p, nullptr);
  EXPECT_THAT(copy_p, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
  for (HloInstruction* instruction : {concatenate, copy_p}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
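      // Both the concatenated indices and the copied data operand should
      // carry the metadata of the user-annotated gather sharding.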
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest, ParallelGatherBackwardPass2) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0)
  %copy.p = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
  %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0}
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[4,8,2,2]{3,2,1,0} %copy.p,
    s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,
    slice_sizes={1,1,2,2},
    sharding={devices=[1,4,1,1]0,1,4,5 metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* concatenate = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(concatenate, nullptr);
  EXPECT_THAT(concatenate, op::Sharding("{devices=[1,1,4]0,1,4,5}"));
  auto* copy_p = FindInstruction(module.get(), "copy.p");
  ASSERT_NE(copy_p, nullptr);
  EXPECT_THAT(copy_p, op::Sharding("{devices=[4,1,1,1]0,1,4,5}"));
  for (HloInstruction* instruction : {concatenate, copy_p}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest,
       PartialShardingParallelGatherFromOperandForwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %module {
  %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
    sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
  %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
    s32[1,8,4]{2,1,0} %iota2), dimensions={0}
  %gather = s32[8,4,2,2]{3,2,1,0} gather(
    s32[8,4,2,2]{3,2,1,0} %parameter.0,
    s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
    collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
    slice_sizes={1,1,2,2}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(true, GetParam().propagate_metadata)
          .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "gather");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding(
          "{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest,
       PartialShardingParallelGatherFromIndexForwardPass) {
  const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1, sharding={devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}} %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %gather = s32[8,4,2,2]{3,2,1,0} gather( s32[8,4,2,2]{3,2,1,0} %parameter.0, s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "gather"); ASSERT_NE(instruction, nullptr); EXPECT_THAT( instruction, op::Sharding( "{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, PartialShardingParallelGatherBackwardPass) { const char* const hlo_string = R"( HloModule module ENTRY %module { %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %copy.p = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %gather = s32[8,4,2,2]{3,2,1,0} gather( s32[8,4,2,2]{3,2,1,0} %copy.p, s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2}, sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* concatenate = FindInstruction(module.get(), "concatenate"); ASSERT_NE(concatenate, nullptr); EXPECT_THAT( concatenate, op::Sharding( "{devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); auto* copy_p = FindInstruction(module.get(), "copy.p"); ASSERT_NE(copy_p, nullptr); EXPECT_THAT( copy_p, op::Sharding( "{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); for (HloInstruction* instruction : {concatenate, copy_p}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, PartialShardingParallelGatherBackwardPass2) { const char* const hlo_string = R"( HloModule module ENTRY %module { %parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0) %copy.p = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %gather = s32[8,4,2,2]{3,2,1,0} gather( s32[4,8,2,2]{3,2,1,0} %copy.p, s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0, slice_sizes={1,1,2,2}, sharding={devices=[1,2,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* concatenate = FindInstruction(module.get(), "concatenate"); ASSERT_NE(concatenate, nullptr); EXPECT_THAT( concatenate, op::Sharding("{devices=[1,1,2,2]0,1,4,5 last_tile_dim_replicate}")); auto* copy_p = FindInstruction(module.get(), "copy.p"); ASSERT_NE(copy_p, nullptr); EXPECT_THAT( copy_p, op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}")); for (HloInstruction* instruction : {concatenate, copy_p}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, ScatterForwardPassWithBarrier) { const char* const hlo_string = R"( HloModule module add (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT sum = s32[] add(lhs, rhs) } ENTRY %module { %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}} %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1) %shard-barrier-from.0 = s32[8,4,2,2]{3,2,1,0} custom-call(%parameter.0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true %shard-barrier-from.1 = s32[2,8,4]{2,1,0} custom-call(%concatenate), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true %shard-barrier-from.2 = s32[8,4,2,2]{3,2,1,0} custom-call(%parameter.1), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true %scatter = s32[8,4,2,2]{3,2,1,0} scatter( s32[8,4,2,2]{3,2,1,0} %shard-barrier-from.0, s32[2,8,4]{2,1,0} %shard-barrier-from.1, s32[8,4,2,2]{3,2,1,0} %shard-barrier-from.2), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0 ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( std::ignore, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); auto* instruction = FindInstruction(module.get(), "scatter"); ASSERT_NE(instruction, nullptr); EXPECT_FALSE(instruction->has_sharding()); } TEST_P(ParameterizedMetadataTest, ScatterBackwardPassWithBarrier) { const char* const hlo_string = R"( HloModule module add (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT sum = s32[] add(lhs, rhs) } ENTRY 
%module { %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %copy.p0 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1) %copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1) %shard-barrier-to.0 = s32[8,4,2,2]{3,2,1,0} custom-call(%copy.p0), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true %scatter = s32[8,4,2,2]{3,2,1,0} scatter( s32[8,4,2,2]{3,2,1,0} %shard-barrier-to.0, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %copy.p1), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0, sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* concatenate = FindInstruction(module.get(), "concatenate"); ASSERT_NE(concatenate, nullptr); EXPECT_THAT(concatenate, op::Sharding("{devices=[1,8,1]0,1,4,5,2,3,6,7}")); auto* copy_p0 = FindInstruction(module.get(), "copy.p0"); ASSERT_NE(copy_p0, nullptr); EXPECT_THAT(copy_p0, op::Sharding("{replicated}")); auto* copy_p1 = FindInstruction(module.get(), "copy.p1"); ASSERT_NE(copy_p1, nullptr); EXPECT_THAT(copy_p1, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}")); } TEST_P(ParameterizedMetadataTest, ParallelScatterFromOperandForwardPass) { const char* const hlo_string = R"( HloModule module add (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT sum = s32[] add(lhs, rhs) } ENTRY %module { %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}} %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1) %scatter = s32[8,4,2,2]{3,2,1,0} scatter( s32[8,4,2,2]{3,2,1,0} %parameter.0, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %parameter.1), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0 ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "scatter"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ParallelScatterFromIndexForwardPass) { const char* const hlo_string = 
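// Forward pass: the {devices=[1,8,1]0,1,4,5,2,3,6,7} sharding annotated on
// the %iota index component should propagate through the concatenated
// indices onto the scatter, tiling its first output dimension eightfold.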
R"( HloModule module add (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT sum = s32[] add(lhs, rhs) } ENTRY %module { %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1, sharding={devices=[1,8,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}} %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1) %scatter = s32[8,4,2,2]{3,2,1,0} scatter( s32[8,4,2,2]{3,2,1,0} %parameter.0, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %parameter.1), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0 ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "scatter"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ParallelScatterFromUpdateForwardPass) { const char* const hlo_string = R"( HloModule module add (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT sum = s32[] add(lhs, rhs) } ENTRY %module { %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1), sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}} %scatter = s32[8,4,2,2]{3,2,1,0} scatter( s32[8,4,2,2]{3,2,1,0} %parameter.0, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %parameter.1), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0 ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "scatter"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ParallelScatterBackwardPass) { const char* const hlo_string = R"( HloModule module add (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT sum = s32[] add(lhs, rhs) } ENTRY %module { %parameter.0 = 
s32[8,4,2,2]{3,2,1,0} parameter(0) %copy.p0 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1) %copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1) %scatter = s32[8,4,2,2]{3,2,1,0} scatter( s32[8,4,2,2]{3,2,1,0} %copy.p0, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %copy.p1), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0, sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* concatenate = FindInstruction(module.get(), "concatenate"); ASSERT_NE(concatenate, nullptr); EXPECT_THAT(concatenate, op::Sharding("{devices=[1,8,1]0,1,4,5,2,3,6,7}")); auto* copy_p0 = FindInstruction(module.get(), "copy.p0"); ASSERT_NE(copy_p0, nullptr); EXPECT_THAT(copy_p0, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}")); auto* copy_p1 = FindInstruction(module.get(), "copy.p1"); ASSERT_NE(copy_p1, nullptr); EXPECT_THAT(copy_p1, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}")); for (HloInstruction* instruction : {concatenate, copy_p0, copy_p1}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, ParallelScatterBackwardPass2) { const char* const hlo_string = R"( HloModule module add (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT sum = s32[] add(lhs, rhs) } ENTRY %module { %parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0) %copy.p0 = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1) %copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1) %scatter = s32[4,8,2,2]{3,2,1,0} scatter( s32[4,8,2,2]{3,2,1,0} %copy.p0, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %copy.p1), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={1,0}, index_vector_dim=0, sharding={devices=[4,1,1,1]0,1,4,5 metadata={op_name="a"}} ROOT %copy = s32[4,8,2,2]{3,2,1,0} copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* concatenate = FindInstruction(module.get(), "concatenate"); ASSERT_NE(concatenate, nullptr); EXPECT_THAT(concatenate, op::Sharding("{devices=[1,1,4]0,1,4,5}")); auto* copy_p0 = FindInstruction(module.get(), "copy.p0"); ASSERT_NE(copy_p0, nullptr); EXPECT_THAT(copy_p0, 
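// copy.p0 (the scatter operand) should pick up the same [4,1,1,1] tiling
// as the annotated scatter result.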
op::Sharding("{devices=[4,1,1,1]0,1,4,5}")); auto* copy_p1 = FindInstruction(module.get(), "copy.p1"); ASSERT_NE(copy_p1, nullptr); EXPECT_THAT(copy_p1, op::Sharding("{devices=[1,4,1,1]0,1,4,5}")); for (HloInstruction* instruction : {concatenate, copy_p0, copy_p1}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, PartialShardingParallelScatterFromOperandForwardPass) { const char* const hlo_string = R"( HloModule module add (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT sum = s32[] add(lhs, rhs) } ENTRY %module { %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}} %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1) %scatter = s32[8,4,2,2]{3,2,1,0} scatter( s32[8,4,2,2]{3,2,1,0} %parameter.0, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %parameter.1), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0 ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "scatter"); ASSERT_NE(instruction, nullptr); EXPECT_THAT( instruction, op::Sharding( "{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, PartialShardingParallelScatterFromIndexForwardPass) { const char* const hlo_string = R"( HloModule module add (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT sum = s32[] add(lhs, rhs) } ENTRY %module { %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1, sharding={devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}} %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1) %scatter = s32[8,4,2,2]{3,2,1,0} scatter( s32[8,4,2,2]{3,2,1,0} %parameter.0, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %parameter.1), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0 ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, 
module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "scatter"); ASSERT_NE(instruction, nullptr); EXPECT_THAT( instruction, op::Sharding( "{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, PartialShardingParallelScatterFromUpdateForwardPass) { const char* const hlo_string = R"( HloModule module add (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT sum = s32[] add(lhs, rhs) } ENTRY %module { %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1), sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}} %scatter = s32[8,4,2,2]{3,2,1,0} scatter( s32[8,4,2,2]{3,2,1,0} %parameter.0, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %parameter.1), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0 ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "scatter"); ASSERT_NE(instruction, nullptr); EXPECT_THAT( instruction, op::Sharding( "{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, PartialShardingParallelScatterBackwardPass) { const char* const hlo_string = R"( HloModule module add (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT sum = s32[] add(lhs, rhs) } ENTRY %module { %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %copy.p0 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1) %copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1) %scatter = s32[8,4,2,2]{3,2,1,0} scatter( s32[8,4,2,2]{3,2,1,0} %copy.p0, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %copy.p1), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0, sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, 
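// First constructor argument is the pass's is_spmd flag; the second,
// taken from the test parameter, controls sharding-metadata propagation.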
GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* concatenate = FindInstruction(module.get(), "concatenate"); ASSERT_NE(concatenate, nullptr); EXPECT_THAT( concatenate, op::Sharding( "{devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); auto* copy_p0 = FindInstruction(module.get(), "copy.p0"); ASSERT_NE(copy_p0, nullptr); EXPECT_THAT( copy_p0, op::Sharding( "{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); auto* copy_p1 = FindInstruction(module.get(), "copy.p1"); ASSERT_NE(copy_p1, nullptr); EXPECT_THAT( copy_p1, op::Sharding( "{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); for (HloInstruction* instruction : {concatenate, copy_p0, copy_p1}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, PartialShardingParallelScatterBackwardPass2) { const char* const hlo_string = R"( HloModule module add (lhs: s32[], rhs: s32[]) -> s32[] { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT sum = s32[] add(lhs, rhs) } ENTRY %module { %parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0) %copy.p0 = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1) %copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1) %scatter = s32[4,8,2,2]{3,2,1,0} scatter( s32[4,8,2,2]{3,2,1,0} %copy.p0, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %copy.p1), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={1,0}, index_vector_dim=0, sharding={devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}} ROOT %copy = s32[4,8,2,2]{3,2,1,0} copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* concatenate = FindInstruction(module.get(), "concatenate"); ASSERT_NE(concatenate, nullptr); EXPECT_THAT( concatenate, op::Sharding("{devices=[1,1,2,2]0,1,4,5 last_tile_dim_replicate}")); auto* copy_p0 = FindInstruction(module.get(), "copy.p0"); ASSERT_NE(copy_p0, nullptr); EXPECT_THAT( copy_p0, op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}")); auto* copy_p1 = FindInstruction(module.get(), "copy.p1"); ASSERT_NE(copy_p1, nullptr); EXPECT_THAT( copy_p1, op::Sharding("{devices=[1,2,1,1,2]0,1,4,5 last_tile_dim_replicate}")); for (HloInstruction* instruction : {concatenate, copy_p0, copy_p1}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, ParallelScatterFromOperandForwardPass_Variadic) { const char* const hlo_string = R"( HloModule module add (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) { lhs.0 = s32[] parameter(0) lhs.1 = s32[] parameter(1) rhs.0 = s32[] parameter(2) rhs.1 = 
s32[] parameter(3) sum.0 = s32[] add(lhs.0, rhs.0) sum.1 = s32[] add(lhs.1, rhs.1) ROOT tuple = tuple(sum.0, sum.1) } ENTRY %module { %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}} %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1), sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="b"}} %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2) %parameter.3 = s32[8,4,2,2]{3,2,1,0} parameter(3) %scatter = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) scatter( s32[8,4,2,2]{3,2,1,0} %parameter.0, s32[8,4,2,2]{3,2,1,0} %parameter.1, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %parameter.2, s32[8,4,2,2]{3,2,1,0} %parameter.3), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0 ROOT %copy = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "scatter"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{{devices=[8,1,1,1]0,1,4,5,2,3,6,7},{devices=[4,1," "1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding().tuple_elements()[0], ShardingMetadata({CreateMetadata("a")})); EXPECT_THAT(instruction->sharding().tuple_elements()[1], ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ParallelScatterFromIndexForwardPass_Variadic) { const char* const hlo_string = R"( HloModule module add (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) { lhs.0 = s32[] parameter(0) lhs.1 = s32[] parameter(1) rhs.0 = s32[] parameter(2) rhs.1 = s32[] parameter(3) sum.0 = s32[] add(lhs.0, rhs.0) sum.1 = s32[] add(lhs.1, rhs.1) ROOT tuple = tuple(sum.0, sum.1) } ENTRY %module { %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1, sharding={devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}} %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2) %parameter.3 = s32[8,4,2,2]{3,2,1,0} parameter(3) %scatter = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) scatter( s32[8,4,2,2]{3,2,1,0} %parameter.0, s32[8,4,2,2]{3,2,1,0} %parameter.1, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %parameter.2, s32[8,4,2,2]{3,2,1,0} %parameter.3), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0 ROOT %copy = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if 
(GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "scatter"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 " "last_tile_dim_replicate},{devices=[4,1,1,1,2]0,1,4," "5,2,3,6,7 last_tile_dim_replicate}}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding().tuple_elements()[0], ShardingMetadata({CreateMetadata("a")})); EXPECT_THAT(instruction->sharding().tuple_elements()[1], ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ParallelScatterFromUpdateForwardPass_Variadic) { const char* const hlo_string = R"( HloModule module add (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) { lhs.0 = s32[] parameter(0) lhs.1 = s32[] parameter(1) rhs.0 = s32[] parameter(2) rhs.1 = s32[] parameter(3) sum.0 = s32[] add(lhs.0, rhs.0) sum.1 = s32[] add(lhs.1, rhs.1) ROOT tuple = tuple(sum.0, sum.1) } ENTRY %module { %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2), sharding={devices=[1,8,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}} %parameter.3 = s32[8,4,2,2]{3,2,1,0} parameter(3), sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="b"}} %scatter = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) scatter( s32[8,4,2,2]{3,2,1,0} %parameter.0, s32[8,4,2,2]{3,2,1,0} %parameter.1, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %parameter.2, s32[8,4,2,2]{3,2,1,0} %parameter.3), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0 ROOT %copy = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "scatter"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{{devices=[1,8,1,1]0,1,4,5,2,3,6,7},{devices=[4,1," "1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}}")); if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding().tuple_elements()[0], ShardingMetadata({CreateMetadata("a")})); EXPECT_THAT(instruction->sharding().tuple_elements()[1], ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } TEST_P(ParameterizedMetadataTest, ParallelScatterBackwardPass_Variadic) { const char* const hlo_string = R"( HloModule module add (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) { lhs.0 = s32[] parameter(0) lhs.1 = s32[] parameter(1) rhs.0 = s32[] parameter(2) rhs.1 = s32[] parameter(3) sum.0 = s32[] add(lhs.0, rhs.0) 
sum.1 = s32[] add(lhs.1, rhs.1) ROOT tuple = tuple(sum.0, sum.1) } ENTRY %module { %parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %copy.p0 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0) %parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1) %copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2) %copy.p2 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.2) %parameter.3 = s32[8,4,2,2]{3,2,1,0} parameter(3) %copy.p3 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.3) %scatter = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) scatter( s32[8,4,2,2]{3,2,1,0} %copy.p0, s32[8,4,2,2]{3,2,1,0} %copy.p1, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %copy.p2, s32[8,4,2,2]{3,2,1,0} %copy.p3), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0, sharding={{devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}, {devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="b"}}} ROOT %copy = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* concatenate = FindInstruction(module.get(), "concatenate"); ASSERT_NE(concatenate, nullptr); EXPECT_THAT(concatenate, op::Sharding("{devices=[1,8,1]0,1,4,5,2,3,6,7}")); auto* copy_p0 = FindInstruction(module.get(), "copy.p0"); ASSERT_NE(copy_p0, nullptr); EXPECT_THAT(copy_p0, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}")); auto* copy_p1 = FindInstruction(module.get(), "copy.p1"); ASSERT_NE(copy_p1, nullptr); EXPECT_THAT( copy_p1, op::Sharding( "{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); auto* copy_p2 = FindInstruction(module.get(), "copy.p2"); ASSERT_NE(copy_p2, nullptr); EXPECT_THAT(copy_p2, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}")); auto* copy_p3 = FindInstruction(module.get(), "copy.p3"); ASSERT_NE(copy_p3, nullptr); EXPECT_THAT( copy_p3, op::Sharding( "{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); for (HloInstruction* instruction : {concatenate, copy_p0, copy_p2}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } for (HloInstruction* instruction : {copy_p1, copy_p3}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, ParallelScatterBackwardPass2_Variadic) { const char* const hlo_string = R"( HloModule module add (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) { lhs.0 = s32[] parameter(0) lhs.1 = s32[] parameter(1) rhs.0 = s32[] parameter(2) rhs.1 = s32[] parameter(3) sum.0 = s32[] add(lhs.0, rhs.0) sum.1 = s32[] add(lhs.1, rhs.1) ROOT tuple = tuple(sum.0, sum.1) } ENTRY %module { %parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0) %copy.p0 = 
s32[4,8,2,2]{3,2,1,0} copy(%parameter.0) %parameter.1 = s32[4,8,2,2]{3,2,1,0} parameter(1) %copy.p1 = s32[4,8,2,2]{3,2,1,0} copy(%parameter.1) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2 %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %iota2), dimensions={0} %parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2) %copy.p2 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.2) %parameter.3 = s32[8,4,2,2]{3,2,1,0} parameter(3) %copy.p3 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.3) %scatter = (s32[4,8,2,2]{3,2,1,0},s32[4,8,2,2]{3,2,1,0}) scatter( s32[4,8,2,2]{3,2,1,0} %copy.p0, s32[4,8,2,2]{3,2,1,0} %copy.p1, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %copy.p2, s32[8,4,2,2]{3,2,1,0} %copy.p3), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={1,0}, index_vector_dim=0, sharding={{devices=[4,1,1,1]0,1,4,5 metadata={op_name="a"}}, {devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="b"}}} ROOT %copy = (s32[4,8,2,2]{3,2,1,0},s32[4,8,2,2]{3,2,1,0}) copy(%scatter) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* concatenate = FindInstruction(module.get(), "concatenate"); ASSERT_NE(concatenate, nullptr); EXPECT_THAT(concatenate, op::Sharding("{devices=[1,1,4]0,1,4,5}")); auto* copy_p0 = FindInstruction(module.get(), "copy.p0"); ASSERT_NE(copy_p0, nullptr); EXPECT_THAT(copy_p0, op::Sharding("{devices=[4,1,1,1]0,1,4,5}")); auto* copy_p1 = FindInstruction(module.get(), "copy.p1"); ASSERT_NE(copy_p1, nullptr); EXPECT_THAT( copy_p1, op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}")); auto* copy_p2 = FindInstruction(module.get(), "copy.p2"); ASSERT_NE(copy_p2, nullptr); EXPECT_THAT(copy_p2, op::Sharding("{devices=[1,4,1,1]0,1,4,5}")); auto* copy_p3 = FindInstruction(module.get(), "copy.p3"); ASSERT_NE(copy_p3, nullptr); EXPECT_THAT( copy_p3, op::Sharding("{devices=[1,2,1,1,2]0,1,4,5 last_tile_dim_replicate}")); for (HloInstruction* instruction : {concatenate, copy_p0, copy_p2}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } for (HloInstruction* instruction : {copy_p1, copy_p3}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("b")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, GatherMergedIndexParallelAndOperandPassthroughFromOperandForwardPass) { absl::string_view hlo_string = R"( HloModule module ENTRY %module { %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %arg.1 = s32[1,8,4]{2,1,0} parameter(1) %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0), sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}} %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %concatenate = s32[2,8,4]{2,1,0} concatenate( s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0} %gather = s32[8,4,2,2]{3,2,1,0} gather( s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} 
%concatenate), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); const HloInstruction* operand = FindInstruction(module.get(), "operand"); ASSERT_NE(operand, nullptr); EXPECT_THAT(operand, op::Sharding("{devices=[2,1,2,1]0,1,4,5 }")); const HloInstruction* indices = FindInstruction(module.get(), "concatenate"); ASSERT_NE(indices, nullptr); EXPECT_THAT( indices, op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}")); const HloInstruction* gather = FindInstruction(module.get(), "gather"); ASSERT_NE(gather, nullptr); EXPECT_THAT(gather, op::Sharding("{devices=[2,1,2,1]0,1,4,5}")); for (const HloInstruction* instruction : {operand, indices, gather}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, GatherMergedIndexParallelAndOperandPassthroughBackwardPass) { absl::string_view hlo_string = R"( HloModule module ENTRY %module { %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %arg.1 = s32[1,8,4]{2,1,0} parameter(1) %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0) %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %concatenate = s32[2,8,4]{2,1,0} concatenate( s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0} %gather = s32[8,4,2,2]{3,2,1,0} gather( s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2}, sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); const HloInstruction* operand = FindInstruction(module.get(), "operand"); ASSERT_NE(operand, nullptr); EXPECT_THAT(operand, op::Sharding("{devices=[2,1,2,1]0,1,4,5 }")); const HloInstruction* indices = FindInstruction(module.get(), "concatenate"); ASSERT_NE(indices, nullptr); EXPECT_THAT( indices, op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}")); const HloInstruction* gather = FindInstruction(module.get(), "gather"); ASSERT_NE(gather, nullptr); EXPECT_THAT(gather, op::Sharding("{devices=[2,1,2,1]0,1,4,5}")); for (const HloInstruction* instruction : {operand, indices, gather}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, GatherMergedIndexParallelAndIndexPassthroughFromIndicesForwardPass) { absl::string_view hlo_string = R"( HloModule module ENTRY %module { %arg.0 = s32[8,4,2,2]{3,2,1,0} 
parameter(0) %arg.1 = s32[1,8,4]{2,1,0} parameter(1) %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0) %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1), sharding={devices=[1,2,2]0,1,4,5 metadata={op_name="a"}} %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %concatenate = s32[2,8,4]{2,1,0} concatenate( s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0} %gather = s32[8,4,2,2]{3,2,1,0} gather( s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); const HloInstruction* operand = FindInstruction(module.get(), "operand"); ASSERT_NE(operand, nullptr); EXPECT_THAT( operand, op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}")); const HloInstruction* indices = FindInstruction(module.get(), "concatenate"); ASSERT_NE(indices, nullptr); EXPECT_THAT(indices, op::Sharding("{devices=[1,2,2]0,1,4,5}")); const HloInstruction* gather = FindInstruction(module.get(), "gather"); ASSERT_NE(gather, nullptr); EXPECT_THAT(gather, op::Sharding("{devices=[2,2,1,1]0,1,4,5}")); for (const HloInstruction* instruction : {operand, indices, gather}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, GatherMergedIndexParallelAndIndexPassthroughBackwardPass) { absl::string_view hlo_string = R"( HloModule module ENTRY %module { %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %arg.1 = s32[1,8,4]{2,1,0} parameter(1) %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0) %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %concatenate = s32[2,8,4]{2,1,0} concatenate( s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0} %gather = s32[8,4,2,2]{3,2,1,0} gather( s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2}, sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); const HloInstruction* operand = FindInstruction(module.get(), "operand"); ASSERT_NE(operand, nullptr); EXPECT_THAT( operand, op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}")); const HloInstruction* indices = FindInstruction(module.get(), "concatenate"); ASSERT_NE(indices, nullptr); EXPECT_THAT(indices, op::Sharding("{devices=[1,2,2]0,1,4,5}")); const HloInstruction* gather = FindInstruction(module.get(), "gather"); ASSERT_NE(gather, nullptr); EXPECT_THAT(gather, op::Sharding("{devices=[2,2,1,1]0,1,4,5}")); for 
(const HloInstruction* instruction : {operand, indices, gather}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, GatherMergedIndexParallelAndTrivialSlicedOperandFromOperandForwardPass) { absl::string_view hlo_string = R"( HloModule module ENTRY %module { %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %arg.1 = s32[1,8,4]{2,1,0} parameter(1) %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0), sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}} %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %concatenate = s32[2,8,4]{2,1,0} concatenate( s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0} %gather = s32[8,4,2,2]{3,2,1,0} gather( s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); const HloInstruction* operand = FindInstruction(module.get(), "operand"); ASSERT_NE(operand, nullptr); EXPECT_THAT(operand, op::Sharding("{devices=[2,2,1,1]0,1,4,5 }")); const HloInstruction* indices = FindInstruction(module.get(), "concatenate"); ASSERT_NE(indices, nullptr); EXPECT_THAT( indices, op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}")); const HloInstruction* gather = FindInstruction(module.get(), "gather"); ASSERT_NE(gather, nullptr); EXPECT_THAT( gather, op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}")); for (const HloInstruction* instruction : {operand, indices, gather}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, GatherMergedIndexParallelAndTrivialSlicedOperandBackwardPass) { absl::string_view hlo_string = R"( HloModule module ENTRY %module { %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}} %arg.1 = s32[1,8,4]{2,1,0} parameter(1) %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0) %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1) %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1 %concatenate = s32[2,8,4]{2,1,0} concatenate( s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0} %gather = s32[8,4,2,2]{3,2,1,0} gather( s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2}, sharding={devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, 
GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); const HloInstruction* operand = FindInstruction(module.get(), "operand"); ASSERT_NE(operand, nullptr); EXPECT_THAT(operand, op::Sharding("{devices=[2,2,1,1]0,1,4,5}")); const HloInstruction* indices = FindInstruction(module.get(), "concatenate"); ASSERT_NE(indices, nullptr); EXPECT_THAT( indices, op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}")); const HloInstruction* gather = FindInstruction(module.get(), "gather"); ASSERT_NE(gather, nullptr); EXPECT_THAT( gather, op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}")); for (const HloInstruction* instruction : {operand, indices, gather}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P( ParameterizedMetadataTest, GatherMergedOperandPassthroughAndTrivialSlicedOperandFromOperandForwardPass) { absl::string_view hlo_string = R"( HloModule module ENTRY %module { %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %arg.1 = s32[2,8,4]{2,1,0} parameter(1) %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0), sharding={devices=[1,2,2,1]0,4,1,5 metadata={op_name="a"}} %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1) %gather = s32[8,4,2,2]{3,2,1,0} gather( s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %indices), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); const HloInstruction* operand = FindInstruction(module.get(), "operand"); ASSERT_NE(operand, nullptr); EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2,1]0,4,1,5}")); const HloInstruction* indices = FindInstruction(module.get(), "indices"); ASSERT_NE(indices, nullptr); EXPECT_THAT(indices, op::Sharding("{replicated}")); const HloInstruction* gather = FindInstruction(module.get(), "gather"); ASSERT_NE(gather, nullptr); EXPECT_THAT( gather, op::Sharding("{devices=[1,1,2,1,2]0,1,4,5 last_tile_dim_replicate}")); for (const HloInstruction* instruction : {operand, indices, gather}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, GatherMergedOperandPassthroughAndTrivialSlicedOperandBackwardPass) { absl::string_view hlo_string = R"( HloModule module ENTRY %module { %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}} %arg.1 = s32[2,8,4]{2,1,0} parameter(1) %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0) %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1) %gather = s32[8,4,2,2]{3,2,1,0} gather( s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %indices), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2}, sharding={devices=[1,1,2,1,2]0,1,4,5 
last_tile_dim_replicate metadata={op_name="a"}} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); const HloInstruction* operand = FindInstruction(module.get(), "operand"); ASSERT_NE(operand, nullptr); EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2,1]0,4,1,5}")); const HloInstruction* indices = FindInstruction(module.get(), "indices"); ASSERT_NE(indices, nullptr); EXPECT_THAT(indices, op::Sharding("{replicated}")); const HloInstruction* gather = FindInstruction(module.get(), "gather"); ASSERT_NE(gather, nullptr); EXPECT_THAT( gather, op::Sharding("{devices=[1,1,2,1,2]0,1,4,5 last_tile_dim_replicate}")); for (const HloInstruction* instruction : {operand, indices, gather}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, GatherMergedOperandAndIndexPassthroughFromOperandAndIndexForwardPass) { absl::string_view hlo_string = R"( HloModule module ENTRY %module { %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %arg.1 = s32[2,8,4]{2,1,0} parameter(1) %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0), sharding={devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}} %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1), sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}} %gather = s32[8,4,2,2]{3,2,1,0} gather( s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %indices), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2} ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); const HloInstruction* operand = FindInstruction(module.get(), "operand"); ASSERT_NE(operand, nullptr); EXPECT_THAT( operand, op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}")); const HloInstruction* indices = FindInstruction(module.get(), "indices"); ASSERT_NE(indices, nullptr); EXPECT_THAT( indices, op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}")); const HloInstruction* gather = FindInstruction(module.get(), "gather"); ASSERT_NE(gather, nullptr); EXPECT_THAT(gather, op::Sharding("{devices=[2,1,2,1]0,1,4,5}")); for (const HloInstruction* instruction : {operand, indices, gather}) { if (GetParam().propagate_metadata && !GetParam().clear_metadata) { EXPECT_THAT(instruction->sharding(), ShardingMetadata({CreateMetadata("a")})); } else { EXPECT_THAT(instruction->sharding(), ShardingMetadata({})); } } } TEST_P(ParameterizedMetadataTest, GatherMergedOperandPassthroughAndIndexPassthroughBackwardPass) { absl::string_view hlo_string = R"( HloModule module ENTRY %module { %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0) %arg.1 = s32[2,8,4]{2,1,0} parameter(1) %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0) %indices = 
TEST_P(ParameterizedMetadataTest,
       GatherMergedOperandPassthroughAndIndexPassthroughBackwardPass) {
  absl::string_view hlo_string = R"(
HloModule module

ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
  %gather = s32[8,4,2,2]{3,2,1,0} gather(s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %indices), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2}, sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* gather = FindInstruction(module.get(), "gather");
  ASSERT_NE(gather, nullptr);
  EXPECT_THAT(gather, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  for (const HloInstruction* instruction : {operand, indices, gather}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(
    ParameterizedMetadataTest,
    GatherMergedTrivialSlicedOperandAndIndexPassthroughFromOperandAndIndexForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module

ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0), sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1), sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
  %gather = s32[8,4,2,2]{3,2,1,0} gather(s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %indices), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* gather = FindInstruction(module.get(), "gather");
  ASSERT_NE(gather, nullptr);
  EXPECT_THAT(
      gather,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction : {operand, indices, gather}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
TEST_P(ParameterizedMetadataTest,
       GatherMergedTrivialSlicedOperandAndIndexPassthroughBackwardPass) {
  absl::string_view hlo_string = R"(
HloModule module

ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0), sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
  %gather = s32[8,4,2,2]{3,2,1,0} gather(s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %indices), offset_dims={2,3}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0, slice_sizes={1,1,2,2}, sharding={devices=[1,1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2,1]0,4,1,5}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(indices, op::Sharding("{replicated}"));
  const HloInstruction* gather = FindInstruction(module.get(), "gather");
  ASSERT_NE(gather, nullptr);
  EXPECT_THAT(
      gather,
      op::Sharding("{devices=[1,1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction : {operand, indices, gather}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
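// The scatter tests below parallel the gather tests above: update_window_dims
// corresponds to gather's offset_dims, inserted_window_dims to
// collapsed_slice_dims, and scatter_dims_to_operand_dims to start_index_map.
// In the "IndexParallel" variants the index operand is concatenate(%iota,
// %indices); the iota row enumerates the operand's first dimension, so that
// dimension can be treated as a parallel (batch-like) dimension when
// propagating shardings.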
TEST_P(ParameterizedMetadataTest,
       ScatterMergedIndexParallelAndOperandPassthroughFromOperandForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module

add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}

ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0), sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %update), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(scatter, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest,
       ScatterMergedIndexParallelAndOperandPassthroughFromUpdateForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module

add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}

ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2), sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %update), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(scatter, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
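// "ForwardPass" tests seed a sharding on one of the inputs (operand, indices,
// or update) and expect it to propagate forward to the scatter; "BackwardPass"
// tests seed the sharding on the scatter itself and expect it to propagate
// back to the inputs. In these cases both directions converge on the same
// merged shardings.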
TEST_P(ParameterizedMetadataTest,
       ScatterMergedIndexParallelAndOperandPassthroughBackwardPass) {
  absl::string_view hlo_string = R"(
HloModule module

add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}

ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %update), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0, sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(scatter, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
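// In the "TrivialSlicedOperand" variants the operand is partitioned along
// dimensions that the op only addresses with slice size 1 (the
// collapsed/inserted window dims), so each index touches exactly one shard
// along those dimensions. Such shardings can be merged with index-parallel
// or passthrough shardings, as the following tests check.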
TEST_P(
    ParameterizedMetadataTest,
    ScatterMergedIndexParallelAndTrivialSlicedOperandFromOperandForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module

add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}

ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0), sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}}
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %update), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
  const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(
      update,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(scatter, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
TEST_P(ParameterizedMetadataTest,
       ScatterMergedIndexParallelAndTrivialSlicedOperandBackwardPass) {
  absl::string_view hlo_string = R"(
HloModule module

add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}

ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %update), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0, sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
  const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(
      update,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(scatter, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest,
       ScatterMergedIndexParallelAndIndexPassthroughFromIndexForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module

add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}

ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1), sharding={devices=[1,2,2]0,1,4,5 metadata={op_name="a"}}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %update), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(indices, op::Sharding("{devices=[1,2,2]0,1,4,5}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(update, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(
      scatter,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
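// "IndexPassthrough" dimensions are index dimensions other than the
// index_vector_dim; their sharding carries over one-to-one to the
// corresponding update dimensions (and, for gather, to output dimensions),
// which is why a sharding seeded on the indices shows up on the update in the
// test above, and vice versa in the test below.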
TEST_P(ParameterizedMetadataTest,
       ScatterMergedIndexParallelAndIndexPassthroughFromUpdateForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module

add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}

ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2), sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}}
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %update), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(indices, op::Sharding("{devices=[1,2,2]0,1,4,5}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(update, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(
      scatter,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
TEST_P(ParameterizedMetadataTest,
       ScatterMergedIndexParallelAndIndexPassthroughBackwardPass) {
  absl::string_view hlo_string = R"(
HloModule module

add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}

ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[1,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1), sharding={devices=[1,1,2,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
  %iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
  %concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %concatenate, s32[8,4,2,2]{3,2,1,0} %update), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0, sharding={devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(indices, op::Sharding("{devices=[1,2,2]0,1,4,5}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(update, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(
      scatter,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(
    ParameterizedMetadataTest,
    ScatterMergedOperandPassthroughAndTrivialSlicedOperandFromOperandForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module

add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}

ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0), sharding={devices=[1,2,2,1]0,1,4,5 metadata={op_name="a"}}
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %indices, s32[8,4,2,2]{3,2,1,0} %update), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2,1]0,1,4,5}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(indices, op::Sharding("{replicated}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(
      update,
      op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(scatter, op::Sharding("{devices=[1,2,2,1]0,1,4,5}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
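// When only operand dimensions are sharded (operand passthrough combined with
// trivial slicing) there is nothing to tile the scatter indices with, so the
// indices are expected to end up replicated in the tests above and below.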
TEST_P(ParameterizedMetadataTest,
       ScatterMergedOperandPassthroughAndTrivialSlicedOperandBackwardPass) {
  absl::string_view hlo_string = R"(
HloModule module

add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}

ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %indices, s32[8,4,2,2]{3,2,1,0} %update), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0, sharding={devices=[1,2,2,1]0,1,4,5 metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2,1]0,1,4,5}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(indices, op::Sharding("{replicated}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(
      update,
      op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(scatter, op::Sharding("{devices=[1,2,2,1]0,1,4,5}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
TEST_P(ParameterizedMetadataTest,
       ScatterMergedOperandAndIndexPassthroughFromOperandAndIndexForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module

add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}

ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0), sharding={devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1), sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %indices, s32[8,4,2,2]{3,2,1,0} %update), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(
      scatter,
      op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(
    ParameterizedMetadataTest,
    ScatterMergedOperandPassthroughAndIndexPassthroughFromUpdateForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module

add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}

ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2), sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %indices, s32[8,4,2,2]{3,2,1,0} %update), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(
      scatter,
      op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
TEST_P(ParameterizedMetadataTest,
       ScatterMergedOperandPassthroughAndIndexPassthroughBackwardPass) {
  absl::string_view hlo_string = R"(
HloModule module

add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}

ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1), sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %indices, s32[8,4,2,2]{3,2,1,0} %update), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0, sharding={devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(
      scatter,
      op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
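// In the Merged* tests the final sharding combines tiling contributed by two
// different sources (for example operand passthrough plus index passthrough).
// The merge is only possible when the two partial shardings agree on a common
// device order; grid dimensions not covered by either source remain a
// replication group (the last_tile_dim_replicate suffix).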
TEST_P(
    ParameterizedMetadataTest,
    ScatterMergedTrivialSlicedOperandAndIndexPassthroughFromOperandAndIndexForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module

add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}

ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0), sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1), sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %indices, s32[8,4,2,2]{3,2,1,0} %update), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(
      update,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(
      scatter,
      op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
TEST_P(
    ParameterizedMetadataTest,
    ScatterMergedTrivialSlicedOperandAndIndexPassthroughFromOperandAndUpdateForwardPass) {
  absl::string_view hlo_string = R"(
HloModule module

add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}

ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0), sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2), sharding={devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %indices, s32[8,4,2,2]{3,2,1,0} %update), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(
      update,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(
      scatter,
      op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}

TEST_P(ParameterizedMetadataTest,
       ScatterMergedTrivialSlicedOperandAndIndexPassthroughBackwardPass) {
  absl::string_view hlo_string = R"(
HloModule module

add (lhs: s32[], rhs: s32[]) -> s32[] {
  lhs = s32[] parameter(0)
  rhs = s32[] parameter(1)
  ROOT sum = s32[] add(lhs, rhs)
}

ENTRY %module {
  %arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
  %arg.1 = s32[2,8,4]{2,1,0} parameter(1)
  %arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
  %operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
  %indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1), sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
  %update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
  %scatter = s32[8,4,2,2]{3,2,1,0} scatter(s32[8,4,2,2]{3,2,1,0} %operand, s32[2,8,4]{2,1,0} %indices, s32[8,4,2,2]{3,2,1,0} %update), to_apply=add, update_window_dims={2,3}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=0, sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
  ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
  const HloInstruction* indices = FindInstruction(module.get(), "indices");
  ASSERT_NE(indices, nullptr);
  EXPECT_THAT(
      indices,
      op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* update = FindInstruction(module.get(), "update");
  ASSERT_NE(update, nullptr);
  EXPECT_THAT(
      update,
      op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
  const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
  ASSERT_NE(scatter, nullptr);
  EXPECT_THAT(
      scatter,
      op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
  for (const HloInstruction* instruction :
       {operand, indices, update, scatter}) {
    if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
      EXPECT_THAT(instruction->sharding(),
                  ShardingMetadata({CreateMetadata("a")}));
    } else {
      EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
    }
  }
}
TEST_P(ParameterizedMetadataTest, CorrectlyReplicateGatherIndex) {
  absl::string_view hlo_string = R"(
HloModule module

ENTRY %module {
  %parameter.0 = bf16[1,2,2,2,8]{4,3,2,1,0} parameter(0)
  %parameter.1 = s32[1,2,2]{2,1,0} parameter(1)
  %index = s32[1,2,2]{2,1,0} copy(%parameter.1)
  %gather = bf16[1,2,2,2,8]{4,3,2,1,0} gather(bf16[1,2,2,2,8]{4,3,2,1,0} %parameter.0, s32[1,2,2]{2,1,0} %index), offset_dims={2,3,4}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=2, slice_sizes={1,1,2,2,8}, sharding={devices=[1,1,2,1,1]0,1 metadata={op_name="a"}}
  ROOT %copy = bf16[1,2,2,2,8]{4,3,2,1,0} copy(%gather)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* index = FindInstruction(module.get(), "index");
  ASSERT_NE(index, nullptr);
  EXPECT_THAT(index, op::Sharding("{replicated}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(index->sharding(), ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(index->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest,
       GatherToOperand_ParallelDimIsNotPartitioned) {
  absl::string_view hlo_string = R"(
HloModule module

ENTRY %module {
  %parameter.0 = s32[2,1000,1]{2,1,0} parameter(0)
  %parameter.1 = bf16[2,4819,4]{2,1,0} parameter(1)
  %iota = s32[2,1000,1]{1,0,2} iota(), iota_dimension=0
  %operand = bf16[2,4819,4]{2,1,0} copy(%parameter.1)
  %index = s32[2,1000,2]{2,1,0} concatenate(s32[2,1000,1]{1,0,2} %iota, s32[2,1000,1]{2,1,0} %parameter.0), dimensions={2}, sharding={devices=[1,4,1]0,1,2,3}
  ROOT %gather = bf16[2,1000,4]{2,1,0} gather(bf16[2,4819,4]{2,1,0} %operand, s32[2,1000,2]{2,1,0} %index), offset_dims={2}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=2, slice_sizes={1,1,4}, sharding={devices=[1,4,1]0,1,2,3}
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(true, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  const HloInstruction* operand = FindInstruction(module.get(), "operand");
  EXPECT_THAT(operand, op::Sharding("{replicated}"));
}
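// The following group exercises subgroup-manual shardings:
// "last_tile_dims={manual}" marks the trailing device-grid dimension as
// manually partitioned (outside the automatic partitioner's control) while
// the leading grid dimensions are still automatically sharded. Note that
// these tests run ShardingPropagation with its first argument (is_spmd) set
// to false.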
TEST_P(ParameterizedMetadataTest, ManualSubgroupForward) {
  const char* const hlo_string = R"(
HloModule module

ENTRY %entry {
  %param0 = f32[6,3]{1,0} parameter(0), sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
  %copy = f32[6,3]{1,0} copy(%param0)
  %param1 = f32[6,3]{1,0} parameter(1), sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
  %copy.1 = f32[6,3]{1,0} copy(%param1)
  %add = f32[6,3]{1,0} add(%copy, %copy.1)
  ROOT %copy.2 = f32[6,3]{1,0} copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "add");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ManualSubgroup_SingleOperandHasSharding) {
  const char* const hlo_string = R"(
HloModule module

ENTRY %entry {
  %param0 = f32[6,3]{1,0} parameter(0), sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
  %copy = f32[6,3]{1,0} copy(%param0)
  %param1 = f32[6,3]{1,0} parameter(1)
  %copy.1 = f32[6,3]{1,0} copy(%param1)
  %add = f32[6,3]{1,0} add(%copy, %copy.1)
  ROOT %copy.2 = f32[6,3]{1,0} copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "add");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
  auto* operand = FindInstruction(module.get(), "copy");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(operand->sharding(), ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(operand->sharding(), ShardingMetadata({}));
  }
}

TEST_P(ParameterizedMetadataTest, ManualSubgroup_OneOperandReplicate) {
  const char* const hlo_string = R"(
HloModule module

ENTRY %entry {
  %param0 = f32[6,3]{1,0} parameter(0), sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
  %copy = f32[6,3]{1,0} copy(%param0)
  %param1 = f32[6,3]{1,0} parameter(1), sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dims={replicated, manual} metadata={op_name="a"}}
  %copy.1 = f32[6,3]{1,0} copy(%param1)
  %add = f32[6,3]{1,0} add(%copy, %copy.1)
  ROOT %copy.2 = f32[6,3]{1,0} copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "add");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
  auto* operand = FindInstruction(module.get(), "copy");
  ASSERT_NE(operand, nullptr);
  EXPECT_THAT(
      operand,
      op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(operand->sharding(), ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(operand->sharding(), ShardingMetadata({}));
  }
}
TEST_P(ParameterizedMetadataTest, ManualSubgroupBackward) {
  const char* const hlo_string = R"(
HloModule module

ENTRY %entry {
  %param0 = f32[6,3]{1,0} parameter(0)
  %copy = f32[6,3]{1,0} copy(%param0)
  %param1 = f32[6,3]{1,0} parameter(1)
  %copy.1 = f32[6,3]{1,0} copy(%param1)
  %add = f32[6,3]{1,0} add(%copy, %copy.1), sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
  ROOT %copy.2 = f32[6,3]{1,0} copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  if (GetParam().clear_metadata) {
    ClearMetadata(module.get());
  }
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(false, GetParam().propagate_metadata)
                        .Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "copy");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(
      instruction,
      op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
  if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
    EXPECT_THAT(instruction->sharding(),
                ShardingMetadata({CreateMetadata("a")}));
  } else {
    EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
  }
}

TEST_F(ShardingPropagationTest, SimpleManual) {
  const char* const hlo_string = R"(
HloModule module

%add {
  %lhs = f32[] parameter(0)
  %rhs = f32[] parameter(1)
  ROOT %add = f32[] add(%lhs, %rhs)
}

ENTRY %entry {
  %param0 = f32[6,3] parameter(0)
  %copy = f32[6,3] copy(%param0), sharding={devices=[2,1]0,1}
  %annotate = f32[6,3] custom-call(%copy), custom_call_target="Sharding", sharding={devices=[2,1]0,1}
  %to_manual = f32[3,3] custom-call(%annotate), custom_call_target="SPMDFullToShardShape", sharding={manual}
  %zero = f32[] constant(0)
  %reduce = f32[3] reduce(%to_manual, %zero), dimensions={1}, to_apply=%add
  %annotate2 = f32[3] custom-call(%reduce), custom_call_target="Sharding", sharding={manual}
  %to_auto = f32[6] custom-call(%annotate2), custom_call_target="SPMDShardToFullShape", sharding={devices=[2]0,1}
  ROOT %copy.2 = f32[6] copy(%to_auto)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          ShardingPropagation(true, true).Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "reduce");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{manual}"));
}

TEST_F(ShardingPropagationTest, SimpleManualTuple) {
  const char* const hlo_string = R"(
HloModule module

%add {
  %lhs = f32[] parameter(0)
  %rhs = f32[] parameter(1)
  ROOT %add = f32[] add(%lhs, %rhs)
}

ENTRY %entry {
  %param0 = f32[6,3] parameter(0)
  %copy = f32[6,3] copy(%param0), sharding={devices=[2,1]0,1}
  %annotate = f32[6,3] custom-call(%copy), custom_call_target="Sharding", sharding={devices=[2,1]0,1}
  %to_manual = f32[3,3] custom-call(%annotate), custom_call_target="SPMDFullToShardShape", sharding={manual}
  %t = (f32[3,3]) tuple(%to_manual)
  %gte = f32[3,3] get-tuple-element(%t), index=0
  %to_auto = f32[3,3] custom-call(%gte), custom_call_target="SPMDShardToFullShape", sharding={devices=[2,1]0,1}
  ROOT %copy.2 = f32[3,3] copy(%to_auto)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          ShardingPropagation(true, true).Run(module.get()));
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_TRUE(changed);
  auto* instruction = FindInstruction(module.get(), "t");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{{manual}}"));
  instruction = FindInstruction(module.get(), "gte");
  ASSERT_NE(instruction, nullptr);
  EXPECT_THAT(instruction, op::Sharding("{manual}"));
}
op::Sharding("{{manual},{manual}}")); } TEST_F(ShardingPropagationTest, RefineUnspecifiedDims) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[6,3] parameter(0) %copy = f32[6,3] copy(%param0), sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate} %annotate = f32[6,3] custom-call(%copy), custom_call_target="Sharding", backend_config="unspecified_dims=[1]", sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate} %copy.2 = f32[6,3] copy(%annotate) ROOT %copy.3 = f32[6,3] copy(%copy.2) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "copy.2"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,2,1,3}")); } TEST_F(ShardingPropagationTest, RefineUnspecifiedDimsWithManualConversion) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[6,3,8] parameter(0) %copy = f32[6,3,8] copy(%param0), sharding={devices=[1,2,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate} %annotate = f32[6,3,8] custom-call(%copy), custom_call_target="Sharding", backend_config="unspecified_dims=[1,2]", sharding={devices=[2,1,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate} %to_manual = f32[3,3,8] custom-call(%annotate), custom_call_target="SPMDFullToShardShape", backend_config="unspecified_dims=[1,2]", sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}} %annotate2 = f32[3,3,8] custom-call(%to_manual), custom_call_target="Sharding", backend_config="unspecified_dims=[1,2]", sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}} %to_auto = f32[6,3,8] custom-call(%annotate2), custom_call_target="SPMDShardToFullShape", backend_config="unspecified_dims=[1,2]", sharding={devices=[2,1,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate} %copy.2 = f32[6,3,8] copy(%to_auto) ROOT %copy.3 = f32[6,3,8] copy(%copy.2), sharding={devices=[1,1,2,4]0,2,4,6,1,3,5,7 last_tile_dim_replicate} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* copy2 = FindInstruction(module.get(), "copy.2"); ASSERT_NE(copy2, nullptr); EXPECT_THAT(copy2, op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7}")); auto* to_manual = FindInstruction(module.get(), "to_manual"); ASSERT_NE(to_manual, nullptr); EXPECT_THAT( to_manual, op::Sharding( "{devices=[1,2,2,2]0,2,1,3,4,6,5,7 last_tile_dims={manual}}")); auto* to_auto = FindInstruction(module.get(), "to_auto"); ASSERT_NE(to_auto, nullptr); EXPECT_THAT(to_auto, op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7}")); } TEST_F(ShardingPropagationTest, RefineUnspecifiedDimsWithManualConversion2) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[6,3,8] parameter(0) %copy = f32[6,3,8] copy(%param0) %annotate1 = f32[6,3,8] custom-call(%copy), custom_call_target="Sharding", backend_config="unspecified_dims=[1,2]", sharding={devices=[2,1,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate} %to_manual = f32[3,3,8] custom-call(%annotate1), custom_call_target="SPMDFullToShardShape", backend_config="unspecified_dims=[1,2]", sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}} %annotate2 = f32[3,3,8] custom-call(%to_manual), 
custom_call_target="Sharding", backend_config="unspecified_dims=[1,2]", sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}} %annotate3 = f32[3,3,8] custom-call(%annotate2), custom_call_target="Sharding", backend_config="unspecified_dims=[1,2]", sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}} %to_auto = f32[6,3,8] custom-call(%annotate3), custom_call_target="SPMDShardToFullShape", backend_config="unspecified_dims=[1,2]", sharding={devices=[2,1,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate} %copy.2 = f32[6,3,8] copy(%to_auto), sharding={devices=[1,2,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate} ROOT %copy.3 = f32[6,3,8] copy(%copy.2) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* copy = FindInstruction(module.get(), "copy"); ASSERT_NE(copy, nullptr); EXPECT_THAT( copy, op::Sharding( "{devices=[2,2,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}")); } TEST_F(ShardingPropagationTest, DoNotRefineUnspecifiedDimsOnManual) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[6,3] parameter(0), sharding={manual} %annotate = f32[6,3] custom-call(%param0), custom_call_target="Sharding", backend_config="unspecified_dims=[1]", sharding={manual} ROOT %copy.2 = f32[6,3] copy(%annotate), sharding={manual} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); EXPECT_TRUE(changed); for (auto* hlo : module->entry_computation()->instructions()) { EXPECT_TRUE(hlo->sharding().IsManual()); } } TEST_F(ShardingPropagationTest, DoNotPassManualShardingToSPMDShardToFullShape) { const char* const hlo_string = R"( HloModule module ENTRY %entry { p.0 = f32[2,3]{1,0} parameter(0), sharding={replicated} custom-call.2 = f32[2,3]{1,0} custom-call(p.0), custom_call_target="Sharding", sharding={replicated} custom-call.3 = f32[2,3]{1,0} custom-call(custom-call.2), custom_call_target="SPMDFullToShardShape", sharding={manual} custom-call.4 = f32[2,3]{1,0} custom-call(custom-call.3), custom_call_target="Sharding", sharding={manual} ROOT custom-call.5 = f32[16,3]{1,0} custom-call(custom-call.4), custom_call_target="SPMDShardToFullShape", sharding={replicated} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true, {true}) .Run(module.get())); EXPECT_TRUE(changed); auto spmd_shard_to_full = module->entry_computation()->root_instruction(); CHECK(spmd_shard_to_full->IsCustomCall("SPMDShardToFullShape")); EXPECT_FALSE(spmd_shard_to_full->sharding().IsManual()); } TEST_F(ShardingPropagationTest, ManualShardingPassThroughSplitConstant) { const char* const hlo_string = R"( HloModule module ENTRY %entry { p.0 = f32[2,3]{1,0} parameter(0), sharding={replicated} p.1 = f32[2,3]{1,0} parameter(1), sharding={replicated} constant = f32[2,3]{1,0} constant({{0,1,2},{3,4,5}}) custom-call.0 = f32[2,3]{1,0} custom-call(p.0), custom_call_target="Sharding", sharding={replicated} custom-call.1 = f32[2,3]{1,0} custom-call(custom-call.0), custom_call_target="SPMDFullToShardShape", sharding={manual} add.0 = f32[2,3]{1,0} add(constant, custom-call.1) custom-call.2 = f32[2,3]{1,0} custom-call(add.0), custom_call_target="SPMDShardToFullShape", 
sharding={replicated} add.1 = f32[2,3]{1,0} add(constant, p.1) ROOT tuple = (f32[2,3]{1,0}, f32[2,3]{1,0}) tuple(custom-call.2, add.1) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool is_split, HloConstantSplitter(true).Run(module.get())); EXPECT_TRUE(is_split); TF_ASSERT_OK_AND_ASSIGN(auto _, HloDCE().Run(module.get())); (void)_; TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); const HloInstruction* add0 = FindInstruction(module.get(), "add.0"); ASSERT_NE(add0, nullptr); const HloInstruction* manual_constant = add0->operand(0); EXPECT_TRUE(manual_constant->IsConstant() && manual_constant->sharding().IsManual()); const HloInstruction* add1 = FindInstruction(module.get(), "add.1"); ASSERT_NE(add1, nullptr); const HloInstruction* replicate_constant = add1->operand(0); EXPECT_TRUE(replicate_constant->IsConstant() && replicate_constant->sharding().IsReplicated()); } TEST_F(ShardingPropagationTest, ReshapeNoMatchSubgroupManual) { const char* const hlo_string = R"( HloModule module ENTRY %reshape { %param0 = f32[1,3,3] parameter(0), sharding={devices=[2,1,1,2]0,1,2,3 last_tile_dims={manual}} %reshape = f32[3,1,3,1] reshape(%param0) ROOT %copy = f32[3,1,3,1] copy(%reshape) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "reshape"); ASSERT_NE(instruction, nullptr); EXPECT_THAT( instruction, op::Sharding( "{devices=[1,1,1,1,2,2]0,2,1,3 last_tile_dims={manual,replicated}}")); } TEST_F(ShardingPropagationTest, X64Combine) { const char* const hlo_string = R"( HloModule module ENTRY %reshape { %param0 = f32[102,192,192] parameter(0), sharding={devices=[1,2,2]0,1,2,3} %param1 = f32[102,192,192] parameter(1), sharding={devices=[1,2,2]0,1,2,3} %custom-call = f64[102,192,192] custom-call(f32[102,192,192] %param0, f32[102,192,192] %param1), custom_call_target="X64Combine" ROOT %copy = f64[102,192,192] copy(%custom-call), sharding={devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "custom-call"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3}")); } TEST_F(ShardingPropagationTest, LayoutConstraint) { const char* const hlo_string = R"( HloModule module ENTRY %reshape { %param0 = f32[102,192,192] parameter(0), sharding={devices=[1,2,2]0,1,2,3} %custom-call = f32[102,192,192]{0,1,2} custom-call(f32[102,192,192] %param0), custom_call_target="LayoutConstraint" ROOT %copy = f32[102,192,192] copy(%custom-call), sharding={devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* instruction = FindInstruction(module.get(), "custom-call"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction->shape(), ShapeUtil::MakeShapeWithDenseLayout( F32, {102, 192, 192}, {0, 1, 2})); EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3}")); } TEST_F(ShardingPropagationTest, OffloadingPropagation) { const char* const hlo_string = R"( HloModule module ENTRY %offloading { %param0 = f32[1,256,128] parameter(0), sharding={devices=[1,1,4]0,1,2,3} %zero = f32[] constant(0.0) %broadcast = f32[256,256,128] broadcast(%zero), dimensions={} %izero = s32[] constant(0) %custom-call.0 = f32[1,256,128] custom-call(f32[1,256,128] %param0), custom_call_target="MoveToHost" %dynamic-update-slice = f32[256,256,128] dynamic-update-slice(%broadcast, %custom-call.0, %izero, %izero, %izero) %dynamic-slice = f32[1,256,128] dynamic-slice(%dynamic-update-slice, %izero, %izero, %izero), dynamic_slice_sizes={1,256,128} %custom-call.1 = f32[1,256,128] custom-call(f32[1,256,128] %dynamic-slice), custom_call_target="MoveToDevice" ROOT %copy = f32[1,256,128] copy(%custom-call.1), sharding={devices=[1,4,1]0,1,2,3} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* to_host = FindInstruction(module.get(), "custom-call.0"); EXPECT_THAT(to_host, op::Sharding("{devices=[1,1,4]0,1,2,3}")); auto* from_host_input = FindInstruction(module.get(), "custom-call.1")->operand(0); EXPECT_THAT(from_host_input, op::Sharding("{devices=[1,1,4]0,1,2,3}")); } TEST_P(ParameterizedMetadataTest, PropagateThroughSingleUsers) { const char* const hlo_string = R"( HloModule module %cond { %vars.cond = (u32[], f32[10,10], f32[10,10]) parameter(0) %count.cond = u32[] get-tuple-element((u32[], f32[10,10], f32[10,10]) %vars.cond), index=0 %limit = u32[] constant(10) ROOT %lt = pred[] compare(u32[] %count.cond, u32[] %limit), direction=LT } %body { %vars = (u32[], f32[10,10], f32[10,10]) parameter(0) %count = u32[] get-tuple-element(%vars), index=0 %acc = f32[10,10] get-tuple-element((u32[], f32[10,10],f32[10,10]) %vars), index=1 %cvt = s32[10,10] convert(acc) %one = u32[] constant(1) %count.1 = u32[] add(u32[] %count, u32[] %one) %acc.i = s32[10,10] add(s32[10,10] %cvt, s32[10,10] %cvt), sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate} %acc.1 = f32[10,10] convert(acc.i) ROOT %tuple = (u32[], f32[10,10], f32[10,10]) tuple(u32[] %count.1, f32[10,10] %acc, f32[10,10] %acc.1) } ENTRY %entry { %p0 = f32[10,10] parameter(0) %p0.copy = f32[10,10] copy(f32[10,10] %p0), sharding={devices=[4,1]0,1,2,3} %p1 = f32[10,10] parameter(1) %p2 = f32[10,10] parameter(2) %p2.copy = f32[10,10] copy(f32[10,10] %p2) %zero = u32[] constant(0) %init = (u32[], f32[10,10], f32[10,10]) tuple(u32[] %zero, f32[10,10] %p0.copy, f32[10,10] %p2.copy) %while = (u32[], f32[10,10], f32[10,10]) while((u32[], f32[10,10], f32[10,10]) %init), body=%body, condition=%cond %g1 = u32[] get-tuple-element((u32[], f32[10,10], f32[10,10]) %while), index=0 %g2 = f32[10,10] get-tuple-element((u32[], f32[10,10], f32[10,10]) %while), index=1 %g3 = f32[10,10] get-tuple-element((u32[], f32[10,10], f32[10,10]) %while), index=2 ROOT %t = (u32[], f32[10,10], f32[10,10]) tuple(%g1, %g2, %g3) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); auto body_root = FindInstruction(module.get(), "tuple"); EXPECT_NE(nullptr, body_root); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); VLOG(1) << "Mod:"; XLA_VLOG_LINES(1, module->ToString()); 
EXPECT_TRUE(changed); const HloInstruction* convert_instr = FindInstruction(module.get(), "cvt"); EXPECT_THAT(convert_instr, op::Sharding("{devices=[4,1]0,1,2,3}")); } TEST_P(ParameterizedMetadataTest, NestedTupleFromUserSharding) { const char* const hlo_string = R"( HloModule module %cond { %vars.cond = (u32[], ((f32[10,10], f32[10,10]), f32[]), f32[10,10]) parameter(0) %count.cond = u32[] get-tuple-element(%vars.cond), index=0 %limit = u32[] constant(10) ROOT %lt = pred[] compare(u32[] %count.cond, u32[] %limit), direction=LT } %body { %vars = (u32[], ((f32[10,10], f32[10,10]), f32[]), f32[10,10]) parameter(0) %count = u32[] get-tuple-element(%vars), index=0 %fwd = ((f32[10,10], f32[10,10]), f32[]) get-tuple-element(%vars), index=1 %acc = f32[10,10] get-tuple-element(%vars), index=2 %cvt = s32[10,10] convert(%acc) %one = u32[] constant(1) %count.1 = u32[] add(u32[] %count, u32[] %one) %acc.i = s32[10,10] add(s32[10,10] %cvt, s32[10,10] %cvt) %acc.1 = f32[10,10] convert(%acc.i) ROOT %tuple = (u32[], ((f32[10,10], f32[10,10]), f32[]), f32[10,10]) tuple(%count.1, %fwd, %acc.1) } ENTRY %entry { %p0 = f32[10,10] parameter(0) %p0.copy = f32[10,10] copy(f32[10,10] %p0) %p1 = f32[10,10] parameter(1) %p1.copy = f32[10,10] copy(f32[10,10] %p1) %p2 = f32[10,10] parameter(2) %p2.copy = f32[10,10] copy(f32[10,10] %p2) %zero = u32[] constant(0) %zerof = f32[] constant(0) %init0 = (f32[10,10], f32[10,10]) tuple(%p0.copy, %p1.copy) %init1 = ((f32[10,10], f32[10,10]), f32[]) tuple(%init0, %zerof) %init = (u32[], ((f32[10,10], f32[10,10]), f32[]), f32[10,10]) tuple(%zero, %init1, %p2.copy) %while = (u32[], ((f32[10,10], f32[10,10]), f32[]), f32[10,10]) while(%init), body=%body, condition=%cond %g1 = u32[] get-tuple-element(%while), index=0 %g2 = ((f32[10,10], f32[10,10]), f32[]) get-tuple-element(%while), index=1 %g2.0 = (f32[10,10], f32[10,10]) get-tuple-element(%g2), index=0 %g2.0.0 = f32[10,10] get-tuple-element(%g2.0), index=0 %g3 = f32[10,10] get-tuple-element(%while), index=2 %copy.g3 = f32[10,10] copy(%g3), sharding={devices=[4,1]0,1,2,3} ROOT %t = (u32[], f32[10,10], f32[10,10]) tuple(%g1, %g2.0.0, %g3) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); auto body_root = FindInstruction(module.get(), "tuple"); EXPECT_NE(nullptr, body_root); if (GetParam().clear_metadata) { ClearMetadata(module.get()); } TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, GetParam().propagate_metadata) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); const HloInstruction* copy_instr = FindInstruction(module.get(), "p2.copy"); EXPECT_THAT(copy_instr, op::Sharding("{devices=[4,1]0,1,2,3}")); } TEST_F(ShardingPropagationTest, CSEPreventionOnly) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[] parameter(0), sharding={replicated} %br = f32[4] broadcast(%param0), dimensions={} %add = f32[4] add(%br, %br) %annotate = f32[4] custom-call(%add), custom_call_target="Sharding", backend_config="unspecified_dims=[0]", sharding={replicated} ROOT %copy = f32[4] copy(%annotate), sharding={devices=[4]0,1,2,3} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {false}, {false}, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* br = FindInstruction(module.get(), "br"); ASSERT_NE(br, nullptr); EXPECT_THAT(br, op::Sharding("{devices=[4]0,1,2,3}")); EXPECT_THAT(br->sharding(),
ShardingMetadata({CreateMetadata( "_sharding_propagation_cse_prevention")})); EXPECT_THAT(FindInstruction(module.get(), "annotate"), AllOf(op::Sharding("{replicated}"), op::CustomCall())); EXPECT_FALSE(FindInstruction(module.get(), "add")->has_sharding()); } TEST_F(ShardingPropagationTest, RemoveCSEPrevention) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[] parameter(0), sharding={replicated} %br = f32[4] broadcast(%param0), dimensions={}, sharding={devices=[4]0,1,2,3 metadata={op_name="_sharding_propagation_cse_prevention"}} %add = f32[4] add(%br, %br) %annotate = f32[4] custom-call(%add), custom_call_target="Sharding", backend_config="unspecified_dims=[0]", sharding={replicated} ROOT %copy = f32[4] copy(%annotate), sharding={devices=[4]3,2,1,0} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(FindInstruction(module.get(), "br"), op::Sharding("{devices=[4]3,2,1,0}")); EXPECT_THAT(FindInstruction(module.get(), "add"), op::Sharding("{devices=[4]3,2,1,0}")); } TEST_F(ShardingPropagationTest, ReshapeTrivialDimPartialReplicate) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[8,128] parameter(0), sharding={replicated} %c = f32[8,128] copy(%param0) %rsp = f32[8,1,128] reshape(%c), sharding={devices=[1,2,4]0,1,2,3,4,5,6,7} ROOT %copy = f32[8,1,128] copy(%rsp), sharding={devices=[1,2,4]0,1,2,3,4,5,6,7} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT( FindInstruction(module.get(), "c"), op::Sharding("{devices=[1,4,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate}")); } TEST_F(ShardingPropagationTest, EmptyTupleWithinTuple) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[2] parameter(0), sharding={replicated} %et = () tuple() %tuple = (f32[2], (), (), f32[2]) tuple(%param0, %et, %et, %param0) ROOT %copy = (f32[2], (), (), f32[2]) copy(%tuple) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); } TEST_F(ShardingPropagationTest, ContractingAsNonContractingCrash) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %p0 = f32[20,64,56,56]{3,2,1,0} parameter(0), sharding={replicated} %p1 = f32[1,1,256,64]{2,3,1,0} parameter(1), sharding={devices=[4,2,1,1]0,1,2,3,4,5,6,7} %convolution.4512 = f32[20,256,56,56]{3,2,1,0} convolution(%p0, %p1), window={size=1x1}, dim_labels=bf01_01oi->bf01 ROOT %copy = f32[20,256,56,56]{3,2,1,0} copy(%convolution.4512) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); } TEST_F(ShardingPropagationTest, PropagateReduceManualTuple) { const char* const hlo_string = R"( HloModule pjit orclone { lhs.1 = u32[] parameter(0) rhs.1 = u32[] parameter(2) or.2 = u32[] or(lhs.1, rhs.1) lhs.0 = u32[] parameter(1) rhs.0 = u32[] parameter(3) or.3 = u32[] or(lhs.0, rhs.0) ROOT tuple.4 = (u32[], u32[]) tuple(or.2, 
or.3) } ENTRY %main.21 { select.104 = u32[2,2]{1,0} parameter(0), sharding={manual} shift-left.5 = u32[2,2]{1,0} parameter(1), sharding={manual} constant.4183 = u32[] constant(0), sharding={manual} reduce.1 = (u32[2]{0}, u32[2]{0}) reduce(shift-left.5, select.104, constant.4183, constant.4183), dimensions={1}, to_apply=orclone ROOT get-tuple-element.13 = u32[2]{0} get-tuple-element(reduce.1), index=0 } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); EXPECT_THAT(FindInstruction(module.get(), "reduce.1"), op::Sharding("{{manual}, {manual}}")); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); } TEST_F(ShardingPropagationTest, MergeCompatibleTiles) { const char* const hlo_string = R"( HloModule pjit ENTRY %main.21 { p = bf16[8,4,256,1024,12288]{4,3,2,1,0} parameter(0), sharding={devices=[8,1,1,1,1]0,1,2,3,4,5,6,7} p2 = bf16[8,4,256,1024,12288]{4,3,2,1,0} parameter(1), sharding={devices=[4,1,1,1,1,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate} c0 = bf16[8,4,256,1024,12288]{4,3,2,1,0} copy(p) c1 = bf16[8,4,256,1024,12288]{4,3,2,1,0} copy(p2) a = bf16[8,4,256,1024,12288]{4,3,2,1,0} add(c0, c1) ROOT c2 = bf16[8,4,256,1024,12288]{4,3,2,1,0} copy(a), sharding={devices=[8,1,1,1,1]0,1,2,3,4,5,6,7} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(FindInstruction(module.get(), "c1"), op::Sharding("{devices=[8,1,1,1,1]0,1,2,3,4,5,6,7}")); } TEST_F(ShardingPropagationTest, OutfeedUser) { const char* const hlo_string = R"( HloModule pjit ENTRY %main.21 { p = f32[10,128]{1,0} parameter(0) c = f32[10,128]{1,0} copy(p) t = (f32[10,128]{1,0}) tuple(c) a = token[] after-all() ROOT of = token[] outfeed((f32[10,128]{1,0}) %t, token[] %a), outfeed_shape=(f32[10,128]{1,0}), sharding={{devices=[2,1]0,1}, {maximal device=0}} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(FindInstruction(module.get(), "c"), op::Sharding("{devices=[2,1]0,1}")); } TEST_F(ShardingPropagationTest, SortForwardWithBarrier) { const char* const hlo_string = R"( HloModule module compare { p.0.lhs = f32[] parameter(0), sharding={replicated} p.0.rhs = f32[] parameter(1), sharding={replicated} ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated} } ENTRY entry { param.0 = f32[1024,1024]{1,0} parameter(0) negate.0 = f32[1024,1024]{1,0} negate(param.0), sharding={devices=[1,8]0,1,2,3,4,5,6,7} %shard-barrier-from = f32[1024,1024]{1,0} custom-call(%negate.0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true sort.0 = f32[1024,1024]{1,0} sort(shard-barrier-from), dimensions={1}, is_stable=true, to_apply=compare ROOT copy.0 = f32[1024,1024]{1,0} copy(sort.0) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( std::ignore, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_FALSE(FindInstruction(module.get(), "sort.0")->has_sharding()); } TEST_F(ShardingPropagationTest, SortBackwardWithBarrier) { const char* const hlo_string = R"( HloModule module compare { p.0.lhs = 
f32[] parameter(0), sharding={replicated} p.0.rhs = f32[] parameter(1), sharding={replicated} ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated} } ENTRY entry { param.0 = f32[1024,1024]{1,0} parameter(0) negate.0 = f32[1024,1024]{1,0} negate(param.0) %shard-barrier-to = f32[1024,1024]{1,0} custom-call(%negate.0), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true sort.0 = f32[1024,1024]{1,0} sort(shard-barrier-to), dimensions={1}, is_stable=true, to_apply=compare, sharding={devices=[1,8]0,1,2,3,4,5,6,7} ROOT copy.0 = f32[1024,1024]{1,0} copy(sort.0) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( std::ignore, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_THAT(FindInstruction(module.get(), "negate.0"), op::Sharding("{replicated}")); } TEST_F(ShardingPropagationTest, SortOperandShardedOnSortDim_RankOne) { const char* const hlo_string = R"( HloModule module, entry_computation_layout={(f32[1024]{0})->(f32[1024]{0}, s32[1024]{0})} compare { p.0.lhs = f32[] parameter(0), sharding={replicated} p.0.rhs = f32[] parameter(1), sharding={replicated} p.1.lhs = s32[] parameter(2), sharding={replicated} p.1.rhs = s32[] parameter(3), sharding={replicated} ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated} } ENTRY entry { param.0 = f32[1024]{0} parameter(0) negate.0 = f32[1024]{0} negate(param.0), sharding={devices=[8]0,1,2,3,4,5,6,7} iota.0 = s32[1024]{0} iota(), iota_dimension=0 sort.0 = (f32[1024]{0}, s32[1024]{0}) sort(negate.0, iota.0), dimensions={0}, is_stable=true, to_apply=compare ROOT copy.0 = (f32[1024]{0}, s32[1024]{0}) copy(sort.0) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_FALSE(changed); } TEST_F(ShardingPropagationTest, SortOperandShardedOnSortDim_RankTwo) { const char* const hlo_string = R"( HloModule module, entry_computation_layout={(f32[1024,1024]{1,0})->(f32[1024,1024]{1,0}, s32[1024,1024]{1,0})} compare { p.0.lhs = f32[] parameter(0), sharding={replicated} p.0.rhs = f32[] parameter(1), sharding={replicated} p.1.lhs = s32[] parameter(2), sharding={replicated} p.1.rhs = s32[] parameter(3), sharding={replicated} ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated} } ENTRY entry { param.0 = f32[1024,1024]{1,0} parameter(0) negate.0 = f32[1024,1024]{1,0} negate(param.0), sharding={devices=[1,8]0,1,2,3,4,5,6,7} iota.0 = s32[1024,1024]{1,0} iota(), iota_dimension=1 sort.0 = (f32[1024,1024]{1,0}, s32[1024,1024]{1,0}) sort(negate.0, iota.0), dimensions={1}, is_stable=true, to_apply=compare ROOT copy.0 = (f32[1024,1024]{1,0}, s32[1024,1024]{1,0}) copy(sort.0) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(FindInstruction(module.get(), "iota.0"), op::Sharding("{devices=[1,8]0,1,2,3,4,5,6,7}")); EXPECT_THAT( FindInstruction(module.get(), "sort.0"), op::Sharding( "{{devices=[1,8]0,1,2,3,4,5,6,7}, {devices=[1,8]0,1,2,3,4,5,6,7}}")); } TEST_F(ShardingPropagationTest, ConditionalManual) { const char* const hlo_string = R"( HloModule module %true_comp { %tp = (f32[3,5], f32[]) parameter(0) %tgte.0 = f32[3,5] 
get-tuple-element(%tp), index=0 %tgte.1 = f32[] get-tuple-element(%tp), index=1 %ttr = f32[5,3] transpose(%tgte.0), dimensions={1,0} %broadcast.1 = f32[5,3] broadcast(%tgte.1), dimensions={} %add.1 = f32[5,3] add(%broadcast.1, %ttr) ROOT %tr = (f32[5,3], f32[]) tuple(%add.1, %tgte.1) } %false_comp { %fp = (f32[5,3], f32[5,3], f32[]) parameter(0) %fgte.0 = f32[5,3] get-tuple-element(%fp), index=0 %fgte.1 = f32[] get-tuple-element(%fp), index=2 ROOT %fr = (f32[5,3], f32[]) tuple(%fgte.0, %fgte.1) } ENTRY entry { %cond = pred[] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}} %tp.0 = f32[3,5] parameter(1), sharding={devices=[1,1,2,2]<=[4] last_tile_dims={manual, replicated}} %fp.0 = f32[5,3] parameter(2), sharding={devices=[1,1,2,2]<=[4] last_tile_dims={manual, replicated}} %const0 = f32[] constant(0) %const1 = f32[] constant(1) %true_param = (f32[3,5], f32[]) tuple(%tp.0, %const0) %false_param = (f32[5,3], f32[5,3], f32[]) tuple(%fp.0, %fp.0, %const1) ROOT %conditional = (f32[5,3], f32[]) conditional( %cond, %true_param, %false_param), true_computation=%true_comp, false_computation=%false_comp })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* tp = FindInstruction(module.get(), "tp"); auto* true_param = FindInstruction(module.get(), "true_param"); ASSERT_NE(tp, nullptr); ASSERT_NE(true_param, nullptr); EXPECT_EQ(tp->sharding(), true_param->sharding()); auto* fp = FindInstruction(module.get(), "fp"); auto* false_param = FindInstruction(module.get(), "false_param"); ASSERT_NE(fp, nullptr); ASSERT_NE(false_param, nullptr); EXPECT_EQ(fp->sharding(), false_param->sharding()); } TEST_F(ShardingPropagationTest, WhileDSManual) { const char* const hlo_string = R"( HloModule module while.condition { arg_tuple = (s32[], pred[2,8,4]) parameter(0) tripcount = s32[] get-tuple-element(arg_tuple), index=0 triplimit = s32[] constant(2) ROOT compare.0 = pred[] compare(tripcount, triplimit), direction=LT } while.body { arg_tuple = (s32[], pred[2,8,4]) parameter(0) tripcount = s32[] get-tuple-element(arg_tuple), index=0 one = s32[] constant(1) tripcount_next = s32[] add(tripcount, one) preds.1 = pred[2,8,4] get-tuple-element(arg_tuple), index=1 zero.1 = s32[] constant(0) dynamic-slice.1 = pred[1,8,4] dynamic-slice(preds.1, tripcount, zero.1, zero.1), dynamic_slice_sizes={1,8,4}, sharding={devices=[1,1,1,2,4]<=[8] last_tile_dims={manual, replicated}} ROOT result = (s32[], pred[2,8,4]) tuple(tripcount_next, preds.1) } ENTRY entry { preds = pred[2,8,4] parameter(0), sharding={devices=[1,1,1,2,4]<=[8] last_tile_dims={manual, replicated}} zero = s32[] constant(0) tuple.13 = (s32[], pred[2,8,4]) tuple(zero, preds) ROOT result = (s32[], pred[2,8,4]) while(tuple.13), condition=while.condition, body=while.body })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* tuple = FindInstruction(module.get(), "tuple.13"); EXPECT_THAT(tuple, op::Sharding("{{replicated}, {devices=[1,1,1,2,4]<=[8] " "last_tile_dims={manual, replicated}}}")); } TEST_F(ShardingPropagationTest, PropagateToOutput) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[] parameter(0), sharding={replicated} %br = f32[4] broadcast(%param0), dimensions={} %annotate = f32[4] custom-call(%br), custom_call_target="Sharding",
backend_config="unspecified_dims=[0]", sharding={devices=[4]0,1,2,3} ROOT %add = f32[4] add(%annotate, %annotate), sharding={replicated} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true, {true}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{devices=[4]0,1,2,3}")); } TEST_F(ShardingPropagationTest, PropagateToOutputTuplePartial) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[] parameter(0), sharding={replicated} %br = f32[4] broadcast(%param0), dimensions={} %annotate = f32[4] custom-call(%br), custom_call_target="Sharding", backend_config="unspecified_dims=[0]", sharding={devices=[4]0,1,2,3} %add = f32[4] add(%annotate, %annotate) %param1 = f32[] parameter(1), sharding={replicated} %br1 = f32[4] broadcast(%param1), dimensions={} %annotate1 = f32[4] custom-call(%br1), custom_call_target="Sharding", backend_config="unspecified_dims=[0]", sharding={devices=[4]0,1,2,3} %add1 = f32[4] add(%annotate1, %annotate1) ROOT t = (f32[4], f32[4]) tuple(add, add1), sharding={{replicated},{replicated}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true, false}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{{devices=[4]0,1,2,3},{replicated}}")); } TEST_F(ShardingPropagationTest, PropagateToOutputTupleFull) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[] parameter(0), sharding={replicated} %br = f32[4] broadcast(%param0), dimensions={} %annotate = f32[4] custom-call(%br), custom_call_target="Sharding", backend_config="unspecified_dims=[0]", sharding={devices=[4]0,1,2,3} %add = f32[4] add(%annotate, %annotate) %param1 = f32[] parameter(1), sharding={replicated} %br1 = f32[4] broadcast(%param1), dimensions={} %annotate1 = f32[4] custom-call(%br1), custom_call_target="Sharding", backend_config="unspecified_dims=[0]", sharding={devices=[4]0,1,2,3} %add1 = f32[4] add(%annotate1, %annotate1) ROOT t = (f32[4], f32[4]) tuple(add, add1), sharding={{replicated},{replicated}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true, true, {true}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{{devices=[4]0,1,2,3},{devices=[4]0,1,2,3}}")); } TEST_F(ShardingPropagationTest, PropagateToParametersNotEnabled1) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[4] parameter(0) ROOT %add = f32[4] add(%param0, %param0), sharding={devices=[4]0,1,2,3} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {false}, {false, false}) .Run(module.get())); EXPECT_FALSE(changed); EXPECT_FALSE( module->entry_computation()->parameter_instruction(0)->has_sharding()); } TEST_F(ShardingPropagationTest, PropagateToParametersNotEnabled2) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[4] parameter(0), sharding={replicated} ROOT %add = f32[4] add(%param0, %param0), 
sharding={devices=[4]0,1,2,3} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation(true).Run(module.get())); EXPECT_FALSE(changed); EXPECT_THAT(module->entry_computation()->parameter_instruction(0), op::Sharding("{replicated}")); } TEST_F(ShardingPropagationTest, PropagateToParametersNotEnabled3) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[4] parameter(0) %param1 = f32[4] parameter(1), sharding={replicated} ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {false}, {false}) .Run(module.get())); EXPECT_FALSE(changed); EXPECT_FALSE( module->entry_computation()->parameter_instruction(0)->has_sharding()); EXPECT_THAT(module->entry_computation()->parameter_instruction(1), op::Sharding("{replicated}")); } TEST_F(ShardingPropagationTest, PropagateToParametersNotEnabled4) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[4] parameter(0), sharding={replicated} %param1 = f32[4] parameter(1), sharding={replicated} ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {false}, {false, false}) .Run(module.get())); EXPECT_FALSE(changed); EXPECT_THAT(module->entry_computation()->parameter_instruction(0), op::Sharding("{replicated}")); EXPECT_THAT(module->entry_computation()->parameter_instruction(1), op::Sharding("{replicated}")); } TEST_F(ShardingPropagationTest, PropagateToParametersPartial1) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[4] parameter(0), sharding={replicated} %param1 = f32[4] parameter(1), sharding={replicated} ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {false}, {false, true}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->parameter_instruction(0), op::Sharding("{replicated}")); EXPECT_THAT(module->entry_computation()->parameter_instruction(1), op::Sharding("{devices=[4]0,1,2,3}")); } TEST_F(ShardingPropagationTest, PropagateToParametersPartial2) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[4] parameter(0) %param1 = f32[4] parameter(1), sharding={replicated} ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {false}, {false, true}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_FALSE( module->entry_computation()->parameter_instruction(0)->has_sharding()); EXPECT_THAT(module->entry_computation()->parameter_instruction(1), op::Sharding("{devices=[4]0,1,2,3}")); } TEST_F(ShardingPropagationTest, PropagateToParametersPartial3) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[4] parameter(0), sharding={replicated} %param1 = f32[4] parameter(1) ROOT %add = f32[4] add(%param0, %param1), 
sharding={devices=[4]0,1,2,3} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {false}, {false, true}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->parameter_instruction(0), op::Sharding("{replicated}")); EXPECT_THAT(module->entry_computation()->parameter_instruction(1), op::Sharding("{devices=[4]0,1,2,3}")); } TEST_F(ShardingPropagationTest, PropagateToParametersPartial4) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[4] parameter(0) %param1 = f32[4] parameter(1) ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {false}, {false, true}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_FALSE( module->entry_computation()->parameter_instruction(0)->has_sharding()); EXPECT_THAT(module->entry_computation()->parameter_instruction(1), op::Sharding("{devices=[4]0,1,2,3}")); } TEST_F(ShardingPropagationTest, PropagateToParametersFull1) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[4] parameter(0) %param1 = f32[4] parameter(1) ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {false}, {true}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->parameter_instruction(0), op::Sharding("{devices=[4]0,1,2,3}")); EXPECT_THAT(module->entry_computation()->parameter_instruction(1), op::Sharding("{devices=[4]0,1,2,3}")); } TEST_F(ShardingPropagationTest, PropagateToParametersFull2) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[4] parameter(0), sharding={replicated} %param1 = f32[4] parameter(1) ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {false}, {true, true}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->parameter_instruction(0), op::Sharding("{devices=[4]0,1,2,3}")); EXPECT_THAT(module->entry_computation()->parameter_instruction(1), op::Sharding("{devices=[4]0,1,2,3}")); } TEST_F(ShardingPropagationTest, PropagateToTupleParameter_WithoutSharding) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param = (f32[4], f32[4]) parameter(0) %gte0 = f32[4] get-tuple-element(%param), index=0 %gte1 = f32[4] get-tuple-element(%param), index=1 ROOT %add = f32[4] add(%gte0, %gte1), sharding={devices=[4]0,1,2,3} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {false}, {true, true}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->parameter_instruction(0), op::Sharding("{{devices=[4]0,1,2,3}, {devices=[4]0,1,2,3}}")); } TEST_F(ShardingPropagationTest, PropagateToTupleParameter_WithSharding1) { const char* const 
hlo_string = R"( HloModule module ENTRY %entry { %param = (f32[4], f32[4]) parameter(0), sharding={{replicated}, {replicated}} %gte0 = f32[4] get-tuple-element(%param), index=0 %gte1 = f32[4] get-tuple-element(%param), index=1 ROOT %add = f32[4] add(%gte0, %gte1), sharding={devices=[4]0,1,2,3} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {false}, {false, true}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->parameter_instruction(0), op::Sharding("{{replicated}, {devices=[4]0,1,2,3}}")); } TEST_F(ShardingPropagationTest, PropagateToTupleParameter_WithSharding2) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param = (f32[4], f32[4]) parameter(0), sharding={{replicated}, {replicated}} %gte0 = f32[4] get-tuple-element(%param), index=0 %gte1 = f32[4] get-tuple-element(%param), index=1 ROOT %add = f32[4] add(%gte0, %gte1), sharding={devices=[4]0,1,2,3} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {false}, {true, false}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->parameter_instruction(0), op::Sharding("{{devices=[4]0,1,2,3}, {replicated}}")); } TEST_F(ShardingPropagationTest, PropagateManualOutfeed) { const char* const hlo_string = R"( HloModule module ENTRY %entry { p0 = f32[8]{0} parameter(0) p1 = f32[1]{0} parameter(1) tuple.1 = (f32[8]{0}) tuple(p0) constant.8 = u32[2]{0} constant({3, 12}) tuple.10 = (u32[2]{0}) tuple(constant.8) aa.1 = token[] after-all() outfeed.1 = token[] outfeed(tuple.10, aa.1), outfeed_shape=(u32[2]{0}), sharding={{manual}, {manual}} outfeed.2 = token[] outfeed(tuple.1, outfeed.1), outfeed_shape=(f32[8]{0}), sharding={{manual}, {manual}} ROOT tuple.15 = (f32[1]{0}, token[]) tuple(p1, outfeed.2) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true, true}, {true, true}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{{replicated}, {manual}}")); } TEST_F(ShardingPropagationTest, PropagateFromDanglingShardingCustomCall) { const char* const hlo_string = R"( HloModule module ENTRY %entry { p.0 = s32[40000]{0} parameter(0) add = s32[40000]{0} add(p.0, p.0) cc = s32[40000]{0} custom-call(add), custom_call_target="Sharding", sharding={devices=[4]0,1,2,3} ROOT mul = s32[40000]{0} multiply(add, add) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true}, {true}) .Run(module.get())); EXPECT_TRUE(changed); HloDCE dce; TF_ASSERT_OK_AND_ASSIGN(bool dce_ed, RunHloPass(&dce, module.get())); EXPECT_TRUE(dce_ed); XLA_VLOG_LINES(1, module->ToString()); auto* instruction = FindInstruction(module.get(), "param0"); EXPECT_EQ(instruction, nullptr); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{devices=[4]0,1,2,3}")); } TEST_F(ShardingPropagationTest, DoNotPropagateToParameterIfNotDivisible_WithSharding) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[4] parameter(0), sharding={replicated} %param1 = f32[3] 
parameter(1), sharding={replicated} %pad_value = f32[] constant(0) %pad = f32[4] pad(%param1, %pad_value), padding=0_1 ROOT %add = f32[4] add(%param0, %pad), sharding={devices=[4]0,1,2,3} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {false}, {false, true}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->parameter_instruction(0), op::Sharding("{replicated}")); EXPECT_THAT(module->entry_computation()->parameter_instruction(1), op::Sharding("{replicated}")); } TEST_F(ShardingPropagationTest, DoNotPropagateToParameterIfNotDivisible_WithoutSharding) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[4] parameter(0), sharding={replicated} %param1 = f32[3] parameter(1) %pad_value = f32[] constant(0) %pad = f32[4] pad(%param1, %pad_value), padding=0_1 ROOT %add = f32[4] add(%param0, %pad), sharding={devices=[4]0,1,2,3} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {false}, {false, true}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->parameter_instruction(0), op::Sharding("{replicated}")); EXPECT_THAT(module->entry_computation()->parameter_instruction(1), op::Sharding("{replicated}")); } TEST_F(ShardingPropagationTest, DoNotPropagateToTupleParameterIfNotDivisible) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = (f32[4], f32[3]) parameter(0), sharding={{replicated}, {replicated}} %gte0 = f32[4] get-tuple-element(%param0), index=0 %gte1 = f32[3] get-tuple-element(%param0), index=1 %pad_value = f32[] constant(0) %pad = f32[4] pad(%gte1, %pad_value), padding=0_1 ROOT %add = f32[4] add(%gte0, %pad), sharding={devices=[4]0,1,2,3} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {false}, {false, true}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->parameter_instruction(0), op::Sharding("{{replicated}, {replicated}}")); } TEST_F(ShardingPropagationTest, DoNotPropagateToOutputIfNotDivisible_WithSharding) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[4] parameter(0), sharding={replicated} %param1 = f32[4] parameter(1), sharding={replicated} %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3} ROOT %slice = f32[3] slice(%add), slice={[0:3:1]}, sharding={replicated} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true}, {false, false}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{replicated}")); } TEST_F(ShardingPropagationTest, DoNotPropagateToOutputIfNotDivisible_WithoutSharding) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[4] parameter(0), sharding={replicated} %param1 = f32[4] parameter(1), sharding={replicated} %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3} ROOT %slice = f32[3] slice(%add), slice={[0:3:1]} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, 
ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true}, {false, false}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{replicated}")); } TEST_F(ShardingPropagationTest, DoNotPropagateToOutputTupleIfNotDivisible_WithSharding) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[4] parameter(0), sharding={replicated} %param1 = f32[4] parameter(1), sharding={replicated} %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3} %slice = f32[3] slice(%add), slice={[0:3:1]} ROOT %tuple = (f32[4], f32[3]) tuple(%add, %slice), sharding={{replicated}, {replicated}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {false, true}, {false, false}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{{replicated}, {replicated}}")); } TEST_F(ShardingPropagationTest, DoNotPropagateToOutputTupleIfNotDivisible_WithoutSharding) { const char* const hlo_string = R"( HloModule module ENTRY %entry { %param0 = f32[4] parameter(0), sharding={replicated} %param1 = f32[4] parameter(1), sharding={replicated} %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3} %slice = f32[3] slice(%add), slice={[0:3:1]} ROOT %tuple = (f32[4], f32[3]) tuple(%add, %slice) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true, true}, {false, false}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{{devices=[4]0,1,2,3}, {replicated}}")); } TEST_F(ShardingPropagationTest, PropagateShardLikeDifferentSharding) { const char* const hlo_string = R"( HloModule module ENTRY %entry { p.0 = s32[16,16] parameter(0), sharding={devices=[4,2]0,1,2,3,4,5,6,7} p.1 = s32[16,16] parameter(1), sharding={devices=[2,4]0,1,2,3,4,5,6,7} add.1 = s32[16,16] add(p.0, p.0) sharding.1 = s32[16,16] custom-call(add.1), custom_call_target="Sharding", sharding={unknown shard_like 0} add.2 = s32[16,16] add(p.1, p.1) sharding.2 = s32[16,16] custom-call(add.2), custom_call_target="Sharding", sharding={unknown shard_like 0} ROOT mul = s32[16,16] multiply(add.1, add.2) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true}, {false, false}) .Run(module.get())); EXPECT_TRUE(changed); XLA_VLOG_LINES(1, module->ToString()); auto* add_1 = FindInstruction(module.get(), "add.1"); ASSERT_NE(add_1, nullptr); auto* add_2 = FindInstruction(module.get(), "add.2"); ASSERT_NE(add_2, nullptr); EXPECT_NE(add_1->sharding(), add_2->sharding()); } TEST_F(ShardingPropagationTest, PropagateShardLikeSameSharding) { const char* const hlo_string = R"( HloModule module %add { %lhs = s32[] parameter(0) %rhs = s32[] parameter(1) ROOT %add = s32[] add(%lhs, %rhs) } ENTRY %entry { p.0 = s32[16,16] parameter(0), sharding={devices=[4,2]0,1,2,3,4,5,6,7} p.1 = s32[16,16] parameter(1) add.1 = s32[16,16] add(p.0, p.0) sharding.1 = s32[16,16] custom-call(add.1), custom_call_target="Sharding", sharding={unknown shard_like 0} init = s32[] constant(0)
reduce.1 = s32[] reduce(add.1, init), dimensions={0,1}, to_apply=%add add.2 = s32[16,16] add(p.1, p.1) sharding.2 = s32[16,16] custom-call(add.2), custom_call_target="Sharding", sharding={unknown shard_like 0} reduce.2 = s32[] reduce(add.2, init), dimensions={0,1}, to_apply=%add ROOT mul = s32[] multiply(reduce.1, reduce.2) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true}, {false, false}) .Run(module.get())); EXPECT_TRUE(changed); XLA_VLOG_LINES(1, module->ToString()); auto* add_1 = FindInstruction(module.get(), "add.1"); ASSERT_NE(add_1, nullptr); auto* add_2 = FindInstruction(module.get(), "add.2"); ASSERT_NE(add_2, nullptr); EXPECT_EQ(add_1->sharding(), add_2->sharding()); } TEST_F(ShardingPropagationTest, PropagateShardAs) { const char* const hlo_string = R"( HloModule module ENTRY %entry { p.0 = s32[16,16] parameter(0), sharding={devices=[4,2]0,1,2,3,4,5,6,7} p.1 = s32[16,16] parameter(1), sharding={devices=[2,4]0,1,2,3,4,5,6,7} add.1 = s32[16,16] add(p.0, p.0) sharding.1 = s32[16,16] custom-call(add.1), custom_call_target="Sharding", sharding={unknown shard_as 0} add.2 = s32[16,16] add(p.1, p.1) sharding.2 = s32[16,16] custom-call(add.2), custom_call_target="Sharding", sharding={unknown shard_as 0} ROOT mul = s32[16,16] multiply(add.1, add.2) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true}, {false, false}) .Run(module.get())); EXPECT_TRUE(changed); XLA_VLOG_LINES(1, module->ToString()); auto* add_1 = FindInstruction(module.get(), "add.1"); ASSERT_NE(add_1, nullptr); auto* add_2 = FindInstruction(module.get(), "add.2"); ASSERT_NE(add_2, nullptr); EXPECT_EQ(add_1->sharding(), add_2->sharding()); } TEST_F(ShardingPropagationTest, PropagateShardAsToParameters) { const char* const hlo_string = R"( HloModule module %add { %lhs = s32[] parameter(0) %rhs = s32[] parameter(1) ROOT %add = s32[] add(%lhs, %rhs) } ENTRY %entry { p.0 = s32[16,16] parameter(0), sharding={unknown shard_as 0} p.1 = s32[16,16] parameter(1), sharding={devices=[4,2]0,1,2,3,4,5,6,7} add.1 = s32[16,16] add(p.0, p.0) init = s32[] constant(0) reduce.1 = s32[] reduce(add.1, init), dimensions={0,1}, to_apply=%add add.2 = s32[16,16] add(p.1, p.1) sharding.2 = s32[16,16] custom-call(add.2), custom_call_target="Sharding", sharding={unknown shard_as 0} reduce.2 = s32[] reduce(add.2, init), dimensions={0,1}, to_apply=%add ROOT mul = s32[] multiply(reduce.1, reduce.2) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {false}, {true, true}) .Run(module.get())); EXPECT_TRUE(changed); XLA_VLOG_LINES(1, module->ToString()); auto* p_0 = FindInstruction(module.get(), "p.0"); ASSERT_NE(p_0, nullptr); auto* add_2 = FindInstruction(module.get(), "add.2"); ASSERT_NE(add_2, nullptr); EXPECT_THAT(add_2, op::Sharding("{devices=[4,2]0,1,2,3,4,5,6,7}")); EXPECT_EQ(p_0->sharding(), add_2->sharding()); } TEST_F(ShardingPropagationTest, PropagateShardAsToOutputs) { const char* const hlo_string = R"( HloModule module %add { %lhs = s32[] parameter(0) %rhs = s32[] parameter(1) ROOT %add = s32[] add(%lhs, %rhs) } ENTRY %entry { p.0 = s32[16,16] parameter(0), sharding={devices=[4,2]0,1,2,3,4,5,6,7} add.1 = s32[16,16] add(p.0, p.0) sharding.1 = s32[16,16] custom-call(add.1), custom_call_target="Sharding", sharding={unknown 
shard_as 0} init = s32[] constant(0) reduce.1 = s32[] reduce(add.1, init), dimensions={0,1}, to_apply=%add broadcast.1 = s32[16,16] broadcast(reduce.1), dimensions={} ROOT mul = s32[16,16] multiply(broadcast.1, broadcast.1), sharding={unknown shard_as 0} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true}, {false}) .Run(module.get())); EXPECT_TRUE(changed); XLA_VLOG_LINES(1, module->ToString()); auto* add_1 = FindInstruction(module.get(), "add.1"); ASSERT_NE(add_1, nullptr); auto* output = FindInstruction(module.get(), "mul"); ASSERT_NE(output, nullptr); EXPECT_THAT(add_1, op::Sharding("{devices=[4,2]0,1,2,3,4,5,6,7}")); EXPECT_EQ(add_1->sharding(), output->sharding()); } TEST_F(ShardingPropagationTest, PropagateShardAsBetweenInputOutput) { const char* const hlo_string = R"( HloModule jit_zeros_like ENTRY main.6 { Arg_0.1 = s64[8,2]{1,0} parameter(0), sharding={devices=[4,2]<=[8]} custom-call.4 = s64[8,2]{1,0} custom-call(Arg_0.1), custom_call_target="Sharding", sharding={unknown shard_as 0} constant.2 = s64[] constant(0) broadcast.3 = s64[8,2]{1,0} broadcast(constant.2), dimensions={} ROOT custom-call.5 = s64[8,2]{1,0} custom-call(broadcast.3), custom_call_target="Sharding", sharding={unknown shard_as 0} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true}, {true}) .Run(module.get())); EXPECT_TRUE(changed); VLOG(1) << module->ToString(); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{devices=[4,2]0,1,2,3,4,5,6,7}")); } TEST_F(ShardingPropagationTest, PropagateShardAsBetweenInputOutput2) { const char* const hlo_string = R"( HloModule jit_f, entry_computation_layout={(f32[8]{0:T(256)})->(f32[8]{0:T(256)}, f32[8]{0:T(256)})}, allow_spmd_sharding_propagation_to_output={true,true}, num_partitions=4 ENTRY main.9 { Arg_0.1 = f32[8]{0} parameter(0) custom-call.6 = f32[8]{0} custom-call(Arg_0.1), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 0}, metadata={op_name="jit(f)/jit(main)/shard_alike" source_file="third_party/py/jax/tests/shard_alike_test.py" source_line=206} custom-call.4 = f32[8]{0} custom-call(Arg_0.1), custom_call_target="Sharding", sharding={devices=[4]<=[4]}, metadata={op_name="jit(f)/jit(main)/sharding_constraint[sharding=GSPMDSharding({devices=[4]<=[4]}) resource_env=ResourceEnv(mesh=Mesh(), ()) unconstrained_dims=set()]" source_file="third_party/py/jax/tests/shard_alike_test.py" source_line=204} constant.0 = f32[] constant(2) broadcast.0 = f32[8]{0} broadcast(constant.0), dimensions={} multiply.5 = f32[8]{0} multiply(custom-call.4, broadcast.0), metadata={op_name="jit(f)/jit(main)/mul" source_file="third_party/py/jax/tests/shard_alike_test.py" source_line=205} custom-call.7 = f32[8]{0} custom-call(multiply.5), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 0}, metadata={op_name="jit(f)/jit(main)/shard_alike" source_file="third_party/py/jax/tests/shard_alike_test.py" source_line=206} ROOT tuple.8 = (f32[8]{0}, f32[8]{0}) tuple(custom-call.6, custom-call.7) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true, true}, {true}) .Run(module.get())); EXPECT_TRUE(changed); VLOG(1) << module->ToString(); 
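// The two shard_as-linked custom-calls feed the two tuple outputs, so both
// outputs should pick up the explicit {devices=[4]<=[4]} constraint coming
// from custom-call.4.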
EXPECT_THAT(module->entry_computation()->root_instruction(), op::Sharding("{{devices=[4]<=[4]}, {devices=[4]<=[4]}}")); } TEST_F(ShardingPropagationTest, LookaheadUsersOfDot) { const char* const hlo_string = R"( HloModule module ENTRY %entry { p0 = bf16[512,512,1024]{2,1,0} parameter(0), sharding={devices=[16,1,4]<=[64]} p1 = bf16[512,512,16,128]{3,2,1,0} parameter(1), sharding={devices=[16,1,4,1]<=[64]} p2 = bf16[16,1024,16,128]{3,2,1,0} parameter(2), sharding={devices=[1,4,4,1,4]<=[4,16]T(1,0) last_tile_dim_replicate} p3 = s32[] parameter(3) dot.1 = bf16[1024,16,128]{2,1,0} dot(p0, p1), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1} reshape.1 = bf16[1,1024,16,128]{3,2,1,0} reshape(dot.1) constant.1 = s32[] constant(0) ROOT dynamic-update-slice.113 = bf16[16,1024,16,128]{3,2,1,0} dynamic-update-slice(p2, reshape.1, p3, constant.1, constant.1, constant.1), sharding={devices=[1,4,4,1,4]<=[4,16]T(1,0) last_tile_dim_replicate} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true}, {true}) .Run(module.get())); EXPECT_TRUE(changed); XLA_VLOG_LINES(1, module->ToString()); auto* instruction = FindInstruction(module.get(), "dot.1"); EXPECT_THAT(instruction, op::Sharding( "{devices=[4,4,1,4]<=[4,16]T(1,0) last_tile_dim_replicate}")); } TEST_F(ShardingPropagationTest, AsyncInstructionManualShardingArray) { const char* const hlo_string = R"( HloModule module called_computation { p0 = s32[8] parameter(0) p1 = s32[8] parameter(1) ROOT add = s32[8] add(p0, p1) }, execution_thread="thread_1" ENTRY entry_computation { p0 = s32[8] parameter(0), sharding={manual} p1 = s32[8] parameter(1), sharding={manual} async-start = ((s32[8], s32[8]), s32[8], u32[]) call-start(p0, p1), async_execution_thread="thread_1", to_apply=called_computation ROOT async-done = s32[8] call-done(async-start) }, execution_thread="thread_0" )"; { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true}, {true}) .Run(module.get(), {"thread_0"})); EXPECT_TRUE(changed); XLA_VLOG_LINES(1, module->ToString()); auto* instruction = FindInstruction(module.get(), "async-start"); ASSERT_NE(instruction, nullptr); EXPECT_THAT(instruction, op::Sharding("{{manual}, {manual}, {manual}, {manual}}")); auto* async_done = FindInstruction(module.get(), "async-done"); ASSERT_NE(async_done, nullptr); EXPECT_THAT(async_done, op::Sharding("{manual}")); } { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true}, {true}) .Run(module.get(), {"thread_0", "thread_1"})); EXPECT_FALSE(changed); } { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true}, {true}) .Run(module.get())); EXPECT_FALSE(changed); } } TEST_F(ShardingPropagationTest, AsyncInstructionManualShardingTuple) { const char* const hlo_string = R"( HloModule module called_computation { p0 = s32[8] parameter(0) p1 = s32[8] parameter(1) add = s32[8] add(p0, p1) mul = s32[8] multiply(p0, p1) ROOT result = (s32[8], s32[8]) tuple(add, mul) }, execution_thread="thread_1" ENTRY entry_computation { p0 = s32[8] parameter(0), sharding={manual} p1 = s32[8] parameter(1), sharding={manual} async-start = ((s32[8], s32[8]), (s32[8], s32[8]), u32[]) call-start(p0, p1), 
async_execution_thread="thread_1", to_apply=called_computation ROOT async-done = (s32[8], s32[8]) call-done(async-start) }, execution_thread="thread_0" )"; { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true}, {true}) .Run(module.get(), {"thread_0"})); EXPECT_TRUE(changed); XLA_VLOG_LINES(1, module->ToString()); auto* async_start = FindInstruction(module.get(), "async-start"); ASSERT_NE(async_start, nullptr); EXPECT_THAT( async_start, op::Sharding("{{manual}, {manual}, {manual}, {manual}, {manual}}")); auto* async_done = FindInstruction(module.get(), "async-done"); ASSERT_NE(async_done, nullptr); EXPECT_THAT(async_done, op::Sharding("{{manual}, {manual}}")); } { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true}, {true}) .Run(module.get(), {"thread_0", "thread_1"})); EXPECT_FALSE(changed); } { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true}, {true}) .Run(module.get())); EXPECT_FALSE(changed); } } TEST_F(ShardingPropagationTest, ShardAsWithShardBarrier) { const char* const hlo_string = R"( HloModule pjit_f ENTRY main.11 { Arg_0.1 = bf16[384,1408]{1,0} parameter(0), sharding={devices=[1,16,512]<=[8,16,64]T(1,0,2) last_tile_dim_replicate} broadcast.4 = bf16[8,384,1408]{2,1,0} broadcast(Arg_0.1), dimensions={1,2} custom-call.5 = bf16[8,384,1408]{2,1,0} custom-call(broadcast.4), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 1} broadcast.2 = bf16[8,384,1408]{2,1,0} broadcast(Arg_0.1), dimensions={1,2} custom-call.3 = bf16[8,384,1408]{2,1,0} custom-call(broadcast.2), custom_call_target="Sharding", sharding={devices=[8,1,1,1024]<=[8192] last_tile_dim_replicate}, backend_config="unspecified_dims=[1,2]" custom-call.6 = bf16[8,384,1408]{2,1,0} custom-call(custom-call.3), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 1} %shard-barrier-to = bf16[8,384,1408]{2,1,0} custom-call(%custom-call.6), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true slice.7 = bf16[1,384,1408]{2,1,0} slice(shard-barrier-to), slice={[1:2], [0:384], [0:1408]} reshape.8 = bf16[384,1408]{1,0} reshape(slice.7) tuple.9 = (bf16[384,1408]{1,0}) tuple(reshape.8) get-tuple-element.10 = bf16[384,1408]{1,0} get-tuple-element(tuple.9), index=0, sharding={devices=[16,1,512]<=[8,16,64]T(1,0,2) last_tile_dim_replicate} ROOT tuple.13 = (bf16[384,1408]{1,0}, bf16[8,384,1408]{2,1,0}) tuple(get-tuple-element.10, custom-call.5) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true}, {false, false}) .Run(module.get())); EXPECT_TRUE(changed); XLA_VLOG_LINES(1, module->ToString()); auto* broadcast_4 = FindInstruction(module.get(), "broadcast.4"); ASSERT_NE(broadcast_4, nullptr); EXPECT_THAT( broadcast_4, op::Sharding("{devices=[8,1,16,64]<=[8192] last_tile_dim_replicate}")); auto* copy = FindInstruction(module.get(), "copy"); ASSERT_NE(copy, nullptr); EXPECT_THAT( copy, op::Sharding("{devices=[8,1,16,64]<=[8192] last_tile_dim_replicate}")); } TEST_F(ShardingPropagationTest, ShardAsWithShardBarrier2) { const char* const hlo_string = R"( HloModule module ENTRY %elementwise { %param0 = f32[5,7,11,13]{3,2,1,0} 
parameter(0) %custom-call.0 = f32[5,7,11,13]{3,2,1,0} custom-call(param0), custom_call_target="Sharding", sharding={devices=[2,1,1,1,4]<=[8] last_tile_dim_replicate}, backend_config="unspecified_dims=[1,2,3]" %shard-barrier-from = f32[5,7,11,13]{3,2,1,0} custom-call(%custom-call.0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true %custom-call.2 = f32[5,7,11,13]{3,2,1,0} custom-call(shard-barrier-from), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 1} %param1 = f32[5,7,11,13]{3,2,1,0} parameter(1) %custom-call.1 = f32[5,7,11,13]{3,2,1,0} custom-call(param1), custom_call_target="Sharding", sharding={devices=[1,2,2,1,2]<=[2,4]T(1,0) last_tile_dim_replicate}, backend_config="unspecified_dims=[0]" %custom-call.3 = f32[5,7,11,13]{3,2,1,0} custom-call(custom-call.1), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 1} ROOT %tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple(%custom-call.0, %custom-call.3) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true}, {false, false}) .Run(module.get())); EXPECT_TRUE(changed); XLA_VLOG_LINES(1, module->ToString()); EXPECT_THAT( module->entry_computation()->root_instruction(), op::Sharding( "{{devices=[2,2,2,1]<=[8]}, {devices=[1,2,2,1,2]<=[2,4]T(1,0) " "last_tile_dim_replicate}}")); } TEST_F(ShardingPropagationTest, CallPropagation) { const absl::string_view hlo_string = R"( HloModule module called_computation { p0 = bf16[20,2,68096,8512] parameter(0) %add_called_comp = bf16[20,2,68096,8512] add(p0, p0) ROOT tuple = (bf16[20,2,68096,8512]) tuple(add_called_comp) } ENTRY main { %param0 = bf16[20,2,68096,8512] parameter(0) %add = bf16[20,2,68096,8512] add(param0, param0) ROOT %call = (bf16[20,2,68096,8512]) call(add), to_apply=%called_computation, sharding={{devices=[1,1,16,64]<=[64,16]T(1,0)}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {false}, {false}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* add = FindInstruction(module.get(), "add"); ASSERT_NE(add, nullptr); EXPECT_THAT(add, op::Sharding("{devices=[1,1,16,64]<=[64,16]T(1,0)}")); } TEST_F(ShardingPropagationTest, CallPropagationWithSPMDShardToFullShape) { const absl::string_view hlo_string = R"( HloModule module called_computation { p0 = bf16[4096,4096] parameter(0) %add_called_comp = bf16[4096,4096] add(p0, p0) ROOT tuple = (bf16[4096,4096]) tuple(add_called_comp) } ENTRY main { %param0 = bf16[4096,4096] parameter(0) %add = bf16[4096,4096] add(param0, param0) %custom-call.1 = bf16[4096,4096]{1,0} custom-call(add), custom_call_target="Sharding", sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate} %custom-call.2 = bf16[2048,4096]{1,0} custom-call(custom-call.1), custom_call_target="SPMDFullToShardShape", sharding={manual} %custom-call.3 = bf16[2048,4096]{1,0} custom-call(custom-call.2), custom_call_target="Sharding", sharding={manual} %custom-call.4 = bf16[4096,4096]{1,0} custom-call(bf16[2048,4096]{1,0} %custom-call.3), custom_call_target="SPMDShardToFullShape", sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate} ROOT %call = (bf16[4096,4096]) call(custom-call.4), to_apply=%called_computation, sharding={devices=[2,2]<=[4]} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); 
TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {false}, {false}) .Run(module.get())); XLA_VLOG_LINES(1, module->ToString()); EXPECT_TRUE(changed); auto* custom_call_4 = FindInstruction(module.get(), "custom-call.4"); ASSERT_NE(custom_call_4, nullptr); auto* operand = custom_call_4->operand(0); EXPECT_THAT(operand, op::Shape("bf16[2048,4096]")); EXPECT_THAT(custom_call_4, op::Shape("bf16[4096,4096]")); EXPECT_THAT(custom_call_4, op::Sharding("{devices=[2,1,2]<=[4] last_tile_dim_replicate}")); } TEST_F(ShardingPropagationTest, ReplicateRngBitGeneratorSeed) { const char* const hlo_string = R"( HloModule module apply_or { x = u64[] parameter(0) y = u64[] parameter(1) ROOT x_or_y = or(x, y) } ENTRY main { p = s32[2,2]{1,0} parameter(0), sharding={devices=[2,2]<=[4]} up = u64[2,2] convert(p) i = u64[] constant(0) seed = u64[2] reduce(up, i), dimensions={1}, to_apply=apply_or rbg = u32[2048,4096] rng-bit-generator(seed), algorithm=rng_default ROOT s = u32[2048,4096]{1,0} custom-call(rbg), custom_call_target="Sharding", sharding={devices=[2,2]<=[4]} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, ShardingPropagation( true, true, {true}, {true}) .Run(module.get())); EXPECT_TRUE(changed); XLA_VLOG_LINES(1, module->ToString()); auto* instruction = FindInstruction(module.get(), "seed"); EXPECT_TRUE(instruction->sharding().IsReplicated()); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/sharding_propagation.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/sharding_propagation_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
4a0d1112-f66c-4f43-aa09-131a59dde473
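A minimal sketch of how the tests above construct and run the pass, for readers decoding the positional constructor arguments. The /*parameter*/ names are inferred from these call sites and the ShardingPropagation header at this commit; treat them as assumptions, not verbatim source.

#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/sharding_propagation.h"

// Sketch only: mirrors ShardingPropagation(true, true, {true}, {false})
// as used repeatedly in the tests above.
absl::StatusOr<bool> PropagateShardings(xla::HloModule* module) {
  xla::ShardingPropagation pass(
      /*is_spmd=*/true,
      /*propagate_metadata=*/true,
      /*allow_spmd_sharding_propagation_to_output=*/{true},
      /*allow_spmd_sharding_propagation_to_parameters=*/{false});
  return pass.Run(module);  // true iff any sharding annotation changed
}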
cpp
tensorflow/tensorflow
save
tensorflow/cc/experimental/libexport/save.cc
tensorflow/cc/experimental/libexport/save_test.cc
#include "tensorflow/cc/experimental/libexport/save.h" #include "tensorflow/core/platform/env.h" namespace tensorflow { namespace libexport { Status Save(const std::string& export_dir) { TF_RETURN_IF_ERROR(Env::Default()->RecursivelyCreateDir(export_dir)); return absl::OkStatus(); } } }
#include "tensorflow/cc/experimental/libexport/save.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/path.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace libexport { namespace { TEST(SaveTest, TestDirectoryStructure) { const string base_dir = tensorflow::io::JoinPath( tensorflow::testing::TmpDir(), "test_directory_structure"); TF_ASSERT_OK(Save(base_dir)); TF_ASSERT_OK(Env::Default()->IsDirectory(base_dir)); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/experimental/libexport/save.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/experimental/libexport/save_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
10b07a94-816f-47d6-ab5a-942410b3ce6e
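The Save() entry point above only creates the export directory at this commit, as the TestDirectoryStructure test confirms. A hypothetical caller (the function name and path below are illustrative, not from the repo):

#include "tensorflow/cc/experimental/libexport/save.h"
#include "tensorflow/core/platform/status.h"

// Hypothetical driver: Save() currently just ensures the directory exists.
tensorflow::Status ExportSkeleton() {
  return tensorflow::libexport::Save("/tmp/example_export");  // illustrative path
}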
cpp
tensorflow/tensorflow
fuse_auto_input
tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.cc
tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input_test.cc
#include "tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.h" #include <any> #include <string> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_replace.h" #include "absl/types/any.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/types.h" #include "tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h" #include "tensorflow/lite/delegates/gpu/gl/node_shader.h" namespace tflite { namespace gpu { namespace gl { namespace { std::pair<std::string, std::string> MakeValueReplacement(int n, int k) { return {absl::StrCat("value_", n), absl::StrCat("value_", k)}; } std::pair<std::string, std::string> MakeDataReplacement(int n, int k) { return {absl::StrCat("input_data_", n), absl::StrCat("input_data_", k)}; } } TransformResult FuseAutoInput::ApplyToNode(Node* node, GraphFloat32* graph) { auto& node_attr = std::any_cast<CompiledNodeAttributes&>(node->operation.attributes); auto& node_code = node_attr.code; if (node_code.input != IOStructure::AUTO) { return {TransformStatus::SKIPPED, ""}; } uint3 workgroup = node_code.workgroup; auto node_outputs = graph->FindOutputs(node->id); std::vector<std::pair<Node*, int>> nodes_to_fuse; std::vector<std::pair<ValueId, int>> input_values; int input_num = -1; for (auto input_value : graph->FindInputs(node->id)) { input_num++; const ValueId input_id = input_value->id; input_values.push_back({input_id, input_num}); if (graph->FindConsumers(input_id).size() > 1) { continue; } Node* input_producer = graph->FindProducer(input_id); if (input_producer == nullptr) { continue; } if (graph->FindOutputs(input_producer->id).size() != 1) { continue; } auto& input_producer_attr = std::any_cast<const CompiledNodeAttributes&>( input_producer->operation.attributes); if (input_producer_attr.code.output != IOStructure::AUTO) { continue; } if (input_producer_attr.code.workload != node_code.workload && uint3() != input_producer_attr.code.workload) { continue; } if (input_producer_attr.code.workgroup != uint3()) { if (workgroup != uint3()) { continue; } workgroup = input_producer_attr.code.workgroup; } nodes_to_fuse.push_back({input_producer, input_num}); input_values.pop_back(); } if (nodes_to_fuse.empty()) { return {TransformStatus::SKIPPED, ""}; } { absl::flat_hash_set<ValueId> all_inputs; for (const auto& node_to_fuse : nodes_to_fuse) { for (const auto& input : graph->FindInputs(node_to_fuse.first->id)) { if (all_inputs.find(input->id) != all_inputs.end()) { return {TransformStatus::SKIPPED, ""}; } all_inputs.insert(input->id); } } for (const auto& input : graph->FindInputs(node->id)) { if (all_inputs.find(input->id) != all_inputs.end()) { return {TransformStatus::SKIPPED, ""}; } all_inputs.insert(input->id); } } for (auto value : graph->FindInputs(node->id)) { if (!graph->RemoveConsumer(node->id, value->id).ok()) { return {TransformStatus::INVALID, ""}; } } std::string operation_type; std::string source_code; std::string values; std::swap(source_code, node_code.source_code); int extra_input_num = input_num; input_num = 0; for (auto input_and_num : nodes_to_fuse) { auto& input = input_and_num.first; auto& attr = std::any_cast<CompiledNodeAttributes&>(input->operation.attributes); auto super_inputs = graph->FindInputs(input->id); std::vector<std::pair<std::string, std::string>> replacements; for (int i = 0; i < super_inputs.size(); ++i) { int value_index = i == 0 ? 
input_and_num.second : ++extra_input_num; replacements.push_back(MakeValueReplacement(i, value_index)); replacements.push_back(MakeDataReplacement(i, input_num)); if (attr.code.input == IOStructure::AUTO) { absl::StrAppend(&values, " value_", value_index, " = $input_data_", input_num, "[gid.x, gid.y, gid.z]$;\n"); } if (!graph->AddConsumer(node->id, super_inputs[i]->id).ok()) { return {TransformStatus::INVALID, ""}; } input_num++; } for (auto& param : attr.code.parameters) { param.name = absl::StrReplaceAll(param.name, replacements); } attr.code.source_code = absl::StrReplaceAll(attr.code.source_code, replacements); if (!MergeCode(&attr, &node_attr).ok()) { return {TransformStatus::INVALID, "Unable to merge the code"}; } absl::StrAppend(&node_attr.code.source_code, "{\n", attr.code.source_code, "\n}"); if (!operation_type.empty()) { operation_type += ","; } operation_type += input->operation.type; if (!graph->DeleteNode(input->id).ok()) { return {TransformStatus::INVALID, ""}; } } for (int i = 0; i < input_values.size(); i++) { if (node_code.input == IOStructure::AUTO) { absl::StrAppend(&values, " value_", input_values[i].second, " = $input_data_", input_num, "[gid.x, gid.y, gid.z]$;\n"); } if (!graph->AddConsumer(node->id, input_values[i].first).ok()) { return {TransformStatus::INVALID, ""}; } input_num++; } node_code.input = IOStructure::ONLY_DEFINITIONS; absl::StrAppend(&node->operation.type, "(", operation_type, ")"); node_code.source_code = absl::StrCat(values, node_code.source_code, "{//FUSED", node->operation.type, "\n", source_code, "\n}"); return {TransformStatus::APPLIED, ""}; } } } }
#include "tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.h" #include <utility> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/types/any.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h" #include "tensorflow/lite/delegates/gpu/gl/node_shader.h" namespace tflite { namespace gpu { namespace gl { namespace { TEST(FuseAutoInputTest, SkipsDiamond) { GraphFloat32 graph; auto* v0 = graph.NewValue(); auto* v1 = graph.NewValue(); auto* v2 = graph.NewValue(); auto* v3 = graph.NewValue(); auto* n1 = graph.NewNode(); CompiledNodeAttributes a1; a1.code.output = IOStructure::AUTO; n1->operation.attributes = std::move(a1); ASSERT_OK(graph.AddConsumer(n1->id, v0->id)); ASSERT_OK(graph.SetProducer(n1->id, v1->id)); auto* n2 = graph.NewNode(); CompiledNodeAttributes a2; a2.code.output = IOStructure::AUTO; n2->operation.attributes = std::move(a2); ASSERT_OK(graph.AddConsumer(n2->id, v0->id)); ASSERT_OK(graph.SetProducer(n2->id, v2->id)); auto* n3 = graph.NewNode(); CompiledNodeAttributes a3; a3.code.input = IOStructure::AUTO; n3->operation.attributes = std::move(a3); ASSERT_OK(graph.AddConsumer(n3->id, v1->id)); ASSERT_OK(graph.AddConsumer(n3->id, v2->id)); ASSERT_OK(graph.SetProducer(n3->id, v3->id)); FuseAutoInput fuse_auto_input; EXPECT_EQ(fuse_auto_input.ApplyToNode(n3, &graph).status, TransformStatus::SKIPPED); } TEST(FuseAutoInputTest, SkipsTriangle) { GraphFloat32 graph; auto* v0 = graph.NewValue(); auto* v1 = graph.NewValue(); auto* v2 = graph.NewValue(); auto* n1 = graph.NewNode(); CompiledNodeAttributes a1; a1.code.output = IOStructure::AUTO; n1->operation.attributes = std::move(a1); ASSERT_OK(graph.AddConsumer(n1->id, v0->id)); ASSERT_OK(graph.SetProducer(n1->id, v1->id)); auto* n2 = graph.NewNode(); CompiledNodeAttributes a2; a2.code.input = IOStructure::AUTO; n2->operation.attributes = std::move(a2); ASSERT_OK(graph.AddConsumer(n2->id, v0->id)); ASSERT_OK(graph.AddConsumer(n2->id, v1->id)); ASSERT_OK(graph.SetProducer(n2->id, v2->id)); FuseAutoInput fuse_auto_input; EXPECT_EQ(fuse_auto_input.ApplyToNode(n2, &graph).status, TransformStatus::SKIPPED); } } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
2e05ec21-ce2b-4ad2-803e-f2288754db8c
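FuseAutoInput's merging step hinges on renaming value_<n>/input_data_<n> identifiers inside fused shader source via absl::StrReplaceAll. A self-contained sketch of that idiom; the concrete index pairs below are hypothetical, in the pass they come from MakeValueReplacement/MakeDataReplacement:

#include <string>
#include <utility>
#include <vector>

#include "absl/strings/str_replace.h"

// Renumber the identifiers of one fused input so they do not collide with
// identifiers already used by the consumer shader.
std::string RenumberFusedSource(const std::string& source) {
  const std::vector<std::pair<std::string, std::string>> replacements = {
      {"value_0", "value_3"},            // hypothetical target value index
      {"input_data_0", "input_data_1"},  // hypothetical target input slot
  };
  return absl::StrReplaceAll(source, replacements);
}

StrReplaceAll applies every pair in a single pass over the string, so the output of one substitution is never re-rewritten by another; this is why the pass accumulates the full replacement list before applying it, rather than chaining single replaces.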
cpp
google/arolla
backend_operator
arolla/expr/operator_loader/backend_operator.cc
arolla/expr/operator_loader/backend_operator_test.cc
#include "arolla/expr/operator_loader/backend_operator.h" #include <memory> #include <set> #include <string> #include <utility> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "arolla/expr/basic_expr_operator.h" #include "arolla/expr/expr.h" #include "arolla/expr/expr_attributes.h" #include "arolla/expr/expr_node.h" #include "arolla/expr/expr_operator.h" #include "arolla/expr/expr_operator_signature.h" #include "arolla/expr/operator_loader/parameter_qtypes.h" #include "arolla/expr/operator_loader/qtype_constraint.h" #include "arolla/expr/operator_loader/qtype_inference.h" #include "arolla/util/fingerprint.h" #include "arolla/util/status_macros_backport.h" namespace arolla::operator_loader { using ::arolla::expr::ExprAttributes; using ::arolla::expr::ExprNodePtr; using ::arolla::expr::ExprOperatorPtr; using ::arolla::expr::ExprOperatorSignature; using ::arolla::expr::GetPlaceholderKeys; absl::StatusOr<ExprOperatorPtr> BackendOperator::Make( absl::string_view name, ExprOperatorSignature signature, absl::string_view doc, std::vector<QTypeConstraint> qtype_constraints, ExprNodePtr qtype_inference_expr) { RETURN_IF_ERROR(ValidateSignature(signature)); absl::flat_hash_set<absl::string_view> parameter_names; for (const auto& param : signature.parameters) { parameter_names.insert(param.name); } std::set<std::string> undefined_parameter_names; for (const auto& qtype_constraint : qtype_constraints) { for (auto&& placeholder_key : GetPlaceholderKeys(qtype_constraint.predicate_expr)) { if (!parameter_names.contains(placeholder_key)) { undefined_parameter_names.insert(std::move(placeholder_key)); } } } for (auto&& placeholder_key : GetPlaceholderKeys(qtype_inference_expr)) { if (!parameter_names.contains(placeholder_key)) { undefined_parameter_names.insert(std::move(placeholder_key)); } } if (!undefined_parameter_names.empty()) { return absl::InvalidArgumentError( "unexpected parameters: P." 
+ absl::StrJoin(undefined_parameter_names, ", P.")); } ASSIGN_OR_RETURN( auto qtype_inference_fn, MakeQTypeInferenceFn(qtype_constraints, qtype_inference_expr)); FingerprintHasher hasher("::arolla::operator_loader::BackendOperator"); hasher.Combine(name, signature, doc, qtype_inference_expr->fingerprint(), qtype_constraints.size()); for (const auto& qtype_constraint : qtype_constraints) { hasher.Combine(qtype_constraint.predicate_expr->fingerprint(), qtype_constraint.error_message); } return std::make_shared<BackendOperator>( PrivateConstructorTag{}, name, std::move(signature), doc, std::move(hasher).Finish(), std::move(qtype_constraints), std::move(qtype_inference_expr), std::move(qtype_inference_fn)); } BackendOperator::BackendOperator(PrivateConstructorTag, absl::string_view name, ExprOperatorSignature signature, absl::string_view doc, Fingerprint fingerprint, std::vector<QTypeConstraint> qtype_constraints, ExprNodePtr qtype_inference_expr, QTypeInferenceFn qtype_inference_fn) : ExprOperatorWithFixedSignature(name, std::move(signature), doc, fingerprint), qtype_constraints_(std::move(qtype_constraints)), qtype_inference_expr_(std::move(qtype_inference_expr)), qtype_inference_fn_(std::move(qtype_inference_fn)) {} absl::StatusOr<ExprAttributes> BackendOperator::InferAttributes( absl::Span<const ExprAttributes> inputs) const { RETURN_IF_ERROR(ValidateOpInputsCount(inputs)); ASSIGN_OR_RETURN(auto parameter_qtypes, ExtractParameterQTypes(signature(), inputs)); ASSIGN_OR_RETURN(auto* output_qtype, qtype_inference_fn_(parameter_qtypes)); return ExprAttributes(output_qtype); } absl::string_view BackendOperator::py_qvalue_specialization_key() const { return "::arolla::operator_loader::BackendOperator"; } }
#include "arolla/expr/operator_loader/backend_operator.h" #include <memory> #include <optional> #include <utility> #include <vector> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/status/status.h" #include "absl/status/status_matchers.h" #include "absl/status/statusor.h" #include "arolla/array/array.h" #include "arolla/array/qtype/types.h" #include "arolla/dense_array/dense_array.h" #include "arolla/dense_array/qtype/types.h" #include "arolla/expr/eval/invoke.h" #include "arolla/expr/expr.h" #include "arolla/expr/expr_operator.h" #include "arolla/expr/expr_operator_signature.h" #include "arolla/expr/operator_loader/qtype_constraint.h" #include "arolla/memory/optional_value.h" #include "arolla/qtype/qtype.h" #include "arolla/qtype/qtype_traits.h" #include "arolla/qtype/tuple_qtype.h" #include "arolla/util/unit.h" #include "arolla/util/status_macros_backport.h" namespace arolla::operator_loader { namespace { using ::absl_testing::IsOkAndHolds; using ::absl_testing::StatusIs; using ::arolla::expr::CallOp; using ::arolla::expr::ExprOperatorPtr; using ::arolla::expr::ExprOperatorSignature; using ::arolla::expr::Literal; using ::arolla::expr::Placeholder; using ::testing::HasSubstr; class BackendOperatorTest : public ::testing::Test { protected: absl::StatusOr<std::shared_ptr<const BackendOperator>> MakeOp() { ASSIGN_OR_RETURN(auto qtype_constraint_predicate_expr_1, CallOp("core.not_equal", {CallOp("qtype.get_scalar_qtype", {Placeholder("x")}), Literal(GetNothingQType())})); ASSIGN_OR_RETURN(auto qtype_constraint_predicate_expr_2, CallOp("core.not_equal", {CallOp("qtype.get_scalar_qtype", {Placeholder("y")}), Literal(GetNothingQType())})); ASSIGN_OR_RETURN( auto qtype_constraint_predicate_expr_3, CallOp("core.not_equal", {CallOp("qtype.broadcast_qtype_like", {Placeholder("y"), Placeholder("x")}), Literal(GetNothingQType())})); std::vector<QTypeConstraint> qtype_constraints = { {qtype_constraint_predicate_expr_1, "expected `x` to be a scalar based type, got {x}"}, {qtype_constraint_predicate_expr_2, "expected `y` to be a UNIT based type, got {y}"}, {qtype_constraint_predicate_expr_3, "incompatible types x:{x} and y:{y}"}, }; ASSIGN_OR_RETURN(auto qtype_inference_expr, CallOp("qtype.broadcast_qtype_like", {Placeholder("y"), Placeholder("x")})); ASSIGN_OR_RETURN( auto op, BackendOperator::Make( "core.presence_and", ExprOperatorSignature{{"x"}, {"y"}}, "presence-and-doc-string", std::move(qtype_constraints), std::move(qtype_inference_expr))); return std::dynamic_pointer_cast<const BackendOperator>(op); } }; TEST_F(BackendOperatorTest, GetDoc) { ASSERT_OK_AND_ASSIGN(auto op, MakeOp()); ASSERT_THAT(op.get()->doc(), "presence-and-doc-string"); ASSERT_THAT(op->GetDoc(), IsOkAndHolds("presence-and-doc-string")); } TEST_F(BackendOperatorTest, QTypeInference) { { ASSERT_OK_AND_ASSIGN(auto expr, CallOp(MakeOp(), {Literal(1.5f), Literal(kUnit)})); EXPECT_EQ(expr->qtype(), GetQType<float>()); } { ASSERT_OK_AND_ASSIGN( auto expr, CallOp(MakeOp(), {Literal(1.5f), Literal(OptionalValue<Unit>())})); EXPECT_EQ(expr->qtype(), GetQType<OptionalValue<float>>()); } } TEST_F(BackendOperatorTest, QTypeConstraint) { EXPECT_THAT( CallOp(MakeOp(), {Literal(MakeTupleFromFields()), Literal(kUnit)}), StatusIs( absl::StatusCode::kInvalidArgument, HasSubstr("expected `x` to be a scalar based type, got tuple<>"))); EXPECT_THAT( CallOp(MakeOp(), {Literal(1.5f), Literal(MakeTupleFromFields())}), StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("expected `y` to be a UNIT based type, got tuple<>"))); 
EXPECT_THAT( CallOp(MakeOp(), {Literal(Array<float>()), Literal(DenseArray<Unit>())}), StatusIs( absl::StatusCode::kInvalidArgument, HasSubstr( "incompatible types x:ARRAY_FLOAT32 and y:DENSE_ARRAY_UNIT"))); } TEST_F(BackendOperatorTest, Eval) { ASSERT_OK_AND_ASSIGN( auto expr, CallOp(MakeOp(), {Literal(1.5f), Literal(OptionalValue<Unit>())})); ASSERT_OK_AND_ASSIGN(auto result_tv, Invoke(expr, {})); ASSERT_OK_AND_ASSIGN(auto result, result_tv.As<OptionalValue<float>>()); EXPECT_EQ(result.get(), std::nullopt); } TEST_F(BackendOperatorTest, UnexpectedParameters) { ASSERT_OK_AND_ASSIGN(auto op, MakeOp()); auto& backend_op = dynamic_cast<const BackendOperator&>(*op); EXPECT_THAT(BackendOperator::Make("core.presence_and", ExprOperatorSignature{{"a"}, {"b"}}, "docstring", backend_op.qtype_constraints(), backend_op.qtype_inference_expr()), StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("unexpected parameters: P.x, P.y"))); } } }
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operator_loader/backend_operator.cc
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operator_loader/backend_operator_test.cc
1ca990dbeca224035efdabffecc7f3738df6b52c
74a88cec-4e14-40d9-854d-624c46d9da7a
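BackendOperator::Make derives the operator's fingerprint by folding the name, doc, inference expression, and every constraint into one FingerprintHasher. A reduced sketch of that chaining idiom; the salt string and combined fields here are placeholders:

#include <cstddef>
#include <utility>

#include "absl/strings/string_view.h"
#include "arolla/util/fingerprint.h"

// Reduced version of the hashing done in BackendOperator::Make above:
// seed a hasher with a salt, fold in identifying fields, then finish.
arolla::Fingerprint MakeOperatorFingerprint(absl::string_view name,
                                            absl::string_view doc,
                                            size_t num_constraints) {
  arolla::FingerprintHasher hasher("::example::MyOperator");  // placeholder salt
  hasher.Combine(name, doc, num_constraints);
  return std::move(hasher).Finish();
}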
cpp
google/arolla
regex
arolla/qtype/strings/regex.cc
arolla/qtype/strings/regex_test.cc
#include "arolla/qtype/strings/regex.h" #include <memory> #include <string> #include "absl/base/nullability.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "arolla/qtype/simple_qtype.h" #include "arolla/util/fingerprint.h" #include "arolla/util/repr.h" #include "re2/re2.h" namespace arolla { namespace { class RE2Regex final : public Regex { public: explicit RE2Regex(absl::string_view pattern) : re2_(pattern, RE2::Quiet) {} bool ok() const { return re2_.ok(); } absl::string_view error() const { return re2_.error(); } absl::string_view pattern() const final { return re2_.pattern(); } int NumberOfCapturingGroups() const final { return re2_.NumberOfCapturingGroups(); } bool PartialMatch(absl::string_view text) const final { return re2_.PartialMatch(text, re2_); } bool PartialMatch(absl::string_view text, std::string* match) const final { return RE2::PartialMatch(text, re2_, match); } private: RE2 re2_; }; } absl::StatusOr<absl::Nonnull<RegexPtr>> CompileRegex( absl::string_view pattern) { auto result = std::make_shared<RE2Regex>(pattern); if (result->ok()) { return result; } return absl::InvalidArgumentError(absl::StrCat( "invalid regular expression: `", pattern, "`; ", result->error())); } void FingerprintHasherTraits<RegexPtr>::operator()( FingerprintHasher* hasher, const RegexPtr& value) const { if (value != nullptr) { hasher->Combine(value->pattern()); } } ReprToken ReprTraits<RegexPtr>::operator()(const RegexPtr& value) const { if (value == nullptr) { return {"regex{}"}; } return {absl::StrCat("regex{`", value->pattern(), "`}")}; } AROLLA_DEFINE_SIMPLE_QTYPE(REGEX, RegexPtr) }
#include "arolla/qtype/strings/regex.h" #include <string> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/status/status.h" #include "absl/status/status_matchers.h" #include "arolla/qtype/qtype_traits.h" #include "arolla/qtype/typed_value.h" #include "arolla/util/fingerprint.h" #include "arolla/util/repr.h" using ::absl_testing::StatusIs; using ::testing::HasSubstr; namespace arolla { namespace { TEST(Regex, NoCapturingGroups) { ASSERT_OK_AND_ASSIGN(auto regex, CompileRegex("\\d+ bottles of beer")); ASSERT_NE(regex, nullptr); EXPECT_EQ(regex->NumberOfCapturingGroups(), 0); EXPECT_TRUE(regex->PartialMatch("100 bottles of beer")); std::string match; EXPECT_FALSE(regex->PartialMatch("100 bottles of beer", &match)); } TEST(Regex, OneCapturingGroup) { ASSERT_OK_AND_ASSIGN(auto regex, CompileRegex("(\\d+) bottles of beer")); ASSERT_NE(regex, nullptr); EXPECT_EQ(regex->NumberOfCapturingGroups(), 1); EXPECT_TRUE(regex->PartialMatch("100 bottles of beer")); std::string match; EXPECT_TRUE(regex->PartialMatch("100 bottles of beer", &match)); EXPECT_EQ(match, "100"); } TEST(Regex, ManyCapturingGroup) { ASSERT_OK_AND_ASSIGN(auto regex, CompileRegex("(\\d+) (bottles) (of) beer")); ASSERT_NE(regex, nullptr); EXPECT_EQ(regex->NumberOfCapturingGroups(), 3); EXPECT_TRUE(regex->PartialMatch("100 bottles of beer")); std::string match; EXPECT_TRUE(regex->PartialMatch("100 bottles of beer", &match)); EXPECT_EQ(match, "100"); } TEST(Regex, Repr) { ASSERT_OK_AND_ASSIGN(auto regex1, CompileRegex("abc")); ASSERT_OK_AND_ASSIGN(auto regex2, CompileRegex("a.c")); EXPECT_EQ(regex1->pattern(), "abc"); EXPECT_EQ(regex2->pattern(), "a.c"); EXPECT_EQ(Repr(RegexPtr{}), "regex{}"); EXPECT_EQ(Repr(regex1), "regex{`abc`}"); EXPECT_EQ(Repr(regex2), "regex{`a.c`}"); } TEST(Regex, Fingerprint) { ASSERT_OK_AND_ASSIGN(auto regex1_1, CompileRegex("abc")); ASSERT_OK_AND_ASSIGN(auto regex1_2, CompileRegex("abc")); ASSERT_OK_AND_ASSIGN(auto regex2_1, CompileRegex("a.c")); ASSERT_OK_AND_ASSIGN(auto regex2_2, CompileRegex("a.c")); auto fingerprint0_1 = FingerprintHasher("salt").Combine(RegexPtr{}).Finish(); auto fingerprint0_2 = FingerprintHasher("salt").Combine(RegexPtr{}).Finish(); auto fingerprint1_1 = FingerprintHasher("salt").Combine(regex1_1).Finish(); auto fingerprint1_2 = FingerprintHasher("salt").Combine(regex1_2).Finish(); auto fingerprint2_1 = FingerprintHasher("salt").Combine(regex2_1).Finish(); auto fingerprint2_2 = FingerprintHasher("salt").Combine(regex2_2).Finish(); EXPECT_EQ(fingerprint0_1, fingerprint0_2); EXPECT_EQ(fingerprint1_1, fingerprint1_2); EXPECT_EQ(fingerprint2_1, fingerprint2_2); EXPECT_NE(fingerprint0_1, fingerprint1_1); EXPECT_NE(fingerprint1_1, fingerprint2_1); EXPECT_NE(fingerprint2_1, fingerprint0_1); } TEST(Regex, QType) { EXPECT_EQ(GetQType<RegexPtr>()->name(), "REGEX"); EXPECT_EQ(GetQType<RegexPtr>()->type_info(), typeid(RegexPtr)); ASSERT_OK_AND_ASSIGN(auto regex, CompileRegex("a.c")); auto qvalue = TypedValue::FromValue(regex); EXPECT_EQ(qvalue.Repr(), "regex{`a.c`}"); } TEST(Regex, CompilationError) { EXPECT_THAT(CompileRegex("ab\\αcd"), StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("invalid regular expression: `ab\\αcd`;"))); } } }
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/strings/regex.cc
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/strings/regex_test.cc
1ca990dbeca224035efdabffecc7f3738df6b52c
16498d21-1d10-4b45-ae7a-9b43a041a5b6
cpp
tensorflow/tensorflow
memory_space_propagation
third_party/xla/xla/service/memory_space_propagation.cc
third_party/xla/xla/service/memory_space_propagation_test.cc
#include "xla/service/memory_space_propagation.h" #include <cstdint> #include "xla/shape.h" #include "xla/shape_util.h" namespace xla { absl::StatusOr<bool> MemorySpacePropagation::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool modified = false; TF_ASSIGN_OR_RETURN(auto dataflow_analysis, HloDataflowAnalysis::Run(*module, false, true)); dataflow_analysis_ = std::move(dataflow_analysis); for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { for (HloInstruction* instruction : computation->instructions()) { if (instruction->opcode() == HloOpcode::kFusion) { for (int operand_idx = 0; operand_idx < instruction->fused_parameters().size(); ++operand_idx) { ShapeUtil::ForEachLeafShape( instruction->operand(operand_idx)->shape(), [&](const Shape& sub_shape, const ShapeIndex& index) { int64_t memory_space = sub_shape.layout().memory_space(); modified |= Propagate(index, instruction->fused_parameter(operand_idx), memory_space); }); } ShapeUtil::ForEachLeafShape( instruction->shape(), [&](const Shape& sub_shape, const ShapeIndex& index) { int64_t memory_space = sub_shape.layout().memory_space(); modified |= Propagate(index, instruction->fused_expression_root(), memory_space); }); } } } return modified; } bool MemorySpacePropagation::Propagate(ShapeIndexView index, const HloInstruction* callee_instruction, int64_t memory_space) const { bool modified = false; const HloValue& value = dataflow_analysis_->GetUniqueValueAt( callee_instruction, ShapeIndex(index)); for (const HloPosition& position : value.positions()) { HloInstruction* instruction = position.instruction; Shape* shape = ShapeUtil::GetMutableSubshape(instruction->mutable_shape(), position.index); if (shape->layout().memory_space() == memory_space) { continue; } shape->mutable_layout()->set_memory_space(memory_space); modified = true; if (instruction->opcode() == HloOpcode::kFusion) { Propagate(position.index, instruction->fused_expression_root(), memory_space); } const HloInstruction* parent_fusion = instruction->parent()->FusionInstruction(); if (instruction == instruction->parent()->root_instruction() && parent_fusion->parent()->IsFusionComputation()) { Propagate(position.index, parent_fusion, memory_space); } if (instruction->opcode() == HloOpcode::kParameter && parent_fusion->parent()->IsFusionComputation()) { const HloInstruction* fusion_operand = parent_fusion->operand(instruction->parameter_number()); Propagate(position.index, fusion_operand, memory_space); } } for (const HloUse& use : value.GetUses()) { if (use.instruction->opcode() == HloOpcode::kFusion) { modified |= Propagate( use.operand_index, use.instruction->fused_parameter(use.operand_number), memory_space); } } return modified; } }
#include "xla/service/memory_space_propagation.h" #include "xla/service/hlo_parser.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" namespace xla { namespace { class MemorySpacePropagationTest : public HloTestBase { public: MemorySpacePropagationTest() : HloTestBase(), verifier_(false, false) { } absl::Status Verify(HloModule* module) { return verifier_.Run(module).status(); } private: HloVerifier verifier_; }; TEST_F(MemorySpacePropagationTest, NoMemorySpace) { absl::string_view hlo_string = R"( HloModule NoMemorySpace %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[6]{0:T(128)} parameter(0) ROOT %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1) } ENTRY %entry { %param0 = s32[6]{0:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[6]{0:T(128)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)} copy(%param2) %fusion = s32[6]{0:T(128)} fusion(s32[6]{0:T(128)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)} %arg2), kind=kLoop, calls=%fused_computation ROOT %root = s32[6]{0:T(128)} copy(%fusion) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); MemorySpacePropagation memory_space_propagation; EXPECT_FALSE(memory_space_propagation.Run(module.get()).value()); TF_ASSERT_OK_AND_ASSIGN(auto ref, ParseAndReturnVerifiedModule(hlo_string)); EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref)); } TEST_F(MemorySpacePropagationTest, NonTupleOutput) { absl::string_view hlo_string = R"( HloModule NonTupleOutput %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[6]{0:T(128)} parameter(0) ROOT %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1) } ENTRY %entry { %param0 = s32[6]{0:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[6]{0:T(128)S(1)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)S(1)} copy(%param2) %fusion = s32[6]{0:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation ROOT %root = s32[6]{0:T(128)} copy(%fusion) } )"; absl::string_view expected_hlo_string = R"( HloModule NonTupleOutput %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)S(1)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} 
maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[6]{0:T(128)S(1)} parameter(0) ROOT %add.0 = s32[6]{0:T(128)S(1)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1) } ENTRY %entry { %param0 = s32[6]{0:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[6]{0:T(128)S(1)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)S(1)} copy(%param2) %fusion = s32[6]{0:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation ROOT %root = s32[6]{0:T(128)} copy(%fusion) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo_string)); MemorySpacePropagation memory_space_propagation; EXPECT_TRUE(memory_space_propagation.Run(module.get()).value()); TF_EXPECT_OK(Verify(module.get())); TF_ASSERT_OK_AND_ASSIGN(auto ref, ParseAndReturnVerifiedModule(expected_hlo_string)); EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref)); } TEST_F(MemorySpacePropagationTest, TupleOutput) { absl::string_view hlo_string = R"( HloModule TupleOutput %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[6]{0:T(128)} parameter(0) %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1) %multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1) ROOT %tuple = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) tuple(%add.0, %multiply.0) } ENTRY %entry { %param0 = s32[6]{0:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[6]{0:T(128)S(1)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)S(1)} copy(%param2) %fusion = (s32[6]{0:T(128)S(1)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation %gte0 = s32[6]{0:T(128)S(1)} get-tuple-element(%fusion), index=0 %gte1 = s32[6]{0:T(128)} get-tuple-element(%fusion), index=1 ROOT %root = s32[6]{0:T(128)} add(%gte0, %gte1) } )"; absl::string_view expected_hlo_string = R"( HloModule TupleOutput %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)S(1)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[6]{0:T(128)S(1)} parameter(0) %add.0 = s32[6]{0:T(128)S(1)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1) %multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1) ROOT %tuple = (s32[6]{0:T(128)S(1)}, s32[6]{0:T(128)}) tuple(%add.0, %multiply.0) } ENTRY %entry { %param0 = s32[6]{0:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[6]{0:T(128)S(1)} copy(%param0) %arg1 = 
s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)S(1)} copy(%param2) %fusion = (s32[6]{0:T(128)S(1)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation %gte0 = s32[6]{0:T(128)S(1)} get-tuple-element(%fusion), index=0 %gte1 = s32[6]{0:T(128)} get-tuple-element(%fusion), index=1 ROOT %root = s32[6]{0:T(128)} add(%gte0, %gte1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo_string)); MemorySpacePropagation memory_space_propagation; EXPECT_TRUE(memory_space_propagation.Run(module.get()).value()); TF_EXPECT_OK(Verify(module.get())); TF_ASSERT_OK_AND_ASSIGN(auto ref, ParseAndReturnVerifiedModule(expected_hlo_string)); EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref)); } TEST_F(MemorySpacePropagationTest, NestedInputFusion) { absl::string_view hlo_string = R"( HloModule NestedFusion %bitcast_fusion { %bf_param = s32[3,2]{0,1:T(128)} parameter(0) ROOT %bitcast = s32[6]{0:T(128)} bitcast(%bf_param) } %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[3,2]{0,1:T(128)} parameter(0) %fusion.1 = s32[6]{0:T(128)} fusion(%param_0.1), kind=kLoop, calls=bitcast_fusion ROOT %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %fusion.1) } ENTRY %entry { %param0 = s32[3,2]{0,1:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[3,2]{0,1:T(128)S(1)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)S(1)} copy(%param2) %fusion = s32[6]{0:T(128)S(1)} fusion(s32[3,2]{0,1:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation ROOT %root = s32[6]{0:T(128)} copy(%fusion) } )"; absl::string_view expected_hlo_string = R"( HloModule NestedFusion %bitcast_fusion { %bf_param = s32[3,2]{0,1:T(128)S(1)} parameter(0) ROOT %bitcast = s32[6]{0:T(128)} bitcast(%bf_param) } %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)S(1)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[3,2]{0,1:T(128)S(1)} parameter(0) %fusion.1 = s32[6]{0:T(128)} fusion(%param_0.1), kind=kLoop, calls=bitcast_fusion ROOT %add.0 = s32[6]{0:T(128)S(1)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %fusion.1) } ENTRY %entry { %param0 = s32[3,2]{0,1:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[3,2]{0,1:T(128)S(1)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)S(1)} copy(%param2) %fusion = s32[6]{0:T(128)S(1)} fusion(s32[3,2]{0,1:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation ROOT %root = s32[6]{0:T(128)} copy(%fusion) } )"; 
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo_string)); MemorySpacePropagation memory_space_propagation; EXPECT_TRUE(memory_space_propagation.Run(module.get()).value()); TF_EXPECT_OK(Verify(module.get())); TF_ASSERT_OK_AND_ASSIGN(auto ref, ParseAndReturnVerifiedModule(expected_hlo_string)); EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref)); } TEST_F(MemorySpacePropagationTest, NestedOutputFusion) { absl::string_view hlo_string = R"( HloModule NestedFusion %bitcast_fusion { %bf_param = s32[6]{0:T(128)} parameter(0) ROOT %bitcast = s32[3,2]{0,1:T(128)} bitcast(%bf_param) } %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[6]{0:T(128)} parameter(0) %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1) ROOT %fusion.1 = s32[3,2]{0,1:T(128)} fusion(%add.0), kind=kLoop, calls=bitcast_fusion } ENTRY %entry { %param0 = s32[6]{0:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[6]{0:T(128)S(1)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)S(1)} copy(%param2) %fusion = s32[3,2]{0,1:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation ROOT %root = s32[3,2]{0,1:T(128)} copy(%fusion) } )"; absl::string_view expected_hlo_string = R"( HloModule NestedFusion %bitcast_fusion { %bf_param = s32[6]{0:T(128)} parameter(0) ROOT %bitcast = s32[3,2]{0,1:T(128)S(1)} bitcast(%bf_param) } %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)S(1)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[6]{0:T(128)S(1)} parameter(0) %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)S(1)} %param_0.1) ROOT %fusion.1 = s32[3,2]{0,1:T(128)S(1)} fusion(%add.0), kind=kLoop, calls=bitcast_fusion } ENTRY %entry { %param0 = s32[6]{0:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[6]{0:T(128)S(1)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)S(1)} copy(%param2) %fusion = s32[3,2]{0,1:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation ROOT %root = s32[3,2]{0,1:T(128)} copy(%fusion) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo_string)); MemorySpacePropagation memory_space_propagation; EXPECT_TRUE(memory_space_propagation.Run(module.get()).value()); TF_EXPECT_OK(Verify(module.get())); TF_ASSERT_OK_AND_ASSIGN(auto ref, ParseAndReturnVerifiedModule(expected_hlo_string)); EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref)); } TEST_F(MemorySpacePropagationTest, BitcastInFusion) { absl::string_view hlo_string = R"( 
HloModule TupleOutput %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[6]{0:T(128)} parameter(0) %bitcast.0 = s32[6]{0:T(128)} bitcast(s32[6]{0:T(128)} %param_0.1) %multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1) ROOT %tuple = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) tuple(%bitcast.0, %multiply.0) } ENTRY %entry { %param0 = s32[6]{0:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[6]{0:T(128)S(1)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)S(1)} copy(%param2) ROOT %fusion = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation } )"; absl::string_view expected_hlo_string = R"( HloModule TupleOutput %fused_computation { %param_1.3 = s32[1]{0:T(128)} parameter(1) %constant.2 = s32[]{:T(128)} constant(-2147483648) %pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5 %param_2.3 = s32[5]{0:T(128)S(1)} parameter(2) %pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)S(1)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0 %maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3) %param_0.1 = s32[6]{0:T(128)S(1)} parameter(0) %bitcast.0 = s32[6]{0:T(128)} bitcast(s32[6]{0:T(128)S(1)} %param_0.1) %multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)S(1)} %param_0.1) ROOT %tuple = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) tuple(%bitcast.0, %multiply.0) } ENTRY %entry { %param0 = s32[6]{0:T(128)} parameter(0) %param1 = s32[1]{0:T(128)} parameter(1) %param2 = s32[5]{0:T(128)} parameter(2) %arg0 = s32[6]{0:T(128)S(1)} copy(%param0) %arg1 = s32[1]{0:T(128)} copy(%param1) %arg2 = s32[5]{0:T(128)S(1)} copy(%param2) ROOT %fusion = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo_string)); MemorySpacePropagation memory_space_propagation; EXPECT_TRUE(memory_space_propagation.Run(module.get()).value()); TF_EXPECT_OK(Verify(module.get())); TF_ASSERT_OK_AND_ASSIGN(auto ref, ParseAndReturnVerifiedModule(expected_hlo_string)); EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_propagation.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_propagation_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9cb97eb8-0ab8-44ff-92e5-3ba9b6eeb3a6
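The pass above reduces to one mutation per aliased buffer position: overwrite the layout's memory_space field, which prints as the S(1) suffix in the HLO strings. A standalone sketch of that mutation using the same XLA APIs as Propagate; the wrapper function itself is hypothetical:

#include <cstdint>

#include "xla/shape.h"
#include "xla/shape_util.h"

// Hypothetical helper mirroring the core of MemorySpacePropagation::Propagate:
// fetch the subshape at `index` and rewrite its layout's memory space,
// reporting whether anything changed.
bool SetLeafMemorySpace(xla::Shape* shape, const xla::ShapeIndex& index,
                        int64_t memory_space) {
  xla::Shape* subshape = xla::ShapeUtil::GetMutableSubshape(shape, index);
  if (subshape->layout().memory_space() == memory_space) {
    return false;  // already in the requested space
  }
  subshape->mutable_layout()->set_memory_space(memory_space);
  return true;
}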
cpp
tensorflow/tensorflow
cudnn_vectorize_convolutions
third_party/xla/xla/service/gpu/transforms/cudnn_vectorize_convolutions.cc
third_party/xla/xla/service/gpu/transforms/cudnn_vectorize_convolutions_test.cc
#include "xla/service/gpu/transforms/cudnn_vectorize_convolutions.h" #include <cstdint> #include <optional> #include <string> #include <tuple> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "xla/client/xla_builder.h" #include "xla/client/xla_computation.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_clone_context.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/primitive_util.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/gpu/cudnn_support_utils.h" #include "xla/service/gpu/stream_executor_util.h" #include "xla/service/hlo_module_config.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/dnn.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { static std::vector<HloCustomCallInstruction*> GetRelevantConvs( HloComputation* comp) { std::vector<HloCustomCallInstruction*> convs; for (HloInstruction* instr : comp->instructions()) { if (instr->opcode() != HloOpcode::kCustomCall || (instr->custom_call_target() != kCudnnConvForwardCallTarget && instr->custom_call_target() != kCudnnConvBiasActivationForwardCallTarget) || instr->operand_count() < 2) { continue; } PrimitiveType input_ty = instr->operand(0)->shape().element_type(); PrimitiveType output_ty = instr->shape().tuple_shapes(0).element_type(); if (input_ty == output_ty && (input_ty == S8 || input_ty == U8)) { convs.push_back(Cast<HloCustomCallInstruction>(instr)); } } return convs; } static absl::StatusOr<HloComputation*> BuilderToHloComputation( XlaBuilder& b, XlaOp root, HloComputation* sibling_computation) { TF_ASSIGN_OR_RETURN(XlaComputation comp, b.Build(root)); TF_ASSIGN_OR_RETURN(ProgramShape program_shape, comp.GetProgramShape()); HloModuleConfig config(program_shape); TF_ASSIGN_OR_RETURN(auto new_module, HloModule::CreateFromProto(comp.proto(), config)); HloModule* dest_module = sibling_computation->parent(); HloCloneContext context(dest_module); return dest_module->DeepCloneComputation(new_module->entry_computation(), &context); } static XlaOp SplitAtDim(XlaOp instr, int64_t dim, int64_t vect_size) { XlaBuilder& b = *instr.builder(); Shape shape = b.GetShape(instr).value(); DimensionVector new_dims(shape.dimensions().begin(), shape.dimensions().end()); CHECK_EQ(new_dims[dim] % vect_size, 0); new_dims[dim] /= vect_size; new_dims.insert(new_dims.begin() + dim + 1, vect_size); return Reshape(instr, new_dims); } static Shape SplitShapeAtDim(Shape shape, int64_t dim, int64_t vect_size) { DimensionVector new_dims(shape.dimensions().begin(), shape.dimensions().end()); CHECK_EQ(new_dims[dim] % vect_size, 0); new_dims[dim] /= vect_size; new_dims.insert(new_dims.begin() + dim + 1, vect_size); return ShapeUtil::MakeShape(shape.element_type(), new_dims); } static XlaOp MoveDim(XlaOp instr, int64_t src, int64_t dst) { XlaBuilder& b = *instr.builder(); int64_t rank = b.GetShape(instr)->dimensions_size(); DimensionVector idxs(rank); absl::c_iota(idxs, 0); if (src < dst) { idxs.insert(idxs.begin() + dst, src); idxs.erase(idxs.begin() + src); 
} else { idxs.erase(idxs.begin() + src); idxs.insert(idxs.begin() + dst, src); } return Transpose(instr, idxs); } static XlaOp RevectorizeInstr(XlaOp instr, int64_t dim, int64_t vect_dim, int64_t vect_size) { XlaBuilder& b = *instr.builder(); Shape shape = b.GetShape(instr).value(); auto size = [&](int64_t d) { return shape.dimensions(d); }; CHECK_LE(size(vect_dim), vect_size); CHECK_EQ(vect_size % size(vect_dim), 0); int64_t split_factor = vect_size / size(vect_dim); CHECK_EQ(size(dim) % split_factor, 0); instr = SplitAtDim(instr, dim, split_factor); if (vect_dim > dim) { vect_dim++; } instr = MoveDim(instr, dim + 1, vect_dim); if (vect_dim > dim) { vect_dim--; } return Collapse(instr, {vect_dim, vect_dim + 1}); } static XlaOp UnrevectorizeInstr(XlaOp instr, int64_t dim, int64_t vect_dim, int64_t orig_vect_size) { XlaBuilder& b = *instr.builder(); Shape shape = b.GetShape(instr).value(); auto size = [&](int64_t d) { return shape.dimensions(d); }; CHECK_GE(size(vect_dim), orig_vect_size); CHECK_EQ(size(vect_dim) % orig_vect_size, 0); instr = SplitAtDim(instr, vect_dim, orig_vect_size); if (dim > vect_dim) { dim++; } instr = MoveDim(instr, vect_dim, dim + 1); if (dim > vect_dim) { dim--; } return Collapse(instr, {dim, dim + 1}); } static ConvolutionDimensionNumbers VectorizeDnums( ConvolutionDimensionNumbers dnums, bool reordered_filter) { int64_t input_vect_dim = dnums.input_feature_dimension(); if (dnums.input_batch_dimension() > input_vect_dim) { dnums.set_input_batch_dimension(dnums.input_batch_dimension() + 1); } for (int64_t& d : *dnums.mutable_input_spatial_dimensions()) { if (d > input_vect_dim) { ++d; } } if (!reordered_filter) { int64_t kernel_vect_dim = dnums.kernel_input_feature_dimension(); if (dnums.kernel_output_feature_dimension() > kernel_vect_dim) { dnums.set_kernel_output_feature_dimension( dnums.kernel_output_feature_dimension() + 1); } for (int64_t& d : *dnums.mutable_kernel_spatial_dimensions()) { if (d > kernel_vect_dim) { ++d; } } } int64_t output_vect_dim = dnums.output_feature_dimension(); if (dnums.output_batch_dimension() > output_vect_dim) { dnums.set_output_batch_dimension(dnums.output_batch_dimension() + 1); } for (int64_t& d : *dnums.mutable_output_spatial_dimensions()) { if (d > output_vect_dim) { ++d; } } return dnums; } absl::Status ReorderInt8NchwVect(HloCustomCallInstruction* conv, XlaOp* operands) { bool has_bias = conv->operand_count() > 2; VLOG(1) << "Reordering filter" << (has_bias ? 
" and bias" : "") << " (replacement for cudnnReorderFilterAndBias)"; auto builder = operands->builder(); ConvolutionDimensionNumbers dnums = conv->convolution_dimension_numbers(); TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config, conv->backend_config<GpuBackendConfig>()); CudnnConvBackendConfig& config = *gpu_config.mutable_cudnn_conv_backend_config(); config.set_reordered_int8_nchw_vect(true); TF_RETURN_IF_ERROR(conv->set_backend_config(gpu_config)); TF_ASSIGN_OR_RETURN(Shape filter_shape, builder->GetShape(operands[1])); TF_ASSIGN_OR_RETURN(auto reorder, CudnnInferTransposeForFilterReordering( filter_shape, dnums)); XlaOp reshape = Reshape(reorder.transpose_shape, operands[1]); XlaOp transpose = Transpose(reshape, reorder.permutation); operands[1] = Reshape(reorder.result_shape, transpose); dnums.set_kernel_output_feature_dimension(0); dnums.set_kernel_input_feature_dimension(1); dnums.set_kernel_spatial_dimensions(0, 2); dnums.set_kernel_spatial_dimensions(1, 3); conv->set_convolution_dimension_numbers(dnums); if (has_bias) { TF_ASSIGN_OR_RETURN(Shape bias_shape, builder->GetShape(operands[2])); TF_ASSIGN_OR_RETURN(reorder, CudnnInferTransposeForBiasReordering(bias_shape)); reshape = Reshape(reorder.transpose_shape, operands[2]); transpose = Transpose(reshape, reorder.permutation); operands[2] = Reshape(reorder.result_shape, transpose); } return absl::OkStatus(); } static absl::StatusOr<bool> TryRevectorizeConv( const se::CudaComputeCapability& compute_capability, const se::dnn::VersionInfo& cudnn_version, HloCustomCallInstruction* conv, int vect_size) { const Shape& input_shape = conv->operand(0)->shape(); const Shape& kernel_shape = conv->operand(1)->shape(); const Shape& output_shape = conv->shape().tuple_shapes(0); const ConvolutionDimensionNumbers* dnums = &conv->convolution_dimension_numbers(); std::optional<int64_t> input_vect_dim; std::optional<int64_t> kernel_vect_dim; std::optional<int64_t> output_vect_dim; std::tie(input_vect_dim, kernel_vect_dim, output_vect_dim) = FindVectorizedFeatureDims(*dnums, input_shape, kernel_shape, output_shape); if (!input_vect_dim.has_value() || !kernel_vect_dim.has_value() || !output_vect_dim.has_value()) { return false; } int64_t input_feat_size = input_shape.dimensions(dnums->input_feature_dimension()); int64_t output_feat_size = output_shape.dimensions(dnums->output_feature_dimension()); int64_t input_vect_size = input_shape.dimensions(*input_vect_dim); int64_t output_vect_size = output_shape.dimensions(*output_vect_dim); if (vect_size % input_vect_size != 0 || vect_size % output_vect_size != 0 || input_feat_size % (vect_size / input_vect_size) != 0 || output_feat_size % (vect_size / output_vect_size) != 0) { return false; } if (primitive_util::IsIntegralType(input_shape.element_type())) { TF_ASSIGN_OR_RETURN(bool supported_target_vectorization, CudnnSupportsOptimizedIntegerConvolution( compute_capability, *conv, vect_size)); if (!supported_target_vectorization) { VLOG(3) << "Skipping re-vectorization of conv to vector size: " << vect_size << ": " << conv->ToString(); return false; } } VLOG(1) << "Re-vectorizing conv channels from " << input_shape.dimensions(*input_vect_dim) << " to " << vect_size << ": " << conv->ToString(); XlaBuilder b(absl::StrCat(conv->name(), ".revectorized")); b.SetOpMetadata(conv->metadata()); XlaOp filter = Parameter(&b, 1, conv->operand(1)->shape(), "filter"); absl::InlinedVector<XlaOp, 4> new_operands = { RevectorizeInstr(Parameter(&b, 0, conv->operand(0)->shape(), "input"), dnums->input_feature_dimension(), 
*input_vect_dim, vect_size), RevectorizeInstr(filter, dnums->kernel_input_feature_dimension(), *kernel_vect_dim, vect_size), }; if (conv->operand_count() > 2) { new_operands.push_back(Parameter(&b, 2, conv->operand(2)->shape(), "bias")); } if (conv->operand_count() > 3) { new_operands.push_back(RevectorizeInstr( Parameter(&b, 3, conv->operand(3)->shape(), "side_input"), dnums->input_feature_dimension(), *input_vect_dim, vect_size)); } if (conv->operand_count() > 4) { return InvalidArgument( "Don't understand a conv with more than 4 arguments: %s", conv->ToString()); } const auto& debug_options = conv->GetModule()->config().debug_options(); bool use_reordering = input_shape.element_type() == xla::S8 && vect_size == 32 && debug_options.xla_gpu_enable_cudnn_int8x32_convolution_reordering() && cudnn_version >= se::dnn::VersionInfo{8, 3, 0}; if (use_reordering) { int64_t kernel_vect_size = kernel_shape.dimensions(*kernel_vect_dim); if (kernel_vect_size == 4 || kernel_vect_size == 32) { new_operands[1] = filter; } TF_RETURN_IF_ERROR(ReorderInt8NchwVect(conv, new_operands.data())); dnums = &conv->convolution_dimension_numbers(); } DimensionVector new_output_dims(output_shape.dimensions().begin(), output_shape.dimensions().end()); new_output_dims[dnums->output_feature_dimension()] /= (vect_size / output_vect_size); new_output_dims[*output_vect_dim] = vect_size; XlaOp new_conv = CustomCallWithConvDnums( &b, conv->custom_call_target(), new_operands, ShapeUtil::MakeTupleShape( {ShapeUtil::MakeShape(output_shape.element_type(), new_output_dims), ShapeUtil::MakeShape(U8, {0})}), {}, conv->raw_backend_config_string(), false, {}, nullptr, conv->window(), *dnums); XlaOp new_conv_result = GetTupleElement(new_conv, 0); XlaOp new_conv_scratch = GetTupleElement(new_conv, 1); XlaOp new_conv_result_unrevectorized = UnrevectorizeInstr( new_conv_result, dnums->output_feature_dimension(), *output_vect_dim, output_shape.dimensions(*output_vect_dim)); TF_ASSIGN_OR_RETURN( HloComputation * new_conv_comp, BuilderToHloComputation( b, Tuple(&b, {new_conv_result_unrevectorized, new_conv_scratch}), conv->parent())); auto new_conv_comp_instrs = new_conv_comp->instructions(); auto new_conv_it = absl::c_find_if(new_conv_comp_instrs, [](HloInstruction* instr) { return instr->opcode() == HloOpcode::kCustomCall; }); if (new_conv_it != new_conv_comp_instrs.end()) { new_conv_comp->parent()->SetAndUniquifyInstrName(*new_conv_it, conv->name()); } VLOG(1) << "Re-vectorized conv to " << new_conv_comp->ToString(); TF_RETURN_IF_ERROR(conv->parent()->ReplaceWithNewInstruction( conv, HloInstruction::CreateCall(conv->shape(), conv->operands(), new_conv_comp))); return true; } static absl::StatusOr<bool> TryVectorizeConv( const se::CudaComputeCapability& compute_capability, const se::dnn::VersionInfo& cudnn_version, HloCustomCallInstruction* conv, int64_t vect_size) { const Shape& input_shape = conv->operand(0)->shape(); const Shape& output_shape = conv->shape().tuple_shapes(0); const ConvolutionDimensionNumbers* dnums = &conv->convolution_dimension_numbers(); int64_t in_channels = input_shape.dimensions(dnums->input_feature_dimension()); int64_t out_channels = output_shape.dimensions(dnums->output_feature_dimension()); if (in_channels % vect_size != 0 || out_channels % vect_size != 0) { return false; } if (input_shape.dimensions_size() > 2 + dnums->input_spatial_dimensions_size()) { return false; } if (primitive_util::IsIntegralType(input_shape.element_type())) { TF_ASSIGN_OR_RETURN(bool supported_target_vectorization, 
CudnnSupportsOptimizedIntegerConvolution( compute_capability, *conv, vect_size)); if (!supported_target_vectorization) { VLOG(3) << "Skipping vectorization of conv to vector size: " << vect_size << ": " << conv->ToString(); return false; } } VLOG(1) << "Vectorizing conv channels by " << vect_size << ": " << conv->ToString(); XlaBuilder b(absl::StrCat(conv->name(), ".revectorized")); b.SetOpMetadata(conv->metadata()); XlaOp filter = Parameter(&b, 1, conv->operand(1)->shape(), "filter"); absl::InlinedVector<XlaOp, 4> new_operands = { SplitAtDim(Parameter(&b, 0, conv->operand(0)->shape(), "input"), dnums->input_feature_dimension(), vect_size), SplitAtDim(filter, dnums->kernel_input_feature_dimension(), vect_size), }; if (conv->operand_count() > 2) { new_operands.push_back(Parameter(&b, 2, conv->operand(2)->shape(), "bias")); } if (conv->operand_count() > 3) { new_operands.push_back( SplitAtDim(Parameter(&b, 3, conv->operand(3)->shape(), "side_input"), dnums->output_feature_dimension(), vect_size)); } if (conv->operand_count() > 4) { return InvalidArgument( "Don't understand a conv with more than 4 arguments: %s", conv->ToString()); } const auto& debug_options = conv->GetModule()->config().debug_options(); bool use_reordering = input_shape.element_type() == xla::S8 && vect_size == 32 && debug_options.xla_gpu_enable_cudnn_int8x32_convolution_reordering() && cudnn_version >= se::dnn::VersionInfo{8, 3, 0}; if (use_reordering) { new_operands[1] = filter; TF_RETURN_IF_ERROR(ReorderInt8NchwVect(conv, new_operands.data())); dnums = &conv->convolution_dimension_numbers(); } Shape new_output_shape = SplitShapeAtDim( output_shape, dnums->output_feature_dimension(), vect_size); XlaOp new_conv = CustomCallWithConvDnums( &b, conv->custom_call_target(), new_operands, ShapeUtil::MakeTupleShape( {new_output_shape, ShapeUtil::MakeShape(U8, {0})}), {}, conv->raw_backend_config_string(), false, {}, nullptr, conv->window(), VectorizeDnums(*dnums, use_reordering)); XlaOp new_conv_result = GetTupleElement(new_conv, 0); XlaOp new_conv_scratch = GetTupleElement(new_conv, 1); XlaOp conv_result_collapsed = Collapse(new_conv_result, {dnums->output_feature_dimension(), dnums->output_feature_dimension() + 1}); TF_ASSIGN_OR_RETURN( HloComputation * new_conv_comp, BuilderToHloComputation( b, Tuple(&b, {conv_result_collapsed, new_conv_scratch}), conv->parent())); VLOG(1) << "Vectorized conv to: " << new_conv_comp->ToString(); TF_RETURN_IF_ERROR(conv->parent()->ReplaceWithNewInstruction( conv, HloInstruction::CreateCall(conv->shape(), conv->operands(), new_conv_comp))); return true; } } absl::StatusOr<bool> CudnnVectorizeConvolutions::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (HloComputation* comp : module->MakeNonfusionComputations(execution_threads)) { for (HloCustomCallInstruction* conv : GetRelevantConvs(comp)) { bool local_changed = false; if (compute_capability_.IsAtLeast(7, 5)) { TF_ASSIGN_OR_RETURN( local_changed, TryRevectorizeConv(compute_capability_, cudnn_version_, conv, 32)); if (!local_changed) { TF_ASSIGN_OR_RETURN( local_changed, TryVectorizeConv(compute_capability_, cudnn_version_, conv, 32)); } } if (!local_changed) { TF_ASSIGN_OR_RETURN( local_changed, TryVectorizeConv(compute_capability_, cudnn_version_, conv, 4)); } changed |= local_changed; } } return changed; } } }
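The reshape/transpose bookkeeping above is easiest to follow on concrete shapes. The following self-contained sketch (illustrative only, not part of the pass; all names here are hypothetical) mirrors the index arithmetic of SplitAtDim and MoveDim on plain vectors and prints what they would produce.

// --- Illustrative sketch, not part of cudnn_vectorize_convolutions.cc ---
#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

// Mirrors SplitAtDim: dims[dim] must be divisible by vect_size.
static std::vector<int64_t> SplitDims(std::vector<int64_t> dims, int64_t dim,
                                      int64_t vect_size) {
  dims[dim] /= vect_size;
  dims.insert(dims.begin() + dim + 1, vect_size);
  return dims;
}

// Mirrors MoveDim's permutation: start from the identity, then relocate src.
static std::vector<int64_t> MoveDimPermutation(int64_t rank, int64_t src,
                                               int64_t dst) {
  std::vector<int64_t> idxs(rank);
  std::iota(idxs.begin(), idxs.end(), 0);
  if (src < dst) {
    idxs.insert(idxs.begin() + dst, src);
    idxs.erase(idxs.begin() + src);
  } else {
    idxs.erase(idxs.begin() + src);
    idxs.insert(idxs.begin() + dst, src);
  }
  return idxs;
}

int main() {
  // s8[10,20,30,40] split at the feature dim by 4 -> s8[10,20,30,10,4],
  // matching the VectorizeTo4 test below.
  for (int64_t d : SplitDims({10, 20, 30, 40}, 3, 4)) std::cout << d << ' ';
  std::cout << '\n';  // prints: 10 20 30 10 4

  // RevectorizeInstr on NCHW_VECT_C: after splitting [N,C,H,W,v] at C by 8
  // we have [N,C/8,8,H,W,v]; MoveDim(2, 5) yields the permutation below,
  // which parks the new size-8 dim right before v so Collapse can merge them.
  for (int64_t d : MoveDimPermutation(6, 2, 5)) std::cout << d << ' ';
  std::cout << '\n';  // prints: 0 1 3 4 2 5
}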
#include "xla/service/gpu/transforms/cudnn_vectorize_convolutions.h" #include <cstdint> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/algorithm/container.h" #include "absl/status/statusor.h" #include "xla/service/call_inliner.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/hlo_parser.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/dnn.h" #include "xla/tests/hlo_test_base.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { namespace m = ::xla::match; class CudnnVectorizeConvolutionsTest : public HloTestBase { protected: absl::StatusOr<bool> Run(std::pair<int, int> compute_capability, HloModule* module) { CudnnVectorizeConvolutions pass( se::CudaComputeCapability{compute_capability.first, compute_capability.second}, se::dnn::VersionInfo(8, 3, 0)); TF_ASSIGN_OR_RETURN(bool changed, RunHloPass(&pass, module)); CallInliner inliner; TF_RETURN_IF_ERROR(RunHloPass(&inliner, module).status()); return changed; } }; TEST_F(CudnnVectorizeConvolutionsTest, VectorizeTo4) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,40] parameter(0) filter = s8[2,2,40,44] parameter(1) ROOT result = (s8[10,20,30,44], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward", backend_config="{bar: 0}" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; ASSERT_THAT( root, GmockMatch(m::Tuple( m::Reshape(m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Parameter(0)) .WithShape(S8, {10, 20, 30, 10, 4}), m::Reshape(m::Parameter(1)) .WithShape(S8, {2, 2, 10, 4, 44})) .WithConvDnums("b01f?_01i?o->b01f?")) .WithShape(S8, {10, 20, 30, 11, 4})), m::Op()))); EXPECT_EQ(conv->raw_backend_config_string(), "{bar: 0}"); } TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4UnsupportedFilterType) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,40] parameter(0) filter = f32[2,2,40,44] parameter(1) ROOT result = (s8[10,20,30,44], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward", backend_config="{bar: 0}" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_FALSE(changed); } TEST_F(CudnnVectorizeConvolutionsTest, VectorizeTo4NCHW) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,48,20,30] parameter(0) filter = s8[48,44,2,2] parameter(1) ROOT result = (s8[10,44,20,30], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=bf01_io01->bf01, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; ASSERT_THAT( root, GmockMatch(m::Tuple( m::Reshape(m::GetTupleElement( m::CustomCall(&conv, 
{kCudnnConvForwardCallTarget}, m::Reshape(m::Parameter(0)) .WithShape(S8, {10, 12, 4, 20, 30}), m::Reshape(m::Parameter(1)) .WithShape(S8, {12, 4, 44, 2, 2})) .WithConvDnums("bf?01_i?o01->bf?01")) .WithShape(S8, {10, 11, 4, 20, 30})), m::Op()))); } TEST_F(CudnnVectorizeConvolutionsTest, IncrementAllDnums) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[16,16,16,16] parameter(0) filter = s8[16,16,3,3] parameter(1) ROOT result = (s8[16,16,16,16], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=fb01_i01o->fb01, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; ASSERT_THAT( root, GmockMatch(m::Tuple( m::Reshape(m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Parameter(0)) .WithShape(S8, {4, 4, 16, 16, 16}), m::Reshape(m::Parameter(1)) .WithShape(S8, {4, 4, 16, 3, 3})) .WithConvDnums("f?b01_i?01o->f?b01")) .WithShape(S8, {4, 4, 16, 16, 16})), m::Op()))); } TEST_F(CudnnVectorizeConvolutionsTest, FilterDnums) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[1,20,9,9] parameter(0) filter = s8[3,3,20,32] parameter(1) ROOT result = (s8[1,32,9,9], u8[0]) custom-call(s8[1,20,9,9] input, s8[3,3,20,32] filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; ASSERT_THAT( root, GmockMatch(m::Tuple( m::Reshape(m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Parameter(0)) .WithShape(S8, {1, 5, 4, 9, 9}), m::Reshape(m::Parameter(1)) .WithShape(S8, {3, 3, 5, 4, 32})) .WithConvDnums("bf?01_01i?o->bf?01")) .WithShape(S8, {1, 8, 4, 9, 9})), m::Op()))); } TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,41] parameter(0) filter = s8[2,2,41,44] parameter(1) ROOT result = (s8[10,20,30,44], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); CudnnVectorizeConvolutions pass( {7, 5}, se::dnn::VersionInfo{8, 3, 0}); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); SCOPED_TRACE(module->ToString()); EXPECT_FALSE(changed); } TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4IfOutputIsS32) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,41] parameter(0) filter = s8[2,2,41,44] parameter(1) ROOT result = (s32[10,20,30,44], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); SCOPED_TRACE(module->ToString()); EXPECT_FALSE(changed); } TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4IfOutputIsF32) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,41] parameter(0) filter = s8[2,2,41,44] parameter(1) ROOT result = 
(f32[10,20,30,44], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); SCOPED_TRACE(module->ToString()); EXPECT_FALSE(changed); } TEST_F(CudnnVectorizeConvolutionsTest, VectorizeTo32) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,64] parameter(0) filter = s8[2,2,64,128] parameter(1) ROOT result = (s8[10,20,30,128], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; ASSERT_THAT( root, GmockMatch(m::Tuple( m::Reshape( m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Parameter(0)) .WithShape(S8, {10, 20, 30, 2, 32}), m::Reshape( m::Transpose( m::Reshape(m::Parameter(1)) .WithShape(S8, {2, 2, 2, 8, 4, 16, 4, 2})) .WithShape(S8, {2, 2, 2, 16, 2, 8, 4, 4}) .WithPredicate([](const HloInstruction* instr) { return absl::c_equal( instr->dimensions(), std::vector<int64_t>{2, 0, 1, 5, 7, 3, 6, 4}); })) .WithShape(S8, {128, 2, 2, 2, 32}))) .WithShape(S8, {10, 20, 30, 4, 32})), m::Op()))); EXPECT_TRUE(conv->backend_config<GpuBackendConfig>() ->cudnn_conv_backend_config() .reordered_int8_nchw_vect()); } TEST_F(CudnnVectorizeConvolutionsTest, BiasAndSideInput) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,64] parameter(0) filter = s8[2,2,64,128] parameter(1) bias = f32[128] parameter(2) side_input = s8[10,20,30,64] parameter(3) ROOT result = (s8[10,20,30,128], u8[0]) custom-call(input, filter, bias, side_input), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; ASSERT_THAT( root, GmockMatch(m::Tuple( m::Reshape( m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Parameter(0)) .WithShape(S8, {10, 20, 30, 2, 32}), m::Reshape(m::Transpose(m::Reshape(m::Parameter(1)))) .WithShape(S8, {128, 2, 2, 2, 32}), m::Reshape( m::Transpose(m::Reshape(m::Parameter(2)) .WithShape(F32, {4, 4, 2, 4})) .WithShape(F32, {4, 2, 4, 4}) .WithPredicate([](const HloInstruction* instr) { return absl::c_equal( instr->dimensions(), std::vector<int64_t>{0, 2, 1, 3}); })) .WithShape(F32, {128}), m::Reshape(m::Parameter(3)) .WithShape(S8, {10, 20, 30, 2, 32}))) .WithShape(S8, {10, 20, 30, 4, 32})), m::Op()))); EXPECT_TRUE(conv->backend_config<GpuBackendConfig>() ->cudnn_conv_backend_config() .reordered_int8_nchw_vect()); } TEST_F(CudnnVectorizeConvolutionsTest, InputNHWC_OutputNCHW) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,64] parameter(0) filter = s8[2,2,64,128] parameter(1) bias = f32[128] parameter(2) side_input = s8[10,128,20,30] parameter(3) ROOT result = (s8[10,128,20,30], u8[0]) custom-call(input, filter, bias, side_input), window={size=2x2}, dim_labels=b01f_01io->bf01, custom_call_target="__cudnn$convForward" })") .value(); 
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; ASSERT_THAT( root, GmockMatch(m::Tuple( m::Reshape( m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Parameter(0)) .WithShape(S8, {10, 20, 30, 2, 32}), m::Reshape(m::Transpose(m::Reshape(m::Parameter(1)))) .WithShape(S8, {128, 2, 2, 2, 32}), m::Reshape( m::Transpose(m::Reshape(m::Parameter(2)) .WithShape(F32, {4, 4, 2, 4})) .WithShape(F32, {4, 2, 4, 4}) .WithPredicate([](const HloInstruction* instr) { return absl::c_equal( instr->dimensions(), std::vector<int64_t>{0, 2, 1, 3}); })) .WithShape(F32, {128}), m::Reshape(m::Parameter(3)) .WithShape(S8, {10, 4, 32, 20, 30}))) .WithShape(S8, {10, 4, 32, 20, 30})), m::Op()))); EXPECT_TRUE(conv->backend_config<GpuBackendConfig>() ->cudnn_conv_backend_config() .reordered_int8_nchw_vect()); } TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo32) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,64] parameter(0) filter = s8[2,2,64,128] parameter(1) ROOT result = (s8[10,20,30,128], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 0}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; ASSERT_THAT( root, GmockMatch(m::Tuple( m::Reshape(m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Parameter(0)) .WithShape(S8, {10, 20, 30, 16, 4}), m::Reshape(m::Parameter(1)) .WithShape(S8, {2, 2, 16, 4, 128}))) .WithShape(S8, {10, 20, 30, 32, 4})), m::Op()))); EXPECT_FALSE(conv->backend_config<GpuBackendConfig>() ->cudnn_conv_backend_config() .reordered_int8_nchw_vect()); } TEST_F(CudnnVectorizeConvolutionsTest, Vectorize4To32) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,16,4] parameter(0) filter = s8[3,5,16,192,4] parameter(1) bias = f32[64] parameter(2) side_input = s8[10,20,30,16,4] parameter(3) ROOT result = (s8[10,20,30,48,4], u8[0]) custom-call(input, filter, bias, side_input), window={size=3x5}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; auto conv_pat = m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Transpose(m::Reshape(m::Parameter(0)) .WithShape(S8, {10, 20, 30, 2, 8, 4})) .WithShape(S8, {10, 20, 30, 2, 8, 4})) .WithShape(S8, {10, 20, 30, 2, 32}), m::Reshape( m::Transpose(m::Reshape(m::Parameter(1)) .WithShape(S8, {3, 5, 2, 8, 24, 4, 2, 4})) .WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4}) .WithPredicate([](const HloInstruction* instr) { return absl::c_equal( instr->dimensions(), std::vector<int64_t>{2, 0, 1, 4, 6, 3, 5, 7}); })) .WithShape(S8, {192, 2, 3, 5, 32}), m::Reshape(m::Transpose(m::Reshape(m::Parameter(2)))), m::Reshape(m::Transpose(m::Reshape(m::Parameter(3)) .WithShape(S8, {10, 20, 30, 2, 8, 4})) .WithShape(S8, {10, 20, 30, 2, 8, 4})) .WithShape(S8, {10, 20, 30, 2, 32})) 
.WithConvDnums("b01f?_oi01?->b01f?")) .WithShape(S8, {10, 20, 30, 6, 32}); ASSERT_THAT(root, GmockMatch(m::Tuple( m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape( S8, {10, 20, 30, 6, 8, 4})) .WithShape(S8, {10, 20, 30, 6, 8, 4})) .WithShape(S8, {10, 20, 30, 48, 4}), m::Op()))); EXPECT_TRUE(conv->backend_config<GpuBackendConfig>() ->cudnn_conv_backend_config() .reordered_int8_nchw_vect()); } TEST_F(CudnnVectorizeConvolutionsTest, Vectorize4To32NCHW) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,16,20,30,4] parameter(0) filter = s8[16,128,2,2,4] parameter(1) bias = f32[64] parameter(2) side_input = s8[10,16,20,30,4] parameter(3) ROOT result = (s8[10,32,20,30,4], u8[0]) custom-call(input, filter, bias, side_input), window={size=2x2}, dim_labels=bf01_io01->bf01, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; auto conv_pat = m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Transpose(m::Reshape(m::Parameter(0)) .WithShape(S8, {10, 2, 8, 20, 30, 4})) .WithShape(S8, {10, 2, 20, 30, 8, 4})) .WithShape(S8, {10, 2, 20, 30, 32}), m::Reshape( m::Transpose(m::Reshape(m::Parameter(1)) .WithShape(S8, {2, 8, 16, 4, 2, 2, 2, 4})) .WithShape(S8, {2, 2, 2, 16, 2, 8, 4, 4}) .WithPredicate([](const HloInstruction* instr) { return absl::c_equal( instr->dimensions(), std::vector<int64_t>{0, 5, 6, 2, 4, 1, 3, 7}); })) .WithShape(S8, {128, 2, 2, 2, 32}), m::Reshape(m::Transpose(m::Reshape(m::Parameter(2)))), m::Reshape(m::Transpose(m::Reshape(m::Parameter(3)) .WithShape(S8, {10, 2, 8, 20, 30, 4})) .WithShape(S8, {10, 2, 20, 30, 8, 4})) .WithShape(S8, {10, 2, 20, 30, 32})) .WithConvDnums("bf01_oi01->bf01")) .WithShape(S8, {10, 4, 20, 30, 32}); ASSERT_THAT(root, GmockMatch(m::Tuple( m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape( S8, {10, 4, 20, 30, 8, 4})) .WithShape(S8, {10, 4, 8, 20, 30, 4})) .WithShape(S8, {10, 32, 20, 30, 4}), m::Op()))); EXPECT_TRUE(conv->backend_config<GpuBackendConfig>() ->cudnn_conv_backend_config() .reordered_int8_nchw_vect()); } TEST_F(CudnnVectorizeConvolutionsTest, Vectorize4To32VectorDimFirst) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[4,10,20,30,16] parameter(0) filter = s8[4,3,5,16,192] parameter(1) bias = f32[64] parameter(2) side_input = s8[4,10,20,30,16] parameter(3) ROOT result = (s8[4,10,20,30,48], u8[0]) custom-call(input, filter, bias, side_input), window={size=3x5}, dim_labels=?b01f_?01io->?b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; auto conv_pat = m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Transpose(m::Reshape(m::Parameter(0)) .WithShape(S8, {4, 10, 20, 30, 2, 8})) .WithShape(S8, {8, 4, 10, 20, 30, 2})) .WithShape(S8, {32, 10, 20, 30, 2}), m::Reshape( m::Transpose(m::Reshape(m::Parameter(1)) .WithShape(S8, {4, 3, 5, 2, 8, 24, 4, 2})) .WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4}) .WithPredicate([](const HloInstruction* instr) { return absl::c_equal( instr->dimensions(), std::vector<int64_t>{3, 1, 2, 
5, 7, 4, 6, 0}); })) .WithShape(S8, {192, 2, 3, 5, 32}), m::Reshape(m::Transpose(m::Reshape(m::Parameter(2)))), m::Reshape(m::Transpose(m::Reshape(m::Parameter(3)) .WithShape(S8, {4, 10, 20, 30, 2, 8})) .WithShape(S8, {8, 4, 10, 20, 30, 2})) .WithShape(S8, {32, 10, 20, 30, 2})) .WithConvDnums("?b01f_oi01->?b01f")) .WithShape(S8, {32, 10, 20, 30, 6}); ASSERT_THAT(root, GmockMatch(m::Tuple( m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape( S8, {8, 4, 10, 20, 30, 6})) .WithShape(S8, {4, 10, 20, 30, 6, 8})) .WithShape(S8, {4, 10, 20, 30, 48}), m::Op()))); EXPECT_TRUE(conv->backend_config<GpuBackendConfig>() ->cudnn_conv_backend_config() .reordered_int8_nchw_vect()); } TEST_F(CudnnVectorizeConvolutionsTest, NoVectorize4To32) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,16,4] parameter(0) filter = s8[2,2,16,128,4] parameter(1) bias = f32[10] parameter(2) side_input = s8[10,20,30,16,4] parameter(3) ROOT result = (s8[10,20,30,32,4], u8[0]) custom-call(input, filter, bias, side_input), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 0}, module.get())); EXPECT_FALSE(changed); } TEST_F(CudnnVectorizeConvolutionsTest, Vectorize16To32) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,4,16] parameter(0) filter = s8[3,5,4,192,16] parameter(1) ROOT result = (s8[10,20,30,12,16], u8[0]) custom-call(input, filter), window={size=3x5}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; auto filter_pat = m::Reshape( m::Transpose( m::Reshape(m::Parameter(1)).WithShape(S8, {3, 5, 2, 2, 192, 16})) .WithShape(S8, {3, 5, 2, 192, 2, 16})) .WithShape(S8, {3, 5, 2, 192, 32}); auto conv_pat = m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvForwardCallTarget}, m::Reshape( m::Transpose(m::Reshape(m::Parameter(0)) .WithShape(S8, {10, 20, 30, 2, 2, 16})) .WithShape(S8, {10, 20, 30, 2, 2, 16})) .WithShape(S8, {10, 20, 30, 2, 32}), m::Reshape( m::Transpose(m::Reshape(filter_pat) .WithShape(S8, {3, 5, 2, 24, 4, 2, 8, 4})) .WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4})) .WithShape(S8, {192, 2, 3, 5, 32})) .WithConvDnums("b01f_oi01->b01f")) .WithShape(S8, {10, 20, 30, 6, 32}); ASSERT_THAT(root, GmockMatch(m::Tuple( m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape( S8, {10, 20, 30, 6, 2, 16})) .WithShape(S8, {10, 20, 30, 6, 2, 16})) .WithShape(S8, {10, 20, 30, 12, 16}), m::Op()))); EXPECT_TRUE(conv->backend_config<GpuBackendConfig>() ->cudnn_conv_backend_config() .reordered_int8_nchw_vect()); } TEST_F(CudnnVectorizeConvolutionsTest, VectorizeMixedTo32) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,8,8] parameter(0) filter = s8[3,5,2,192,32] parameter(1) ROOT result = (s8[10,20,30,96,2], u8[0]) custom-call(input, filter), window={size=3x5}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; auto conv_pat = 
m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Transpose(m::Reshape(m::Parameter(0)) .WithShape(S8, {10, 20, 30, 2, 4, 8})) .WithShape(S8, {10, 20, 30, 2, 4, 8})) .WithShape(S8, {10, 20, 30, 2, 32}), m::Reshape( m::Transpose(m::Reshape(m::Parameter(1)) .WithShape(S8, {3, 5, 2, 24, 4, 2, 8, 4})) .WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4})) .WithShape(S8, {192, 2, 3, 5, 32})) .WithConvDnums("b01f_oi01->b01f")) .WithShape(S8, {10, 20, 30, 6, 32}); ASSERT_THAT(root, GmockMatch(m::Tuple( m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape( S8, {10, 20, 30, 6, 16, 2})) .WithShape(S8, {10, 20, 30, 6, 16, 2})) .WithShape(S8, {10, 20, 30, 96, 2}), m::Op()))); EXPECT_TRUE(conv->backend_config<GpuBackendConfig>() ->cudnn_conv_backend_config() .reordered_int8_nchw_vect()); } } } }
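The test fixture's Run helper corresponds to wiring the pass into a short pipeline followed by CallInliner, which inlines the wrapper computation that the rewrite creates back into the entry computation. Below is a minimal sketch under the assumption that HloPassPipeline lives at the include path shown at this commit; the function and pipeline names are illustrative.

// --- Illustrative sketch, not part of the test file ---
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/call_inliner.h"
#include "xla/service/gpu/transforms/cudnn_vectorize_convolutions.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"

namespace xla::gpu {

// Runs the vectorization pass the way the tests do: sm_75 plus cuDNN 8.3,
// so both the int8x4 path and the reordered int8x32 path are reachable.
absl::StatusOr<bool> RunVectorizeThenInline(HloModule* module) {
  HloPassPipeline pipeline("cudnn-vectorize-sketch");
  pipeline.AddPass<CudnnVectorizeConvolutions>(
      se::CudaComputeCapability{7, 5}, se::dnn::VersionInfo(8, 3, 0));
  pipeline.AddPass<CallInliner>();
  return pipeline.Run(module);
}

}  // namespace xla::gpu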
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_vectorize_convolutions.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_vectorize_convolutions_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
376cd3aa-a22b-41e5-b02a-66981383442d
cpp
tensorflow/tensorflow
quantize_nodes
tensorflow/tools/graph_transforms/quantize_nodes.cc
tensorflow/tools/graph_transforms/quantize_nodes_test.cc
#define EIGEN_USE_THREADS #include "tensorflow/core/common_runtime/constant_folding.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/common_runtime/threadpool_device.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/subgraph.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/platform/init_main.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { struct QuantizedOpInfo { string float_name; std::vector<string> attrs_to_copy; std::vector<std::pair<string, DataType>> dtypes_to_set; DataType input_bit_depth; DataType output_bit_depth; std::set<int32> unquantized_inputs; enum { CONTIGUOUS_MIN_MAX, SEPARATE_MIN_MAX } min_max_order; }; const std::vector<QuantizedOpInfo>& GetQuantizedOpList() { static const std::vector<QuantizedOpInfo> op_list = { {"Add", {}, {{"T1", DT_QUINT8}, {"T2", DT_QUINT8}, {"Toutput", DT_QINT32}}, DT_QUINT8, DT_QINT32, {}, QuantizedOpInfo::CONTIGUOUS_MIN_MAX}, {"AvgPool", {"ksize", "strides", "padding"}, {{"T", DT_QUINT8}}, DT_QUINT8, DT_QUINT8, {}, QuantizedOpInfo::CONTIGUOUS_MIN_MAX}, {"BiasAdd", {}, {{"T1", DT_QUINT8}, {"T2", DT_QUINT8}, {"out_type", DT_QINT32}}, DT_QUINT8, DT_QINT32, {}, QuantizedOpInfo::CONTIGUOUS_MIN_MAX}, {"Concat", {"N"}, {{"T", DT_QUINT8}}, DT_QUINT8, DT_QUINT8, {0}, QuantizedOpInfo::SEPARATE_MIN_MAX}, {"Conv2D", {"strides", "padding"}, {{"Tinput", DT_QUINT8}, {"Tfilter", DT_QUINT8}, {"out_type", DT_QINT32}}, DT_QUINT8, DT_QINT32, {}, QuantizedOpInfo::CONTIGUOUS_MIN_MAX}, {"MatMul", {"transpose_a", "transpose_b"}, {{"T1", DT_QUINT8}, {"T2", DT_QUINT8}, {"Toutput", DT_QINT32}}, DT_QUINT8, DT_QINT32, {}, QuantizedOpInfo::CONTIGUOUS_MIN_MAX}, {"MaxPool", {"ksize", "strides", "padding"}, {{"T", DT_QUINT8}}, DT_QUINT8, DT_QUINT8, {}, QuantizedOpInfo::CONTIGUOUS_MIN_MAX}, {"Mul", {}, {{"T1", DT_QUINT8}, {"T2", DT_QUINT8}, {"Toutput", DT_QINT32}}, DT_QUINT8, DT_QINT32, {}, QuantizedOpInfo::CONTIGUOUS_MIN_MAX}, {"Relu", {}, {{"Tinput", DT_QUINT8}}, DT_QUINT8, DT_QUINT8, {}, QuantizedOpInfo::CONTIGUOUS_MIN_MAX}, {"ResizeBilinear", {"align_corners"}, {{"T", DT_QUINT8}}, DT_QUINT8, DT_QUINT8, {1}, QuantizedOpInfo::CONTIGUOUS_MIN_MAX}, {"Relu6", {}, {{"Tinput", DT_QUINT8}}, DT_QUINT8, DT_QUINT8, {}, QuantizedOpInfo::CONTIGUOUS_MIN_MAX}, {"Reshape", {}, {{"T", DT_QUINT8}}, DT_QUINT8, DT_QUINT8, {1}, QuantizedOpInfo::CONTIGUOUS_MIN_MAX}, }; return op_list; } namespace { string UniqueNodeNameFromInput(const string& input_name) { string prefix; string node_name; string suffix; NodeNamePartsFromInput(input_name, &prefix, &node_name, &suffix); string result; if (prefix == "^") { result += "__hat__"; } result += node_name; if (!suffix.empty()) { result += "__port__" + suffix.substr(1, suffix.size() - 1); } return result; } Status ExtractRangeFromParams(const TransformFuncContext& context, const string& min_name, const string& max_name, float* min_value, float* max_value, bool* has_range) { const bool has_min = (context.params.count(min_name) != 0); const bool has_max = (context.params.count(max_name) != 0); *has_range = (has_min || has_max); if (!*has_range) { return OkStatus(); } if (!has_min || !has_max) { return errors::InvalidArgument("You must pass both ", min_name, " and ", max_name, " into quantize_nodes"); } TF_RETURN_IF_ERROR(context.GetOneFloatParameter(min_name, 0.0f, min_value)); TF_RETURN_IF_ERROR(context.GetOneFloatParameter(max_name, 
0.0f, max_value)); return OkStatus(); } } Status MergeDuplicateNodes(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { std::set<string> input_names(context.input_names.begin(), context.input_names.end()); std::set<string> output_names(context.output_names.begin(), context.output_names.end()); GraphDef current_graph_def = input_graph_def; bool any_duplicates_found; do { any_duplicates_found = false; std::map<uint64, std::vector<const NodeDef*>> hashed_nodes; for (const NodeDef& node : current_graph_def.node()) { NodeDef nameless_node = node; if (!input_names.count(node.name()) && !output_names.count(node.name())) { nameless_node.set_name(""); } const uint64 hash = HashNodeDef(nameless_node); hashed_nodes[hash].push_back(&node); } std::map<string, string> inputs_to_rename; GraphDef merged_graph_def; for (const std::pair<const uint64, std::vector<const NodeDef*>>& hashed_node_info : hashed_nodes) { const std::vector<const NodeDef*>& hash_node_list = hashed_node_info.second; for (int i = 0; i < hash_node_list.size(); ++i) { const NodeDef* current_node = hash_node_list[i]; const OpDef* op_def = nullptr; TF_RETURN_IF_ERROR( OpRegistry::Global()->LookUpOpDef(current_node->op(), &op_def)); const bool is_duplicate = ((!op_def->is_stateful()) && (i > 0)); if (is_duplicate) { const string original_name = hash_node_list[0]->name(); inputs_to_rename[current_node->name() + ":*"] = original_name; any_duplicates_found = true; } else { NodeDef* new_node = merged_graph_def.mutable_node()->Add(); *new_node = *current_node; } } } TF_RETURN_IF_ERROR(RenameNodeInputs(merged_graph_def, inputs_to_rename, std::unordered_set<string>(), &current_graph_def)); } while (any_duplicates_found); *output_graph_def = current_graph_def; return OkStatus(); } Status RemoveRedundantQuantizations(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { std::set<string> graph_outputs; for (const string& output_name : context.output_names) { graph_outputs.insert(NodeNameFromInput(output_name)); } std::map<string, string> inputs_to_rename; GraphDef replaced_graph_def; TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes( input_graph_def, {"QuantizeV2", { {"Dequantize"}, {"Min"}, {"Max"}, } }, [&inputs_to_rename, &graph_outputs](const NodeMatch& match, const std::set<string>& input_nodes, const std::set<string>& output_nodes, std::vector<NodeDef>* new_nodes) { const NodeDef& quantize_node = match.node; const NodeDef& dequantize_node = match.inputs[0].node; inputs_to_rename[quantize_node.name() + ":0"] = dequantize_node.input(0); inputs_to_rename[quantize_node.name() + ":1"] = dequantize_node.input(1); inputs_to_rename[quantize_node.name() + ":2"] = dequantize_node.input(2); if (output_nodes.count(dequantize_node.name()) || graph_outputs.count(dequantize_node.name())) { CopyOriginalMatch(match, new_nodes); } return OkStatus(); }, {true}, &replaced_graph_def)); return RenameNodeInputs(replaced_graph_def, inputs_to_rename, std::unordered_set<string>(), output_graph_def); } Status QuantizePlaceholders(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { float input_min; float input_max; bool has_input_range; TF_RETURN_IF_ERROR(ExtractRangeFromParams(context, "input_min", "input_max", &input_min, &input_max, &has_input_range)); if (!has_input_range) { *output_graph_def = input_graph_def; return OkStatus(); } std::map<string, string> inputs_to_rename_first_pass; std::map<string, string> 
inputs_to_rename_second_pass; GraphDef placeholder_graph_def; placeholder_graph_def.Clear(); for (const NodeDef& node : input_graph_def.node()) { if (node.op() != "Placeholder") { *(placeholder_graph_def.mutable_node()->Add()) = node; } else { string namespace_prefix = node.name() + "_eightbit"; NodeDef quantized_placeholder; quantized_placeholder = node; SetNodeAttr("dtype", DT_QUINT8, &quantized_placeholder); *(placeholder_graph_def.mutable_node()->Add()) = quantized_placeholder; NodeDef min_node; min_node.set_op("Const"); min_node.set_name(namespace_prefix + "/min"); SetNodeAttr("dtype", DT_FLOAT, &min_node); Tensor min_tensor(DT_FLOAT, {}); min_tensor.flat<float>()(0) = input_min; SetNodeTensorAttr<float>("value", min_tensor, &min_node); *(placeholder_graph_def.mutable_node()->Add()) = min_node; NodeDef max_node; max_node.set_op("Const"); max_node.set_name(namespace_prefix + "/max"); SetNodeAttr("dtype", DT_FLOAT, &max_node); Tensor max_tensor(DT_FLOAT, {}); max_tensor.flat<float>()(0) = input_max; SetNodeTensorAttr<float>("value", max_tensor, &max_node); *(placeholder_graph_def.mutable_node()->Add()) = max_node; const string rename_suffix = "__RENAMED_PLACEHOLDER__"; NodeDef dequantize_node; dequantize_node.set_op("Dequantize"); dequantize_node.set_name(namespace_prefix + "/dequantize"); SetNodeAttr("T", DT_QUINT8, &dequantize_node); SetNodeAttr("mode", "MIN_FIRST", &dequantize_node); AddNodeInput(node.name() + rename_suffix, &dequantize_node); AddNodeInput(min_node.name(), &dequantize_node); AddNodeInput(max_node.name(), &dequantize_node); *(placeholder_graph_def.mutable_node()->Add()) = dequantize_node; inputs_to_rename_first_pass[node.name()] = dequantize_node.name(); inputs_to_rename_second_pass[node.name() + rename_suffix] = node.name(); } } GraphDef first_pass_graph_def; TF_RETURN_IF_ERROR( RenameNodeInputs(placeholder_graph_def, inputs_to_rename_first_pass, std::unordered_set<string>(), &first_pass_graph_def)); TF_RETURN_IF_ERROR( RenameNodeInputs(first_pass_graph_def, inputs_to_rename_second_pass, std::unordered_set<string>(), output_graph_def)); return OkStatus(); } Status ConvertFakeQuantsToRequantize(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes( input_graph_def, {"FakeQuantWithMinMaxVars", { {"*"}, {"Const"}, {"Const"}, } }, [](const NodeMatch& match, const std::set<string>& input_nodes, const std::set<string>& output_nodes, std::vector<NodeDef>* new_nodes) { const NodeDef& fake_quant_node = match.node; const NodeDef& original_op_node = match.inputs[0].node; const NodeDef& fake_quant_min_node = match.inputs[1].node; const NodeDef& fake_quant_max_node = match.inputs[2].node; string namespace_prefix = fake_quant_node.name() + "_eightbit"; new_nodes->push_back(original_op_node); new_nodes->push_back(fake_quant_min_node); new_nodes->push_back(fake_quant_max_node); NodeDef quantize_node; quantize_node.set_op("QuantizeV2"); quantize_node.set_name(namespace_prefix + "/quantize"); SetNodeAttr("T", DT_QINT32, &quantize_node); SetNodeAttr("mode", "MIN_FIRST", &quantize_node); AddNodeInput(fake_quant_node.input(0), &quantize_node); AddNodeInput(fake_quant_min_node.name(), &quantize_node); AddNodeInput(fake_quant_max_node.name(), &quantize_node); new_nodes->push_back(quantize_node); NodeDef requantize_node; requantize_node.set_op("Requantize"); requantize_node.set_name(namespace_prefix + "/requantize"); SetNodeAttr("Tinput", DT_QINT32, &requantize_node); SetNodeAttr("out_type", DT_QUINT8, 
&requantize_node); AddNodeInput(quantize_node.name() + ":0", &requantize_node); AddNodeInput(quantize_node.name() + ":1", &requantize_node); AddNodeInput(quantize_node.name() + ":2", &requantize_node); AddNodeInput(fake_quant_min_node.name(), &requantize_node); AddNodeInput(fake_quant_max_node.name(), &requantize_node); new_nodes->push_back(requantize_node); NodeDef dequantize_node; dequantize_node.set_op("Dequantize"); dequantize_node.set_name(fake_quant_node.name()); SetNodeAttr("T", DT_QUINT8, &dequantize_node); SetNodeAttr("mode", "MIN_FIRST", &dequantize_node); AddNodeInput(requantize_node.name() + ":0", &dequantize_node); AddNodeInput(requantize_node.name() + ":1", &dequantize_node); AddNodeInput(requantize_node.name() + ":2", &dequantize_node); new_nodes->push_back(dequantize_node); return OkStatus(); }, {}, output_graph_def)); return OkStatus(); } Status MergeAdjacentRequantizes(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes( input_graph_def, {"Requantize", { {"QuantizeV2", { {"Dequantize", { {"Requantize", { {"*"}, {"*"}, {"*"}, {"RequantizationRange"}, {"RequantizationRange"}, } }, {"Requantize"}, {"Requantize"}, } }, {"Const"}, {"Const"}, }, }, {"QuantizeV2"}, {"QuantizeV2"}, {"Const"}, {"Const"}, } }, [](const NodeMatch& match, const std::set<string>& input_nodes, const std::set<string>& output_nodes, std::vector<NodeDef>* new_nodes) { const NodeDef& fake_requantize_node = match.node; const NodeDef& original_op_node = match.inputs[0].inputs[0].inputs[0].inputs[0].node; const NodeDef& fake_requantize_min_node = match.inputs[3].node; const NodeDef& fake_requantize_max_node = match.inputs[4].node; new_nodes->push_back(original_op_node); new_nodes->push_back(fake_requantize_min_node); new_nodes->push_back(fake_requantize_max_node); NodeDef requantize_node; requantize_node = fake_requantize_node; requantize_node.mutable_input()->Clear(); AddNodeInput(original_op_node.name() + ":0", &requantize_node); AddNodeInput(original_op_node.name() + ":1", &requantize_node); AddNodeInput(original_op_node.name() + ":2", &requantize_node); AddNodeInput(fake_requantize_min_node.name(), &requantize_node); AddNodeInput(fake_requantize_max_node.name(), &requantize_node); new_nodes->push_back(requantize_node); return OkStatus(); }, {}, output_graph_def)); return OkStatus(); } Status HoistFakeQuants(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { GraphDef current_graph_def = input_graph_def; const int max_depth = 3; for (int depth = max_depth; depth > 0; --depth) { OpTypePattern pattern = {"*"}; for (int i = 0; i < depth; ++i) { pattern = {"*", {pattern}}; } pattern = {"FakeQuantWithMinMaxVars", {pattern, {"Const"}, {"Const"}}}; GraphDef hoisted_graph_def; TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes( current_graph_def, pattern, [depth](const NodeMatch& match, const std::set<string>& input_nodes, const std::set<string>& output_nodes, std::vector<NodeDef>* new_nodes) { const NodeDef& fake_quant_node = match.node; const NodeDef& fake_quant_min_node = match.inputs[1].node; const NodeDef& fake_quant_max_node = match.inputs[2].node; std::vector<NodeDef> linear_nodes; NodeMatch current_match = match; for (int i = 0; i <= depth; ++i) { linear_nodes.push_back(current_match.inputs[0].node); current_match = current_match.inputs[0]; } NodeDef new_fake_quant_node; new_fake_quant_node = fake_quant_node; new_fake_quant_node.set_name(fake_quant_node.name() + "_hoisted"); 
new_fake_quant_node.set_input( 0, linear_nodes[linear_nodes.size() - 2].input(0)); new_nodes->push_back(new_fake_quant_node); new_nodes->push_back(fake_quant_min_node); new_nodes->push_back(fake_quant_max_node); linear_nodes[linear_nodes.size() - 2].set_input( 0, new_fake_quant_node.name()); linear_nodes.front().set_name(fake_quant_node.name()); for (const NodeDef& linear_node : linear_nodes) { new_nodes->push_back(linear_node); } return OkStatus(); }, {}, &hoisted_graph_def)); current_graph_def = hoisted_graph_def; } *output_graph_def = current_graph_def; return OkStatus(); } Status QuantizeNodes(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def) { std::set<string> ops_to_ignore; if (context.params.count("ignore_op") > 0) { for (const string& name : context.params.at("ignore_op")) { ops_to_ignore.insert(name); } } const std::vector<QuantizedOpInfo>& op_list = GetQuantizedOpList(); string op_pattern; bool is_first = true; std::map<string, QuantizedOpInfo> op_map; for (const QuantizedOpInfo& op_info : op_list) { if (ops_to_ignore.count(op_info.float_name) == 0) { strings::StrAppend(&op_pattern, (is_first ? "" : "|"), op_info.float_name); op_map.insert({op_info.float_name, op_info}); is_first = false; } } GraphDef placeholder_graph_def; TF_RETURN_IF_ERROR( QuantizePlaceholders(input_graph_def, context, &placeholder_graph_def)); TF_RETURN_IF_ERROR(IsGraphValid(placeholder_graph_def)); GraphDef hoisted_graph_def; TF_RETURN_IF_ERROR( HoistFakeQuants(placeholder_graph_def, context, &hoisted_graph_def)); TF_RETURN_IF_ERROR(IsGraphValid(hoisted_graph_def)); GraphDef converted_graph_def; TF_RETURN_IF_ERROR(ConvertFakeQuantsToRequantize(hoisted_graph_def, context, &converted_graph_def)); TF_RETURN_IF_ERROR(IsGraphValid(converted_graph_def)); float fallback_min; float fallback_max; bool has_fallback_range; TF_RETURN_IF_ERROR(ExtractRangeFromParams( context, "fallback_min", "fallback_max", &fallback_min, &fallback_max, &has_fallback_range)); GraphDef quantized_graph_def; TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes( converted_graph_def, {op_pattern}, [&op_map, fallback_min, fallback_max, has_fallback_range]( const NodeMatch& match, const std::set<string>& input_nodes, const std::set<string>& output_nodes, std::vector<NodeDef>* new_nodes) { const NodeDef& float_node = match.node; const QuantizedOpInfo& op_info = op_map[float_node.op()]; DataTypeVector input_types; DataTypeVector output_types; TF_RETURN_IF_ERROR( GetInOutTypes(float_node, &input_types, &output_types)); bool are_all_float = true; for (int i = 0; i < float_node.input_size(); ++i) { if (op_info.unquantized_inputs.count(i)) { continue; } if (i >= input_types.size()) { LOG(ERROR) << "input_types has incorrect size " << input_types.size() << " <= " << i << ". 
Assuming everything else is floats."; } if (i < input_types.size() && input_types[i] != DT_FLOAT) { are_all_float = false; } } for (const DataType& output_type : output_types) { if (output_type != DT_FLOAT) { are_all_float = false; } } if (!are_all_float) { CopyOriginalMatch(match, new_nodes); return OkStatus(); } string namespace_prefix = float_node.name() + "_eightbit"; std::vector<string> quantized_input_names; for (int i = 0; i < float_node.input_size(); ++i) { if (op_info.unquantized_inputs.count(i)) { continue; } const string& input_name = float_node.input(i); string unique_input_name = namespace_prefix + "/" + UniqueNodeNameFromInput(input_name); NodeDef reshape_dims; reshape_dims.set_op("Const"); reshape_dims.set_name(unique_input_name + "/reshape_dims"); AddNodeInput("^" + NodeNameFromInput(input_name), &reshape_dims); SetNodeAttr("dtype", DT_INT32, &reshape_dims); Tensor reshape_dims_tensor(DT_INT32, {1}); reshape_dims_tensor.flat<int32>()(0) = -1; SetNodeTensorAttr<int32>("value", reshape_dims_tensor, &reshape_dims); new_nodes->push_back(reshape_dims); NodeDef reduction_dims; reduction_dims.set_op("Const"); reduction_dims.set_name(unique_input_name + "/reduction_dims"); AddNodeInput("^" + NodeNameFromInput(input_name), &reduction_dims); SetNodeAttr("dtype", DT_INT32, &reduction_dims); Tensor reduction_dims_tensor(DT_INT32, {1}); reduction_dims_tensor.flat<int32>()(0) = 0; SetNodeTensorAttr<int32>("value", reduction_dims_tensor, &reduction_dims); new_nodes->push_back(reduction_dims); NodeDef reshape_node; reshape_node.set_op("Reshape"); reshape_node.set_name(unique_input_name + "/reshape"); SetNodeAttr("T", DT_FLOAT, &reshape_node); AddNodeInput(input_name, &reshape_node); AddNodeInput(reshape_dims.name(), &reshape_node); new_nodes->push_back(reshape_node); NodeDef min_node; min_node.set_op("Min"); min_node.set_name(unique_input_name + "/min"); SetNodeAttr("T", DT_FLOAT, &min_node); SetNodeAttr("keep_dims", false, &min_node); AddNodeInput(reshape_node.name(), &min_node); AddNodeInput(reduction_dims.name(), &min_node); new_nodes->push_back(min_node); NodeDef max_node; max_node.set_op("Max"); max_node.set_name(unique_input_name + "/max"); SetNodeAttr("T", DT_FLOAT, &max_node); SetNodeAttr("keep_dims", false, &max_node); AddNodeInput(reshape_node.name(), &max_node); AddNodeInput(reduction_dims.name(), &max_node); new_nodes->push_back(max_node); NodeDef quantize_node; quantize_node.set_op("QuantizeV2"); quantize_node.set_name(unique_input_name + "/quantize"); SetNodeAttr("T", DT_QUINT8, &quantize_node); SetNodeAttr("mode", "MIN_FIRST", &quantize_node); AddNodeInput(input_name, &quantize_node); AddNodeInput(min_node.name(), &quantize_node); AddNodeInput(max_node.name(), &quantize_node); new_nodes->push_back(quantize_node); quantized_input_names.push_back(quantize_node.name()); } NodeDef quantized_main_node; quantized_main_node.set_op("Quantized" + float_node.op()); quantized_main_node.set_name(float_node.name() + "/eightbit"); for (const string& attr_to_copy : op_info.attrs_to_copy) { CopyNodeAttr(float_node, attr_to_copy, attr_to_copy, &quantized_main_node); } for (const std::pair<string, DataType>& dtype_to_set : op_info.dtypes_to_set) { SetNodeAttr(dtype_to_set.first, dtype_to_set.second, &quantized_main_node); } int quantized_input_index = 0; for (int i = 0; i < float_node.input_size(); ++i) { if (op_info.unquantized_inputs.count(i)) { AddNodeInput(float_node.input(i), &quantized_main_node); } else { const string& quantized_input_name = 
quantized_input_names[quantized_input_index]; AddNodeInput(quantized_input_name + ":0", &quantized_main_node); ++quantized_input_index; } } if (op_info.min_max_order == QuantizedOpInfo::CONTIGUOUS_MIN_MAX) { for (const string& quantized_input_name : quantized_input_names) { AddNodeInput(quantized_input_name + ":1", &quantized_main_node); AddNodeInput(quantized_input_name + ":2", &quantized_main_node); } } else { for (const string& quantized_input_name : quantized_input_names) { AddNodeInput(quantized_input_name + ":1", &quantized_main_node); } for (const string& quantized_input_name : quantized_input_names) { AddNodeInput(quantized_input_name + ":2", &quantized_main_node); } } new_nodes->push_back(quantized_main_node); string eight_bit_node_name; if (op_info.output_bit_depth == DT_QINT32) { string requantize_min_input; string requantize_max_input; if (has_fallback_range) { NodeDef fallback_min_node; fallback_min_node.set_op("Const"); fallback_min_node.set_name(quantized_main_node.name() + "/fallback_min"); SetNodeAttr("dtype", DT_FLOAT, &fallback_min_node); Tensor fallback_min_tensor(DT_FLOAT, {}); fallback_min_tensor.flat<float>()(0) = fallback_min; SetNodeTensorAttr<float>("value", fallback_min_tensor, &fallback_min_node); new_nodes->push_back(fallback_min_node); NodeDef fallback_max_node; fallback_max_node.set_op("Const"); fallback_max_node.set_name(quantized_main_node.name() + "/fallback_max"); SetNodeAttr("dtype", DT_FLOAT, &fallback_max_node); Tensor fallback_max_tensor(DT_FLOAT, {}); fallback_max_tensor.flat<float>()(0) = fallback_max; SetNodeTensorAttr<float>("value", fallback_max_tensor, &fallback_max_node); new_nodes->push_back(fallback_max_node); requantize_min_input = fallback_min_node.name(); requantize_max_input = fallback_max_node.name(); } else { NodeDef requant_range_node; requant_range_node.set_op("RequantizationRange"); requant_range_node.set_name(quantized_main_node.name() + "/requant_range"); SetNodeAttr("Tinput", DT_QINT32, &requant_range_node); AddNodeInput(quantized_main_node.name() + ":0", &requant_range_node); AddNodeInput(quantized_main_node.name() + ":1", &requant_range_node); AddNodeInput(quantized_main_node.name() + ":2", &requant_range_node); new_nodes->push_back(requant_range_node); requantize_min_input = requant_range_node.name() + ":0"; requantize_max_input = requant_range_node.name() + ":1"; } NodeDef requantize_node; requantize_node.set_op("Requantize"); requantize_node.set_name(quantized_main_node.name() + "/requantize"); SetNodeAttr("Tinput", DT_QINT32, &requantize_node); SetNodeAttr("out_type", DT_QUINT8, &requantize_node); AddNodeInput(quantized_main_node.name() + ":0", &requantize_node); AddNodeInput(quantized_main_node.name() + ":1", &requantize_node); AddNodeInput(quantized_main_node.name() + ":2", &requantize_node); AddNodeInput(requantize_min_input, &requantize_node); AddNodeInput(requantize_max_input, &requantize_node); new_nodes->push_back(requantize_node); eight_bit_node_name = requantize_node.name(); } else { eight_bit_node_name = quantized_main_node.name(); } NodeDef dequantize_node; dequantize_node.set_op("Dequantize"); dequantize_node.set_name(float_node.name()); SetNodeAttr("T", DT_QUINT8, &dequantize_node); SetNodeAttr("mode", "MIN_FIRST", &dequantize_node); AddNodeInput(eight_bit_node_name + ":0", &dequantize_node); AddNodeInput(eight_bit_node_name + ":1", &dequantize_node); AddNodeInput(eight_bit_node_name + ":2", &dequantize_node); new_nodes->push_back(dequantize_node); return OkStatus(); }, {}, &quantized_graph_def)); 
TF_RETURN_IF_ERROR(IsGraphValid(quantized_graph_def)); GraphDef merged_graph_def; TF_RETURN_IF_ERROR(MergeAdjacentRequantizes(quantized_graph_def, context, &merged_graph_def)); TF_RETURN_IF_ERROR(IsGraphValid(merged_graph_def)); GraphDef deduped_graph_def; TF_RETURN_IF_ERROR( MergeDuplicateNodes(merged_graph_def, context, &deduped_graph_def)); TF_RETURN_IF_ERROR(IsGraphValid(deduped_graph_def)); TF_RETURN_IF_ERROR(RemoveRedundantQuantizations(deduped_graph_def, context, output_graph_def)); TF_RETURN_IF_ERROR(IsGraphValid(*output_graph_def)); return OkStatus(); } REGISTER_GRAPH_TRANSFORM("quantize_nodes", QuantizeNodes); REGISTER_GRAPH_TRANSFORM("merge_duplicate_nodes", MergeDuplicateNodes); } }
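For orientation, the transform registered above can also be driven programmatically through the same entry point the test file below forward-declares. The following is an illustrative sketch only: the helper name QuantizeGraphFile, the file-path parameters, and the "input"/"softmax" node names are invented placeholders, not part of the original file (the same transform is also reachable through the transform_graph command-line tool documented in this directory's README).

// Illustrative sketch only -- not part of the original source. Loads a
// GraphDef, runs the "quantize_nodes" transform via its C++ entry point,
// and writes the rewritten graph back out.
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"

namespace tensorflow {
namespace graph_transforms {

// Defined in quantize_nodes.cc above; forward-declared here the same way
// the test file below declares it.
Status QuantizeNodes(const GraphDef& input_graph_def,
                     const TransformFuncContext& context,
                     GraphDef* output_graph_def);

Status QuantizeGraphFile(const string& in_path, const string& out_path) {
  GraphDef input_graph_def;
  TF_RETURN_IF_ERROR(
      ReadBinaryProto(Env::Default(), in_path, &input_graph_def));
  TransformFuncContext context;
  context.input_names = {"input"};     // assumed placeholder name
  context.output_names = {"softmax"};  // assumed output name
  GraphDef output_graph_def;
  TF_RETURN_IF_ERROR(
      QuantizeNodes(input_graph_def, context, &output_graph_def));
  return WriteBinaryProto(Env::Default(), out_path, output_graph_def);
}

}  // namespace graph_transforms
}  // namespace tensorflow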
#define EIGEN_USE_THREADS #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/tools/graph_transforms/transform_utils.h" namespace tensorflow { namespace graph_transforms { Status QuantizeNodes(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); Status RemoveRedundantQuantizations(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); Status QuantizePlaceholders(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); Status ConvertFakeQuantsToRequantize(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); Status MergeAdjacentRequantizes(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); Status HoistFakeQuants(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); Status MergeDuplicateNodes(const GraphDef& input_graph_def, const TransformFuncContext& context, GraphDef* output_graph_def); class QuantizeNodesTest : public ::testing::Test { protected: void TestTransformedVersusFloatGraph( const TransformFunc& transform_function, const GraphDef& float_graph_def, const std::vector<std::pair<string, Tensor>>& float_inputs, const std::vector<std::pair<string, Tensor>>& transformed_inputs, const std::vector<string>& output_names, const TransformFuncContext& in_context, double threshold, GraphDef* transformed_graph_def) { std::unique_ptr<Session> float_session(NewSession(SessionOptions())); TF_ASSERT_OK(float_session->Create(float_graph_def)); std::vector<Tensor> float_outputs; TF_ASSERT_OK( float_session->Run(float_inputs, output_names, {}, &float_outputs)); TransformFuncContext context(in_context); std::vector<string> input_names; for (const std::pair<const string&, const Tensor&> float_input : float_inputs) { context.input_names.push_back(float_input.first); } context.output_names = output_names; TF_ASSERT_OK( transform_function(float_graph_def, context, transformed_graph_def)); std::unique_ptr<Session> transformed_session(NewSession(SessionOptions())); TF_ASSERT_OK(transformed_session->Create(*transformed_graph_def)); std::vector<Tensor> transformed_outputs; TF_ASSERT_OK(transformed_session->Run(transformed_inputs, output_names, {}, &transformed_outputs)); const int output_count = output_names.size(); EXPECT_EQ(output_count, float_outputs.size()); EXPECT_EQ(output_count, transformed_outputs.size()); for (int i = 0; i < output_count; ++i) { test::ExpectTensorNear<float>(float_outputs[i], transformed_outputs[i], threshold); } } void TestQuantizedVersusFloatGraph( const GraphDef& float_graph_def, const std::vector<std::pair<string, Tensor>>& inputs, const std::vector<string>& output_names) { GraphDef quantized_graph_def; TestTransformedVersusFloatGraph(QuantizeNodes, float_graph_def, inputs, inputs, output_names, {}, 1.0, &quantized_graph_def); const std::set<string> quantizable_ops = { "Add", "BiasAdd", "Concat", "Conv2D", "MatMul", "Relu", "Relu6", 
"ResizeBilinear", "AvgPool", "MaxPool", "Mul"}; for (const NodeDef& node : quantized_graph_def.node()) { EXPECT_EQ(0, quantizable_ops.count(node.op())) << "Found quantizable node " << node.op() << " for node named " << node.name(); } } void TestGraphWithInputRange( const GraphDef& float_graph_def, const std::vector<std::pair<string, Tensor>>& float_inputs, const std::vector<string>& output_names, float range_min, float range_max) { TransformFuncContext context; context.params["input_min"] = {strings::StrCat(range_min)}; context.params["input_max"] = {strings::StrCat(range_max)}; std::vector<std::pair<string, Tensor>> quantized_inputs; for (const std::pair<string, Tensor>& float_input : float_inputs) { const Tensor& float_tensor = float_input.second; Tensor quantized_tensor(DT_QUINT8, float_tensor.shape()); FloatTensorToQuantizedInPlace<quint8>(float_tensor, range_min, range_max, &quantized_tensor); quantized_inputs.push_back({float_input.first, quantized_tensor}); } GraphDef quantized_graph_def; TestTransformedVersusFloatGraph( QuantizeNodes, float_graph_def, float_inputs, quantized_inputs, output_names, context, 1.0, &quantized_graph_def); } void TestGraphWithFallbackRange( const GraphDef& float_graph_def, const std::vector<std::pair<string, Tensor>>& float_inputs, const std::vector<string>& output_names, float range_min, float range_max, GraphDef* quantized_graph_def) { TransformFuncContext context; context.params["fallback_min"] = {strings::StrCat(range_min)}; context.params["fallback_max"] = {strings::StrCat(range_max)}; TestTransformedVersusFloatGraph(QuantizeNodes, float_graph_def, float_inputs, float_inputs, output_names, context, 2.0, quantized_graph_def); } void TestIgnoreOps(std::initializer_list<string> ops_to_ignore) { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; auto const_op = [&](const string& name, const TensorShape& shape, std::initializer_list<float> values) { Tensor tensor(DT_FLOAT, shape); test::FillValues<float>(&tensor, values); return Const(root.WithOpName(name), Input::Initializer(tensor)); }; int m = 1; int n = 1; int k = 1; Output a_op = const_op("a_op", {m, k}, {2}); Output b_op = const_op("b_op", {k, n}, {3}); Output c_op = const_op("c_op", {m, k}, {1}); Output d_op = const_op("d_op", {k, n}, {4}); Output mat_mul_op = MatMul(root.WithOpName("mat_mul_op"), a_op, b_op); Output mul_op = Mul(root.WithOpName("mul"), c_op, d_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); TransformFuncContext context; if (ops_to_ignore.size() > 0) { context.params["ignore_op"] = ops_to_ignore; } GraphDef quantized_graph_def; TestTransformedVersusFloatGraph(QuantizeNodes, float_graph_def, {}, {}, {"mat_mul_op", "mul"}, context, 1.0, &quantized_graph_def); for (const string& op_name : ops_to_ignore) { bool exists_in_quantized_graph = false; for (const NodeDef& node : quantized_graph_def.node()) { if (node.op() == op_name) { exists_in_quantized_graph = true; break; } } EXPECT_TRUE(exists_in_quantized_graph) << "Op " << op_name << " should not have been replace by a quantized version"; } } void TestQuantizeMatMul(int m, int n, int k, const std::vector<float>& a_values, const std::vector<float>& b_values) { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor a_tensor(DT_FLOAT, TensorShape({m, k})); test::FillValues<float>(&a_tensor, a_values); Output a_op = Const(root.WithOpName("a_op"), Input::Initializer(a_tensor)); Tensor b_tensor(DT_FLOAT, TensorShape({k, n})); 
test::FillValues<float>(&b_tensor, b_values); Output b_op = Const(root.WithOpName("b_op"), Input::Initializer(b_tensor)); Output mat_mul_op = MatMul(root.WithOpName("mat_mul_op"), a_op, b_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); TestQuantizedVersusFloatGraph(float_graph_def, {}, {"mat_mul_op"}); } void TestQuantizeMatMulTiny() { TestQuantizeMatMul(1, 1, 1, {2}, {3}); TestQuantizeMatMul(1, 2, 1, {1}, {2, 3}); TestQuantizeMatMul(1, 1, 2, {1, 1}, {1, 1}); TestQuantizeMatMul(1, 1, 2, {0, 0}, {1, 1}); TestQuantizeMatMul(1, 1, 2, {1, 2}, {1, 2}); } void TestQuantizeMatMulSmall() { TestQuantizeMatMul(2, 4, 3, {1, 2, 3, 4, 5, 6}, {7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}); } void TestQuantizeMul() { using namespace ::tensorflow::ops; std::vector<int64_t> x_shape({10, 100}); const size_t x_num_elements = TensorShape(x_shape).num_elements(); std::vector<float> x_values(x_num_elements); for (int i = 0; i < x_num_elements; ++i) { x_values[i] = (i % 256) / 256.0f; } std::vector<int64_t> y_shape({100}); const size_t y_num_elements = TensorShape(y_shape).num_elements(); std::vector<float> y_values(y_num_elements); for (int i = 0; i < y_num_elements; ++i) { y_values[i] = ((i + 23) % 123) - 50; } Scope root = Scope::NewRootScope(); Tensor x_float_tensor(DT_FLOAT, TensorShape(x_shape)); test::FillValues<float>(&x_float_tensor, x_values); Output x = Const(root.WithOpName("x"), Input::Initializer(x_float_tensor)); Tensor y_float_tensor(DT_FLOAT, TensorShape(y_shape)); test::FillValues<float>(&y_float_tensor, y_values); Output y = Const(root.WithOpName("y"), Input::Initializer(y_float_tensor)); Mul mul = Mul(root.WithOpName("mul"), x, y); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); TestQuantizedVersusFloatGraph(float_graph_def, {}, {"mul"}); } void TestQuantizeAdd() { using namespace ::tensorflow::ops; std::vector<int64_t> x_shape({10, 100}); const size_t x_num_elements = TensorShape(x_shape).num_elements(); std::vector<float> x_values(x_num_elements); for (int i = 0; i < x_num_elements; ++i) { x_values[i] = (i % 256) / 256.0f; } std::vector<int64_t> y_shape({100}); const size_t y_num_elements = TensorShape(y_shape).num_elements(); std::vector<float> y_values(y_num_elements); for (int i = 0; i < y_num_elements; ++i) { y_values[i] = ((i + 23) % 123) - 50; } Scope root = Scope::NewRootScope(); Tensor x_float_tensor(DT_FLOAT, TensorShape(x_shape)); test::FillValues<float>(&x_float_tensor, x_values); Output x = Const(root.WithOpName("x"), Input::Initializer(x_float_tensor)); Tensor y_float_tensor(DT_FLOAT, TensorShape(y_shape)); test::FillValues<float>(&y_float_tensor, y_values); Output y = Const(root.WithOpName("y"), Input::Initializer(y_float_tensor)); Add add = Add(root.WithOpName("add"), x, y); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); TestQuantizedVersusFloatGraph(float_graph_def, {}, {"add"}); } void TestQuantizeConv2D(int depth, int input_width, int input_height, int input_batch_count, int filter_size, int filter_count, int stride, const string& padding, const std::vector<float>& input_values, const std::vector<float>& filter_values) { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_tensor(DT_FLOAT, TensorShape({input_batch_count, input_height, input_width, depth})); test::FillValues<float>(&input_tensor, input_values); Output input_op = Const(root.WithOpName("input_op"), Input::Initializer(input_tensor)); Tensor filter_tensor( DT_FLOAT, 
TensorShape({filter_size, filter_size, depth, filter_count})); test::FillValues<float>(&filter_tensor, filter_values); Output filter_op = Const(root.WithOpName("filter_op"), Input::Initializer(filter_tensor)); Output conv_op = Conv2D(root.WithOpName("conv_op"), input_op, filter_op, {1, stride, stride, 1}, padding); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); TestQuantizedVersusFloatGraph(float_graph_def, {}, {"conv_op"}); } void TestQuantizeBiasAdd() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_tensor(DT_FLOAT, TensorShape({1, 1, 2, 6})); test::FillIota<float>(&input_tensor, 1); Output input_op = Const(root.WithOpName("input_op"), Input::Initializer(input_tensor)); Tensor offset_tensor(DT_FLOAT, TensorShape({6})); test::FillIota<float>(&offset_tensor, 1); Output offset_op = Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor)); Output bias_add_op = BiasAdd(root.WithOpName("bias_add_op"), input_op, offset_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); TestQuantizedVersusFloatGraph(float_graph_def, {}, {"bias_add_op"}); } void TestQuantizeConcat() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor shape_tensor(DT_INT32, TensorShape({})); test::FillValues<int32>(&shape_tensor, {0}); Output shape_op = Const(root.WithOpName("shape_op"), Input::Initializer(shape_tensor)); Tensor a_tensor(DT_FLOAT, TensorShape({2, 2, 3})); test::FillValues<float>(&a_tensor, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); Output a_op = Const(root.WithOpName("a_op"), Input::Initializer(a_tensor)); Tensor b_tensor(DT_FLOAT, TensorShape({2, 2, 3})); test::FillValues<float>(&b_tensor, {13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}); Output b_op = Const(root.WithOpName("b_op"), Input::Initializer(b_tensor)); Output concat_op = Concat(root.WithOpName("concat_op"), {a_op, b_op}, shape_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); TestQuantizedVersusFloatGraph(float_graph_def, {}, {"concat_op"}); } void TestQuantizeRelu() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor constant_tensor(DT_FLOAT, TensorShape({1, 2, 6, 1})); test::FillValues<float>(&constant_tensor, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); Output constant_op = Const(root.WithOpName("constant_op"), Input::Initializer(constant_tensor)); Output relu_op = Relu(root.WithOpName("relu_op"), constant_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); TestQuantizedVersusFloatGraph(float_graph_def, {}, {"relu_op"}); } void TestQuantizeRelu6() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor constant_tensor(DT_FLOAT, TensorShape({1, 2, 6, 1})); test::FillValues<float>(&constant_tensor, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); Output constant_op = Const(root.WithOpName("constant_op"), Input::Initializer(constant_tensor)); Output relu6_op = Relu6(root.WithOpName("relu6_op"), constant_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); TestQuantizedVersusFloatGraph(float_graph_def, {}, {"relu6_op"}); } void TestQuantizeMaxPool() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor constant_tensor(DT_FLOAT, TensorShape({1, 2, 6, 1})); test::FillValues<float>(&constant_tensor, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); Output constant_op = Const(root.WithOpName("constant_op"), 
Input::Initializer(constant_tensor)); Output max_pool_op = MaxPool(root.WithOpName("max_pool_op"), constant_op, {1, 2, 2, 1}, {1, 1, 1, 1}, "SAME"); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); TestQuantizedVersusFloatGraph(float_graph_def, {}, {"max_pool_op"}); } void TestQuantizeAvgPool() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor constant_tensor(DT_FLOAT, TensorShape({1, 2, 6, 1})); test::FillValues<float>(&constant_tensor, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); Output constant_op = Const(root.WithOpName("constant_op"), Input::Initializer(constant_tensor)); Output avg_pool_op = AvgPool(root.WithOpName("avg_pool_op"), constant_op, {1, 2, 2, 1}, {1, 1, 1, 1}, "SAME"); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); TestQuantizedVersusFloatGraph(float_graph_def, {}, {"avg_pool_op"}); } void TestQuantizeReshape() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor constant_tensor(DT_FLOAT, TensorShape({4, 5})); test::FillValues<float>(&constant_tensor, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}); Output constant_op = Const(root.WithOpName("constant_op"), Input::Initializer(constant_tensor)); Output reshape_op = Reshape(root.WithOpName("reshape_op"), constant_op, {10, 2}); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); TestQuantizedVersusFloatGraph(float_graph_def, {}, {"reshape_op"}); } void TestRemoveRedundantQuantization() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor quantized_tensor(DT_QUINT8, TensorShape({})); test::FillValues<quint8>(&quantized_tensor, {0}); Output quantized_op = Const(root.WithOpName("quantized_op"), Input::Initializer(quantized_tensor)); Tensor quantized_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&quantized_min_tensor, {2.0f}); Output quantized_min_op = Const(root.WithOpName("quantized_min_op"), Input::Initializer(quantized_min_tensor)); Tensor quantized_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&quantized_max_tensor, {2.0f}); Output quantized_max_op = Const(root.WithOpName("quantized_max_op"), Input::Initializer(quantized_min_tensor)); Output dequantize_op = Dequantize(root.WithOpName("dequantize_op"), quantized_op, quantized_min_op, quantized_max_op); Tensor dequantize_reshape_dims_tensor(DT_INT32, TensorShape({1})); test::FillValues<int32>(&dequantize_reshape_dims_tensor, {-1}); Output dequantize_reshape_dims = Const(root.WithOpName("dequantize_reshape_dims"), Input::Initializer(dequantize_reshape_dims_tensor)); Tensor dequantize_reduction_dims_tensor(DT_INT32, TensorShape({})); test::FillValues<int32>(&dequantize_reduction_dims_tensor, {0}); Output dequantize_reduction_dims = Const(root.WithOpName("dequantize_reduction_dims"), Input::Initializer(dequantize_reduction_dims_tensor)); Output dequantize_reshape = Reshape(root.WithOpName("dequantize_reshape"), dequantize_op, dequantize_reshape_dims); Output dequantize_min = Min(root.WithOpName("dequantize_min"), dequantize_reshape, dequantize_reduction_dims, Min::Attrs().KeepDims(false)); Output dequantize_max = Max(root.WithOpName("dequantize_max"), dequantize_reshape, dequantize_reduction_dims, Max::Attrs().KeepDims(false)); QuantizeV2 quantize_op(root.WithOpName("quantize_op"), dequantize_op, dequantize_min, dequantize_max, DT_QUINT8, QuantizeV2::Attrs().Mode("MIN_FIRST")); Output final_dequantize = 
Dequantize(root.WithOpName("final_dequantize"), quantize_op.output, quantize_op.output_min, quantize_op.output_max); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef removed_graph_def; TestTransformedVersusFloatGraph( RemoveRedundantQuantizations, float_graph_def, {}, {}, {"final_dequantize"}, {}, 1.0, &removed_graph_def); std::map<string, const NodeDef*> node_map; MapNamesToNodes(removed_graph_def, &node_map); EXPECT_EQ(1, node_map.count("final_dequantize")); EXPECT_EQ("quantized_op", node_map.at("final_dequantize")->input(0)); } void TestRemoveRedundantQuantizationWithBiasAdd() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor quantized_tensor(DT_QUINT8, TensorShape({1, 6})); test::FillValues<quint8>(&quantized_tensor, {0, 0, 0, 0, 0, 0}); Output quantized_op = Const(root.WithOpName("quantized_op"), Input::Initializer(quantized_tensor)); Tensor quantized_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&quantized_min_tensor, {2.0f}); Output quantized_min_op = Const(root.WithOpName("quantized_min_op"), Input::Initializer(quantized_min_tensor)); Tensor quantized_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&quantized_max_tensor, {2.0f}); Output quantized_max_op = Const(root.WithOpName("quantized_max_op"), Input::Initializer(quantized_min_tensor)); Tensor offset_tensor(DT_QUINT8, TensorShape({6})); test::FillValues<quint8>(&offset_tensor, {1, 2, 3, 4, 5, 6}); Output offset_op = Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor)); Tensor offset_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&offset_min_tensor, {0.0f}); Output offset_min_op = Const(root.WithOpName("offset_min_op"), Input::Initializer(offset_min_tensor)); Tensor offset_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&offset_max_tensor, {255.0f}); Output offset_max_op = Const(root.WithOpName("offset_max_op"), Input::Initializer(offset_max_tensor)); QuantizedBiasAdd quantized_bias_add_op( root.WithOpName("bias_add_op"), quantized_op, offset_op, quantized_min_op, quantized_max_op, offset_min_op, offset_max_op, DT_QINT32); RequantizationRange requantization_range_op( root.WithOpName("requantization_range_op"), quantized_bias_add_op.output, quantized_bias_add_op.min_out, quantized_bias_add_op.max_out); Requantize requantize_op( root.WithOpName("requantize_op"), quantized_bias_add_op.output, quantized_bias_add_op.min_out, quantized_bias_add_op.max_out, requantization_range_op.output_min, requantization_range_op.output_max, DT_QUINT8); Output dequantize_op = Dequantize(root.WithOpName("dequantize_op"), requantize_op.output, requantize_op.output_min, requantize_op.output_max); Tensor dequantize_reshape_dims_tensor(DT_INT32, TensorShape({1})); test::FillValues<int32>(&dequantize_reshape_dims_tensor, {-1}); Output dequantize_reshape_dims = Const(root.WithOpName("dequantize_reshape_dims"), Input::Initializer(dequantize_reshape_dims_tensor)); Tensor dequantize_reduction_dims_tensor(DT_INT32, TensorShape({})); test::FillValues<int32>(&dequantize_reduction_dims_tensor, {0}); Output dequantize_reduction_dims = Const(root.WithOpName("dequantize_reduction_dims"), Input::Initializer(dequantize_reduction_dims_tensor)); Output dequantize_reshape = Reshape(root.WithOpName("dequantize_reshape"), dequantize_op, dequantize_reshape_dims); Output dequantize_min = Min(root.WithOpName("dequantize_min"), dequantize_reshape, dequantize_reduction_dims, Min::Attrs().KeepDims(false)); Output 
dequantize_max = Max(root.WithOpName("dequantize_max"), dequantize_reshape, dequantize_reduction_dims, Max::Attrs().KeepDims(false)); QuantizeV2 quantize_op(root.WithOpName("quantize_op"), dequantize_op, dequantize_min, dequantize_max, DT_QUINT8, QuantizeV2::Attrs().Mode("MIN_FIRST")); Output final_dequantize = Dequantize(root.WithOpName("final_dequantize"), quantize_op.output, quantize_op.output_min, quantize_op.output_max); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef removed_graph_def; TestTransformedVersusFloatGraph( RemoveRedundantQuantizations, float_graph_def, {}, {}, {"final_dequantize"}, {}, 1.0, &removed_graph_def); std::map<string, const NodeDef*> node_map; MapNamesToNodes(removed_graph_def, &node_map); EXPECT_EQ(1, node_map.count("final_dequantize")); EXPECT_EQ("requantize_op", node_map.at("final_dequantize")->input(0)); } void TestQuantizeResizeBilinear() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor size_tensor(DT_INT32, TensorShape({2})); test::FillValues<int32>(&size_tensor, {256, 256}); Output constant_op = Const(root.WithOpName("size_tensor_op"), Input::Initializer(size_tensor)); Output placeholder_op = Placeholder(root.WithOpName("placeholder_op"), DT_FLOAT); Output resize_bilinear_op = ResizeBilinear( root.WithOpName("resize_bilinear_op"), placeholder_op, constant_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); Tensor input_tensor(DT_FLOAT, {1, 128, 128, 3}); test::FillFn<float>(&input_tensor, [](int) { return 100.0f; }); TestQuantizedVersusFloatGraph(float_graph_def, {{"placeholder_op", input_tensor}}, {"resize_bilinear_op"}); } void TestRemoveRedundantQuantizationWithMultipleOutputs() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor quantized_tensor(DT_QUINT8, TensorShape({1, 6})); test::FillValues<quint8>(&quantized_tensor, {0, 0, 0, 0, 0, 0}); Output quantized_op = Const(root.WithOpName("quantized_op"), Input::Initializer(quantized_tensor)); Tensor quantized_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&quantized_min_tensor, {2.0f}); Output quantized_min_op = Const(root.WithOpName("quantized_min_op"), Input::Initializer(quantized_min_tensor)); Tensor quantized_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&quantized_max_tensor, {2.0f}); Output quantized_max_op = Const(root.WithOpName("quantized_max_op"), Input::Initializer(quantized_min_tensor)); Tensor offset_tensor(DT_QUINT8, TensorShape({6})); test::FillValues<quint8>(&offset_tensor, {1, 2, 3, 4, 5, 6}); Output offset_op = Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor)); Tensor offset_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&offset_min_tensor, {0.0f}); Output offset_min_op = Const(root.WithOpName("offset_min_op"), Input::Initializer(offset_min_tensor)); Tensor offset_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&offset_max_tensor, {255.0f}); Output offset_max_op = Const(root.WithOpName("offset_max_op"), Input::Initializer(offset_max_tensor)); QuantizedBiasAdd quantized_bias_add_op( root.WithOpName("bias_add_op"), quantized_op, offset_op, quantized_min_op, quantized_max_op, offset_min_op, offset_max_op, DT_QINT32); RequantizationRange requantization_range_op( root.WithOpName("requantization_range_op"), quantized_bias_add_op.output, quantized_bias_add_op.min_out, quantized_bias_add_op.max_out); Requantize requantize_op( root.WithOpName("requantize_op"), 
quantized_bias_add_op.output, quantized_bias_add_op.min_out, quantized_bias_add_op.max_out, requantization_range_op.output_min, requantization_range_op.output_max, DT_QUINT8); Output dequantize_op = Dequantize(root.WithOpName("dequantize_op"), requantize_op.output, requantize_op.output_min, requantize_op.output_max); Tensor dequantize_reshape_dims_tensor(DT_INT32, TensorShape({1})); test::FillValues<int32>(&dequantize_reshape_dims_tensor, {-1}); Output dequantize_reshape_dims = Const(root.WithOpName("dequantize_reshape_dims"), Input::Initializer(dequantize_reshape_dims_tensor)); Tensor dequantize_reduction_dims_tensor(DT_INT32, TensorShape({})); test::FillValues<int32>(&dequantize_reduction_dims_tensor, {0}); Output dequantize_reduction_dims = Const(root.WithOpName("dequantize_reduction_dims"), Input::Initializer(dequantize_reduction_dims_tensor)); Output dequantize_reshape = Reshape(root.WithOpName("dequantize_reshape"), dequantize_op, dequantize_reshape_dims); Output dequantize_min = Min(root.WithOpName("dequantize_min"), dequantize_reshape, dequantize_reduction_dims, Min::Attrs().KeepDims(false)); Output dequantize_max = Max(root.WithOpName("dequantize_max"), dequantize_reshape, dequantize_reduction_dims, Max::Attrs().KeepDims(false)); QuantizeV2 quantize_op(root.WithOpName("quantize_op"), dequantize_op, dequantize_min, dequantize_max, DT_QUINT8, QuantizeV2::Attrs().Mode("MIN_FIRST")); Output final_dequantize = Dequantize(root.WithOpName("final_dequantize"), quantize_op.output, quantize_op.output_min, quantize_op.output_max); Output relu_op = Relu(root.WithOpName("relu_op"), dequantize_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef removed_graph_def; TestTransformedVersusFloatGraph( RemoveRedundantQuantizations, float_graph_def, {}, {}, {"final_dequantize", "relu_op"}, {}, 1.0, &removed_graph_def); std::map<string, int> op_type_count; for (const NodeDef& node : removed_graph_def.node()) { ++op_type_count[node.op()]; } EXPECT_EQ(2, op_type_count["Dequantize"]); } void TestQuantizePlaceholders() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Output placeholder_op = Placeholder(root.WithOpName("placeholder_op"), DT_FLOAT); Output relu_op = Relu(root.WithOpName("relu_op"), placeholder_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); TransformFuncContext context; context.input_names = {"placeholder_op"}; context.output_names = {"relu_op"}; context.params = {{"input_min", {"-10.0"}}, {"input_max", {"10.0"}}}; GraphDef quantized_graph_def; TF_ASSERT_OK( QuantizePlaceholders(float_graph_def, context, &quantized_graph_def)); Tensor input_tensor(DT_FLOAT, {}); input_tensor.flat<float>()(0) = 5.0f; TestQuantizedVersusFloatGraph( float_graph_def, {{"placeholder_op", input_tensor}}, {"relu_op"}); std::map<string, const NodeDef*> node_map; MapNamesToNodes(quantized_graph_def, &node_map); EXPECT_NE("placeholder_op", node_map.at("relu_op")->input(0)); EXPECT_EQ("Placeholder", node_map.at("placeholder_op")->op()); EXPECT_EQ(DT_QUINT8, node_map.at("placeholder_op")->attr().at("dtype").type()); } void TestInputRange() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; const int width = 100; Tensor a_data(DT_FLOAT, TensorShape({1, width})); test::FillIota<float>(&a_data, 1.0f); Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data)); Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT); Output bias_add = 
BiasAdd(root.WithOpName("bias_add"), a_const, placeholder); GraphDef graph_def; TF_ASSERT_OK(root.ToGraphDef(&graph_def)); Tensor placeholder_tensor(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&placeholder_tensor, 1.0f); TestGraphWithInputRange(graph_def, {{"placeholder", placeholder_tensor}}, {"bias_add"}, 0.0f, 100.0f); } void TestFallbackRange() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; const int width = 100; Tensor a_data(DT_FLOAT, TensorShape({1, width})); test::FillIota<float>(&a_data, 1.0f); Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data)); Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT); Output bias_add = BiasAdd(root.WithOpName("bias_add"), a_const, placeholder); GraphDef graph_def; TF_ASSERT_OK(root.ToGraphDef(&graph_def)); Tensor placeholder_tensor(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&placeholder_tensor, 1.0f); GraphDef quantized_graph_def; TestGraphWithFallbackRange(graph_def, {{"placeholder", placeholder_tensor}}, {"bias_add"}, 0.0f, 200.0f, &quantized_graph_def); for (const NodeDef& node : quantized_graph_def.node()) { EXPECT_NE("RequantizationRange", node.op()); } } void TestConvertFakeQuantsToRequantize() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_tensor(DT_FLOAT, TensorShape({1, 1, 2, 6})); test::FillIota<float>(&input_tensor, 1); Output input_op = Const(root.WithOpName("input_op"), Input::Initializer(input_tensor)); Tensor offset_tensor(DT_FLOAT, TensorShape({6})); test::FillIota<float>(&offset_tensor, 1); Output offset_op = Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor)); Output bias_add_op = BiasAdd(root.WithOpName("bias_add_op"), input_op, offset_op); Tensor fake_quant_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&fake_quant_min_tensor, {0.0f}); Output fake_quant_min_op = Const(root.WithOpName("fake_quant_min_op"), Input::Initializer(fake_quant_min_tensor)); Tensor fake_quant_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&fake_quant_max_tensor, {18.0f}); Output fake_quant_max_op = Const(root.WithOpName("fake_quant_max_op"), Input::Initializer(fake_quant_max_tensor)); Output fake_quant_op = FakeQuantWithMinMaxVars(root.WithOpName("fake_quant_op"), bias_add_op, fake_quant_min_op, fake_quant_max_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef converted_graph_def; TestTransformedVersusFloatGraph(ConvertFakeQuantsToRequantize, float_graph_def, {}, {}, {"fake_quant_op"}, {}, 1.0, &converted_graph_def); for (const NodeDef& node : converted_graph_def.node()) { EXPECT_NE("FakeQuantWithMinMaxVars", node.op()); } } void TestMergeAdjacentRequantizes() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_tensor(DT_QUINT8, TensorShape({1, 1, 2, 6})); test::FillValues<quint8>(&input_tensor, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); Output input_op = Const(root.WithOpName("input_op"), Input::Initializer(input_tensor)); Tensor input_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&input_min_tensor, {0.0f}); Output input_min_op = Const(root.WithOpName("input_min_op"), Input::Initializer(input_min_tensor)); Tensor input_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&input_max_tensor, {255.0f}); Output input_max_op = Const(root.WithOpName("input_max_op"), Input::Initializer(input_max_tensor)); Tensor offset_tensor(DT_QUINT8, TensorShape({6})); 
test::FillValues<quint8>(&offset_tensor, {1, 2, 3, 4, 5, 6}); Output offset_op = Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor)); Tensor offset_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&offset_min_tensor, {0.0f}); Output offset_min_op = Const(root.WithOpName("offset_min_op"), Input::Initializer(offset_min_tensor)); Tensor offset_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&offset_max_tensor, {255.0f}); Output offset_max_op = Const(root.WithOpName("offset_max_op"), Input::Initializer(offset_max_tensor)); QuantizedBiasAdd quantized_bias_add_op( root.WithOpName("quantized_bias_add_op"), input_op, offset_op, input_min_op, input_max_op, offset_min_op, offset_max_op, DT_QINT32); RequantizationRange requantization_range_op( root.WithOpName("requantization_range_op"), quantized_bias_add_op.output, quantized_bias_add_op.min_out, quantized_bias_add_op.max_out); Requantize requantize_op( root.WithOpName("requantize_op"), quantized_bias_add_op.output, quantized_bias_add_op.min_out, quantized_bias_add_op.max_out, requantization_range_op.output_min, requantization_range_op.output_max, DT_QUINT8); Output dequantize_op = Dequantize(root.WithOpName("dequantize_op"), requantize_op.output, requantize_op.output_min, requantize_op.output_max, Dequantize::Attrs().Mode("MIN_FIRST")); Tensor quantize_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&quantize_min_tensor, {0.0f}); Output quantize_min_op = Const(root.WithOpName("quantize_min_op"), Input::Initializer(quantize_min_tensor)); Tensor quantize_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&quantize_max_tensor, {255.0f}); Output quantize_max_op = Const(root.WithOpName("quantize_max_op"), Input::Initializer(quantize_max_tensor)); QuantizeV2 quantize_op(root.WithOpName("quantize_op"), dequantize_op, quantize_min_op, quantize_max_op, DT_QINT32, QuantizeV2::Attrs().Mode("MIN_FIRST")); Tensor fake_requantize_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&fake_requantize_min_tensor, {0.0f}); Output fake_requantize_min_op = Const(root.WithOpName("fake_requantize_min_op"), Input::Initializer(fake_requantize_min_tensor)); Tensor fake_requantize_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&fake_requantize_max_tensor, {255.0f}); Output fake_requantize_max_op = Const(root.WithOpName("fake_requantize_max_op"), Input::Initializer(fake_requantize_max_tensor)); Requantize fake_requantize_op( root.WithOpName("fake_requantize_op"), quantize_op.output, quantize_op.output_min, quantize_op.output_max, fake_requantize_min_op, fake_requantize_max_op, DT_QUINT8); Output fake_dequantize_op = Dequantize( root.WithOpName("fake_dequantize_op"), fake_requantize_op.output, fake_requantize_op.output_min, fake_requantize_op.output_max, Dequantize::Attrs().Mode("MIN_FIRST")); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef converted_graph_def; TestTransformedVersusFloatGraph(MergeAdjacentRequantizes, float_graph_def, {}, {}, {"fake_dequantize_op"}, {}, 1.0, &converted_graph_def); int requantize_count = 0; for (const NodeDef& node : converted_graph_def.node()) { if (node.op() == "Requantize") { ++requantize_count; } } EXPECT_EQ(1, requantize_count); } void TestConvertFakeQuantsEndToEnd() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_tensor(DT_FLOAT, TensorShape({1, 1, 2, 6})); test::FillIota<float>(&input_tensor, 1); Output input_op = Const(root.WithOpName("input_op"), 
Input::Initializer(input_tensor)); Tensor offset_tensor(DT_FLOAT, TensorShape({6})); test::FillIota<float>(&offset_tensor, 1); Output offset_op = Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor)); Output bias_add_op = BiasAdd(root.WithOpName("bias_add_op"), input_op, offset_op); Tensor fake_quant_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&fake_quant_min_tensor, {0.0f}); Output fake_quant_min_op = Const(root.WithOpName("fake_quant_min_op"), Input::Initializer(fake_quant_min_tensor)); Tensor fake_quant_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&fake_quant_max_tensor, {18.0f}); Output fake_quant_max_op = Const(root.WithOpName("fake_quant_max_op"), Input::Initializer(fake_quant_max_tensor)); Output fake_quant_op = FakeQuantWithMinMaxVars(root.WithOpName("fake_quant_op"), bias_add_op, fake_quant_min_op, fake_quant_max_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef converted_graph_def; TestTransformedVersusFloatGraph(QuantizeNodes, float_graph_def, {}, {}, {"fake_quant_op"}, {}, 1.0, &converted_graph_def); int requantize_count = 0; for (const NodeDef& node : converted_graph_def.node()) { EXPECT_NE("FakeQuantWithMinMaxVars", node.op()); if (node.op() == "Requantize") { ++requantize_count; } } EXPECT_EQ(1, requantize_count); } void TestHoistFakeQuants() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor input_tensor(DT_FLOAT, TensorShape({1, 1, 2, 6})); test::FillIota<float>(&input_tensor, 1); Output input_op = Const(root.WithOpName("input_op"), Input::Initializer(input_tensor)); Tensor offset_tensor(DT_FLOAT, TensorShape({6})); test::FillIota<float>(&offset_tensor, 1); Output offset_op = Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor)); Output bias_add_op = BiasAdd(root.WithOpName("bias_add_op"), input_op, offset_op); Output relu_op = Relu(root.WithOpName("relu_op"), bias_add_op); Output max_pool_op = MaxPool(root.WithOpName("max_pool_op"), relu_op, {1, 2, 2, 1}, {1, 1, 1, 1}, "SAME"); Tensor fake_quant_min_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&fake_quant_min_tensor, {0.0f}); Output fake_quant_min_op = Const(root.WithOpName("fake_quant_min_op"), Input::Initializer(fake_quant_min_tensor)); Tensor fake_quant_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&fake_quant_max_tensor, {18.0f}); Output fake_quant_max_op = Const(root.WithOpName("fake_quant_max_op"), Input::Initializer(fake_quant_max_tensor)); Output fake_quant_op = FakeQuantWithMinMaxVars(root.WithOpName("fake_quant_op"), max_pool_op, fake_quant_min_op, fake_quant_max_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef converted_graph_def; TestTransformedVersusFloatGraph(HoistFakeQuants, float_graph_def, {}, {}, {"fake_quant_op"}, {}, 1.0, &converted_graph_def); std::map<string, const NodeDef*> node_map; MapNamesToNodes(converted_graph_def, &node_map); EXPECT_EQ("MaxPool", node_map.at("fake_quant_op")->op()); EXPECT_EQ("FakeQuantWithMinMaxVars", node_map.at(node_map.at("relu_op")->input(0))->op()); } void TestMergeDuplicateQuantizes() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor quantized_tensor(DT_QUINT8, TensorShape({})); test::FillValues<quint8>(&quantized_tensor, {0}); Output quantized_op = Const(root.WithOpName("quantized_op"), Input::Initializer(quantized_tensor)); Tensor quantized_min_tensor(DT_FLOAT, TensorShape({})); 
test::FillValues<float>(&quantized_min_tensor, {2.0f}); Output quantized_min_op = Const(root.WithOpName("quantized_min_op"), Input::Initializer(quantized_min_tensor)); Tensor quantized_max_tensor(DT_FLOAT, TensorShape({})); test::FillValues<float>(&quantized_max_tensor, {2.0f}); Output quantized_max_op = Const(root.WithOpName("quantized_max_op"), Input::Initializer(quantized_min_tensor)); Output dequantize_op = Dequantize(root.WithOpName("dequantize_op"), quantized_op, quantized_min_op, quantized_max_op); Tensor quantize_reshape_dims1_tensor(DT_INT32, TensorShape({1})); test::FillValues<int32>(&quantize_reshape_dims1_tensor, {-1}); Output quantize_reshape_dims1 = Const(root.WithOpName("dequantize_reshape_dims1"), Input::Initializer(quantize_reshape_dims1_tensor)); Tensor quantize_reduction_dims1_tensor(DT_INT32, TensorShape({})); test::FillValues<int32>(&quantize_reduction_dims1_tensor, {0}); Output quantize_reduction_dims1 = Const(root.WithOpName("quantize_reduction_dims1"), Input::Initializer(quantize_reduction_dims1_tensor)); Output quantize_reshape1 = Reshape(root.WithOpName("quantize_reshape1"), dequantize_op, quantize_reshape_dims1); Output quantize_min1 = Min(root.WithOpName("quantize_min1"), quantize_reshape1, quantize_reduction_dims1, Min::Attrs().KeepDims(false)); Output quantize_max1 = Max(root.WithOpName("quantize_max1"), quantize_reshape1, quantize_reduction_dims1, Max::Attrs().KeepDims(false)); QuantizeV2 quantize_op1(root.WithOpName("quantize_op1"), dequantize_op, quantize_min1, quantize_max1, DT_QUINT8, QuantizeV2::Attrs().Mode("MIN_FIRST")); Tensor quantize_reshape_dims2_tensor(DT_INT32, TensorShape({1})); test::FillValues<int32>(&quantize_reshape_dims2_tensor, {-1}); Output quantize_reshape_dims2 = Const(root.WithOpName("dequantize_reshape_dims2"), Input::Initializer(quantize_reshape_dims2_tensor)); Tensor quantize_reduction_dims2_tensor(DT_INT32, TensorShape({})); test::FillValues<int32>(&quantize_reduction_dims2_tensor, {0}); Output quantize_reduction_dims2 = Const(root.WithOpName("quantize_reduction_dims2"), Input::Initializer(quantize_reduction_dims2_tensor)); Output quantize_reshape2 = Reshape(root.WithOpName("quantize_reshape2"), dequantize_op, quantize_reshape_dims2); Output quantize_min2 = Min(root.WithOpName("quantize_min2"), quantize_reshape2, quantize_reduction_dims2, Min::Attrs().KeepDims(false)); Output quantize_max2 = Max(root.WithOpName("quantize_max2"), quantize_reshape2, quantize_reduction_dims2, Max::Attrs().KeepDims(false)); QuantizeV2 quantize_op2(root.WithOpName("quantize_op2"), dequantize_op, quantize_min1, quantize_max1, DT_QUINT8, QuantizeV2::Attrs().Mode("MIN_FIRST")); Output final_dequantize1 = Dequantize(root.WithOpName("final_dequantize1"), quantize_op1.output, quantize_op1.output_min, quantize_op1.output_max); Output final_dequantize2 = Dequantize(root.WithOpName("final_dequantize2"), quantize_op2.output, quantize_op2.output_min, quantize_op2.output_max); Output add_op = Add(root.WithOpName("add_op"), final_dequantize1, final_dequantize2); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef merged_graph_def; TestTransformedVersusFloatGraph(MergeDuplicateNodes, float_graph_def, {}, {}, {"add_op"}, {}, 1.0, &merged_graph_def); std::map<string, int> op_map; for (const NodeDef& node : merged_graph_def.node()) { ++op_map[node.op()]; } EXPECT_EQ(1, op_map["QuantizeV2"]); } void TestMergeDuplicateConsts() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; const int width = 10; Tensor 
a_tensor(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&a_tensor, 1.0f); Output a_op = Const(root.WithOpName("a_op"), Input::Initializer(a_tensor)); Tensor b_tensor(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&b_tensor, 1.0f); Output b_op = Const(root.WithOpName("b_op"), Input::Initializer(b_tensor)); Output add_op = Add(root.WithOpName("add_op"), a_op, b_op); Tensor c_tensor(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&c_tensor, 2.0f); Output c_op = Const(root.WithOpName("c_op"), Input::Initializer(c_tensor)); Output mul_op = Mul(root.WithOpName("mul_op"), add_op, c_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef merged_graph_def; TestTransformedVersusFloatGraph(MergeDuplicateNodes, float_graph_def, {}, {}, {"mul_op"}, {}, 1.0, &merged_graph_def); std::map<string, const NodeDef*> node_map; MapNamesToNodes(merged_graph_def, &node_map); EXPECT_EQ(1, (node_map.count("a_op") + node_map.count("b_op"))); string remaining_const; if (node_map.count("a_op")) { remaining_const = "a_op"; } else { remaining_const = "b_op"; } EXPECT_EQ(remaining_const, node_map["add_op"]->input(0)); EXPECT_EQ(remaining_const, node_map["add_op"]->input(1)); EXPECT_EQ(1, node_map.count("c_op")); EXPECT_EQ("add_op", node_map["mul_op"]->input(0)); EXPECT_EQ("c_op", node_map["mul_op"]->input(1)); } void TestMergeDuplicatesNested() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; const int width = 10; Tensor a_tensor(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&a_tensor, 1.0f); Output a_op = Const(root.WithOpName("a_op"), Input::Initializer(a_tensor)); Output a_relu_op = Relu(root.WithOpName("a_relu_op"), a_op); Tensor b_tensor(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&b_tensor, 1.0f); Output b_op = Const(root.WithOpName("b_op"), Input::Initializer(b_tensor)); Output b_relu_op = Relu(root.WithOpName("b_relu_op"), b_op); Output add_op = Add(root.WithOpName("add_op"), a_relu_op, b_relu_op); Tensor c_tensor(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&c_tensor, 2.0f); Output c_op = Const(root.WithOpName("c_op"), Input::Initializer(c_tensor)); Output mul_op = Mul(root.WithOpName("mul_op"), add_op, c_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef merged_graph_def; TestTransformedVersusFloatGraph(MergeDuplicateNodes, float_graph_def, {}, {}, {"mul_op"}, {}, 1.0, &merged_graph_def); std::map<string, const NodeDef*> node_map; MapNamesToNodes(merged_graph_def, &node_map); EXPECT_EQ(1, (node_map.count("a_op") + node_map.count("b_op"))); EXPECT_EQ(1, (node_map.count("a_relu_op") + node_map.count("b_relu_op"))); string remaining_relu; if (node_map.count("a_relu_op")) { remaining_relu = "a_relu_op"; } else { remaining_relu = "b_relu_op"; } EXPECT_EQ(remaining_relu, node_map["add_op"]->input(0)); EXPECT_EQ(remaining_relu, node_map["add_op"]->input(1)); EXPECT_EQ(1, node_map.count("c_op")); EXPECT_EQ("add_op", node_map["mul_op"]->input(0)); EXPECT_EQ("c_op", node_map["mul_op"]->input(1)); } void TestMergeDuplicatesInOut() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; const int width = 10; Tensor a_tensor(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&a_tensor, 1.0f); Output a_op = Const(root.WithOpName("a_op"), Input::Initializer(a_tensor)); Output a_relu_op = Relu(root.WithOpName("a_relu_op"), a_op); Tensor b_tensor(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&b_tensor, 1.0f); Output b_op = 
Const(root.WithOpName("b_op"), Input::Initializer(b_tensor)); Output b_relu_op = Relu(root.WithOpName("b_relu_op"), b_op); Output add_op = Add(root.WithOpName("add_op"), a_relu_op, b_relu_op); Tensor c_tensor(DT_FLOAT, TensorShape({width})); test::FillIota<float>(&c_tensor, 2.0f); Output c_op = Const(root.WithOpName("c_op"), Input::Initializer(c_tensor)); Output mul_op1 = Mul(root.WithOpName("mul_op1"), add_op, c_op); Output mul_op2 = Mul(root.WithOpName("mul_op2"), add_op, c_op); Output mul_op3 = Mul(root.WithOpName("mul_op3"), add_op, c_op); Output final_mul_op = Mul(root.WithOpName("final_mul_op"), mul_op2, mul_op3); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef merged_graph_def; TestTransformedVersusFloatGraph(MergeDuplicateNodes, float_graph_def, {{"a_op", a_tensor}}, {{"a_op", a_tensor}}, {"mul_op1", "final_mul_op"}, {}, 1.0, &merged_graph_def); std::map<string, const NodeDef*> node_map; MapNamesToNodes(merged_graph_def, &node_map); EXPECT_EQ(1, node_map.count("a_op")); EXPECT_EQ(1, node_map.count("b_op")); EXPECT_EQ(1, node_map.count("a_relu_op")); EXPECT_EQ(1, node_map.count("b_relu_op")); EXPECT_EQ(1, node_map.count("mul_op1")); EXPECT_EQ(1, node_map.count("final_mul_op")); EXPECT_EQ(1, (node_map.count("mul_op2") + node_map.count("mul_op3"))); string remaining_mul; if (node_map.count("mul_op2")) { remaining_mul = "mul_op2"; } else { remaining_mul = "mul_op3"; } EXPECT_EQ(remaining_mul, node_map["final_mul_op"]->input(0)); EXPECT_EQ(remaining_mul, node_map["final_mul_op"]->input(1)); EXPECT_EQ(1, node_map.count("c_op")); EXPECT_EQ("add_op", node_map["mul_op1"]->input(0)); EXPECT_EQ("c_op", node_map["mul_op1"]->input(1)); } void TestExcludeNonFloat() { auto root = tensorflow::Scope::NewRootScope(); using namespace ::tensorflow::ops; Tensor int_constant_tensor(DT_INT32, TensorShape({4, 5})); test::FillIota<int32>(&int_constant_tensor, 1); Output int_constant = Const(root.WithOpName("int_constant"), Input::Initializer(int_constant_tensor)); Tensor float_constant_tensor(DT_FLOAT, TensorShape({4, 5})); test::FillIota<float>(&float_constant_tensor, 2.0f); Output float_constant = Const(root.WithOpName("float_constant"), Input::Initializer(float_constant_tensor)); Output excluded_reshape_op = Reshape(root.WithOpName("excluded_reshape_op"), int_constant, {10, 2}); Output included_reshape_op = Reshape(root.WithOpName("included_reshape_op"), float_constant, {10, 2}); Output excluded_relu_op = Relu(root.WithOpName("excluded_relu_op"), excluded_reshape_op); Output excluded_float_caster = Cast( root.WithOpName("excluded_float_caster"), excluded_relu_op, DT_FLOAT); Output included_relu_op = Relu(root.WithOpName("included_relu_op"), included_reshape_op); GraphDef float_graph_def; TF_ASSERT_OK(root.ToGraphDef(&float_graph_def)); GraphDef quantized_graph_def; TestTransformedVersusFloatGraph( QuantizeNodes, float_graph_def, {}, {}, {"excluded_float_caster", "included_relu_op"}, {}, 1.0, &quantized_graph_def); std::map<string, const NodeDef*> node_map; MapNamesToNodes(quantized_graph_def, &node_map); ASSERT_EQ(1, node_map.count("excluded_reshape_op")); EXPECT_EQ("Reshape", node_map.at("excluded_reshape_op")->op()); ASSERT_EQ(1, node_map.count("included_reshape_op")); EXPECT_EQ("Dequantize", node_map.at("included_reshape_op")->op()); } }; TEST_F(QuantizeNodesTest, TestIgnoreOps) { TestIgnoreOps({}); TestIgnoreOps({"MatMul"}); TestIgnoreOps({"MatMul", "Mul"}); } TEST_F(QuantizeNodesTest, TestQuantizeMatMulTiny) { TestQuantizeMatMulTiny(); } TEST_F(QuantizeNodesTest, 
TestQuantizeMatMulSmall) { TestQuantizeMatMulSmall(); } TEST_F(QuantizeNodesTest, TestQuantizeMul) { TestQuantizeMul(); } TEST_F(QuantizeNodesTest, TestQuantizeAdd) { TestQuantizeAdd(); } TEST_F(QuantizeNodesTest, TestOddPaddingProblem) { TestQuantizeConv2D(1, 4, 4, 1, 3, 1, 2, "SAME", {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, {1, 2, 3, 4, 5, 6, 7, 8, 9}); } TEST_F(QuantizeNodesTest, TestQuantizeConv2D) { TestQuantizeConv2D(1, 4, 3, 1, 3, 1, 1, "SAME", {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}, {1, 4, 7, 2, 5, 8, 3, 6, 9}); } TEST_F(QuantizeNodesTest, TestQuantizeBiasAdd) { TestQuantizeBiasAdd(); } TEST_F(QuantizeNodesTest, TestQuantizeConcat) { TestQuantizeConcat(); } TEST_F(QuantizeNodesTest, TestQuantizeRelu) { TestQuantizeRelu(); } TEST_F(QuantizeNodesTest, TestQuantizeRelu6) { TestQuantizeRelu6(); } TEST_F(QuantizeNodesTest, TestQuantizeMaxPool) { TestQuantizeMaxPool(); } TEST_F(QuantizeNodesTest, TestQuantizeAvgPool) { TestQuantizeAvgPool(); } TEST_F(QuantizeNodesTest, TestQuantizeReshape) { TestQuantizeReshape(); } TEST_F(QuantizeNodesTest, TestQuantizeResizeBilinear) { TestQuantizeResizeBilinear(); } TEST_F(QuantizeNodesTest, TestRemoveRedundantQuantization) { TestRemoveRedundantQuantization(); } TEST_F(QuantizeNodesTest, TestRemoveRedundantQuantizationWithBiasAdd) { TestRemoveRedundantQuantizationWithBiasAdd(); } TEST_F(QuantizeNodesTest, TestRemoveRedundantQuantizationWithMultipleOutputs) { TestRemoveRedundantQuantizationWithMultipleOutputs(); } TEST_F(QuantizeNodesTest, TestQuantizePlaceholders) { TestQuantizePlaceholders(); } TEST_F(QuantizeNodesTest, TestInputRange) { TestInputRange(); } TEST_F(QuantizeNodesTest, TestFallbackRange) { TestFallbackRange(); } TEST_F(QuantizeNodesTest, TestConvertFakeQuantsToRequantize) { TestConvertFakeQuantsToRequantize(); } TEST_F(QuantizeNodesTest, TestMergeAdjacentRequantizes) { TestMergeAdjacentRequantizes(); } TEST_F(QuantizeNodesTest, TestConvertFakeQuantsEndToEnd) { TestConvertFakeQuantsEndToEnd(); } TEST_F(QuantizeNodesTest, TestHoistFakeQuants) { TestHoistFakeQuants(); } TEST_F(QuantizeNodesTest, TestMergeDuplicateQuantizes) { TestMergeDuplicateQuantizes(); } TEST_F(QuantizeNodesTest, TestMergeDuplicateConsts) { TestMergeDuplicateConsts(); } TEST_F(QuantizeNodesTest, TestMergeDuplicatesNested) { TestMergeDuplicatesNested(); } TEST_F(QuantizeNodesTest, TestMergeDuplicateInOut) { TestMergeDuplicatesInOut(); } TEST_F(QuantizeNodesTest, TestExcludeNonFloat) { TestExcludeNonFloat(); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/quantize_nodes.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/quantize_nodes_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
2d10528f-a1a6-4888-a5d3-6ca40943cf7b
cpp
google/quiche
quiche_lower_case_string
quiche/common/platform/api/quiche_lower_case_string.h
quiche/common/platform/api/quiche_lower_case_string_test.cc
#ifndef QUICHE_COMMON_PLATFORM_API_QUICHE_LOWER_CASE_STRING_H_ #define QUICHE_COMMON_PLATFORM_API_QUICHE_LOWER_CASE_STRING_H_ #include "quiche_platform_impl/quiche_lower_case_string_impl.h" namespace quiche { using QuicheLowerCaseString = QuicheLowerCaseStringImpl; } #endif
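The header above is a thin platform-API shim: it only aliases whatever QuicheLowerCaseStringImpl the embedder's quiche_platform_impl provides. As a hypothetical illustration (the real per-platform impl header is not part of this record), an implementation satisfying the behavior exercised by the test below could look like:

// Hypothetical impl sketch, for illustration only -- the actual
// quiche_platform_impl/quiche_lower_case_string_impl.h is supplied per
// platform and is not shown here. It lower-cases its argument on
// construction and exposes the stored string via get(), which is the
// behavior the test below checks.
#include <string>
#include "absl/strings/ascii.h"
#include "absl/strings/string_view.h"

class QuicheLowerCaseStringImpl {
 public:
  explicit QuicheLowerCaseStringImpl(absl::string_view str)
      : string_(absl::AsciiStrToLower(str)) {}
  const std::string& get() const { return string_; }

 private:
  std::string string_;
};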
#include "quiche/common/platform/api/quiche_lower_case_string.h" #include "absl/strings/string_view.h" #include "quiche/common/platform/api/quiche_test.h" namespace quiche::test { namespace { TEST(QuicheLowerCaseString, Basic) { QuicheLowerCaseString empty(""); EXPECT_EQ("", empty.get()); QuicheLowerCaseString from_lower_case("foo"); EXPECT_EQ("foo", from_lower_case.get()); QuicheLowerCaseString from_mixed_case("BaR"); EXPECT_EQ("bar", from_mixed_case.get()); const absl::string_view kData = "FooBar"; QuicheLowerCaseString from_string_view(kData); EXPECT_EQ("foobar", from_string_view.get()); } } }
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/platform/api/quiche_lower_case_string.h
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/platform/api/quiche_lower_case_string_test.cc
6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
476923f3-b0e5-4116-ab25-59dc056d648d
cpp
tensorflow/tensorflow
grpc_dispatcher_impl
tensorflow/core/data/service/grpc_dispatcher_impl.cc
tensorflow/core/data/service/grpc_dispatcher_impl_test.cc
#include "tensorflow/core/data/service/grpc_dispatcher_impl.h" #include "grpcpp/server_context.h" #include "tensorflow/core/data/service/export.pb.h" #include "tensorflow/core/distributed_runtime/rpc/grpc_util.h" #include "tensorflow/core/protobuf/service_config.pb.h" namespace tensorflow { namespace data { using ::grpc::ServerBuilder; using ::grpc::ServerContext; GrpcDispatcherImpl::GrpcDispatcherImpl( const experimental::DispatcherConfig& config, ServerBuilder& server_builder) : impl_(config) { server_builder.RegisterService(this); VLOG(1) << "Registered data service dispatcher"; } Status GrpcDispatcherImpl::Start() { return impl_.Start(); } void GrpcDispatcherImpl::Stop() { impl_.Stop(); } size_t GrpcDispatcherImpl::NumActiveIterations() { return impl_.NumActiveIterations(); } DispatcherStateExport GrpcDispatcherImpl::ExportState() const { return impl_.ExportState(); } #define HANDLER(method) \ grpc::Status GrpcDispatcherImpl::method(ServerContext* context, \ const method##Request* request, \ method##Response* response) { \ return ToGrpcStatus(impl_.method(request, response)); \ } HANDLER(WorkerHeartbeat); HANDLER(WorkerUpdate); HANDLER(GetDatasetDef); HANDLER(GetSplit); HANDLER(GetVersion); HANDLER(GetOrRegisterDataset); HANDLER(ReleaseIterationClient); HANDLER(MaybeRemoveTask); HANDLER(GetOrCreateJob); HANDLER(GetOrCreateIteration); HANDLER(ClientHeartbeat); HANDLER(GetWorkers); HANDLER(GetDataServiceMetadata); HANDLER(GetDataServiceConfig); HANDLER(Snapshot); HANDLER(GetSnapshotSplit); HANDLER(GetSnapshotStreams); HANDLER(DisableCompressionAtRuntime); #undef HANDLER } }
#include "tensorflow/core/data/service/grpc_dispatcher_impl.h" #include <limits> #include <memory> #include <string> #include <utility> #include "grpcpp/channel.h" #include "grpcpp/client_context.h" #include "grpcpp/create_channel.h" #include "grpcpp/security/credentials.h" #include "grpcpp/support/channel_arguments.h" #include "absl/strings/str_cat.h" #include "tensorflow/core/data/service/common.h" #include "tensorflow/core/data/service/common.pb.h" #include "tensorflow/core/data/service/credentials_factory.h" #include "tensorflow/core/data/service/dispatcher.pb.h" #include "tensorflow/core/data/service/server_lib.h" #include "tensorflow/core/data/service/test_util.h" #include "tensorflow/core/distributed_runtime/rpc/grpc_util.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/protobuf/data_service.pb.h" #include "tensorflow/core/protobuf/service_config.pb.h" namespace tensorflow { namespace data { namespace { using ::grpc::Channel; using ::grpc::ChannelArguments; using ::grpc::ChannelCredentials; using ::grpc::ClientContext; constexpr const char kHostAddress[] = "localhost"; constexpr const char kProtocol[] = "grpc"; class GrpcDispatcherImplTest : public ::testing::Test { protected: void SetUp() override { TF_ASSERT_OK(SetUpDispatcherServer()); TF_ASSERT_OK(SetUpDispatcherClientStub()); } Status SetUpDispatcherServer() { experimental::DispatcherConfig config; config.set_protocol(kProtocol); TF_RETURN_IF_ERROR(NewDispatchServer(config, dispatcher_server_)); return dispatcher_server_->Start(); } Status SetUpDispatcherClientStub() { std::shared_ptr<ChannelCredentials> credentials; TF_RETURN_IF_ERROR( CredentialsFactory::CreateClientCredentials(kProtocol, &credentials)); ChannelArguments args; args.SetMaxReceiveMessageSize(std::numeric_limits<int32>::max()); args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, true); std::shared_ptr<Channel> channel = ::grpc::CreateCustomChannel(GetDispatcherAddress(), credentials, args); dispatcher_client_stub_ = DispatcherService::NewStub(channel); return absl::OkStatus(); } std::string GetDispatcherAddress() const { return absl::StrCat(kHostAddress, ":", dispatcher_server_->BoundPort()); } std::unique_ptr<DispatchGrpcDataServer> dispatcher_server_; std::unique_ptr<DispatcherService::Stub> dispatcher_client_stub_; }; TEST_F(GrpcDispatcherImplTest, GrpcTest) { ClientContext ctx; GetVersionRequest req; GetVersionResponse resp; TF_ASSERT_OK( FromGrpcStatus(dispatcher_client_stub_->GetVersion(&ctx, req, &resp))); EXPECT_EQ(resp.version(), kDataServiceVersion); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/grpc_dispatcher_impl.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/grpc_dispatcher_impl_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
145e02f9-e296-4fae-91d2-7cfe3e2ddd1e
cpp
tensorflow/tensorflow
mkl_conv_ops
tensorflow/core/kernels/mkl/mkl_conv_ops.cc
tensorflow/core/kernels/mkl/mkl_conv_ops_test.cc
#ifdef INTEL_MKL

#include "tensorflow/core/kernels/mkl/mkl_conv_ops.h"

#include <algorithm>
#include <map>
#include <string>
#include <unordered_map>

#include "absl/strings/str_join.h"
#include "tensorflow/core/kernels/mkl/mkl_kernel_util.h"
#include "tensorflow/core/kernels/mkl/mkl_quantized_conv_ops.h"
#include "tensorflow/core/kernels/no_op.h"
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
#include "tensorflow/core/platform/mutex.h"
#endif

using dnnl::convolution_forward;
using dnnl::prop_kind;
using dnnl::stream;
using ConvFwdPd = dnnl::convolution_forward::primitive_desc;
using ReorderPd = dnnl::reorder::primitive_desc;

namespace tensorflow {

// Compatibility macros that paper over API differences between oneDNN v2
// (!ENABLE_ONEDNN_V3) and oneDNN v3 (ENABLE_ONEDNN_V3).
#ifndef ENABLE_ONEDNN_V3
#define APPEND_DEPTHWISE(wei_dt, bias_dt, dst_dt, kernel, stride, padding, \
                         scales_mask, scales)                              \
  append_dw(wei_dt, bias_dt, dst_dt, kernel, stride, padding, scales_mask, \
            scales)
#define APPEND_ELTWISE(scale, alg, alpha, beta) \
  append_eltwise(scale, alg, alpha, beta)
#define GET_DATA_TYPE data_type()
#define SET_FUSE_ACTIVATION_FOR_RELU6 \
  set_fuse_activation(true, dnnl::algorithm::eltwise_bounded_relu, 6.0)
#define SET_MKL_LAYOUT(md) SetMklLayout(&md)
#define OUTPUT_SCALE_DCHECK (post_op_param.name == "output_scale")
#define TSCALED_BIAS Tbias
#define SCALE scales
#define SUMMAND_SCALE_U8(summand_range, output_range) \
  summand_range / output_range
#define SUMMAND_SCALE_S8(summand_range, output_range) \
  255.0f * summand_range / (output_range * 127.0f)
#else
#define APPEND_DEPTHWISE(wei_dt, bias_dt, dst_dt, kernel, stride, padding, \
                         scales_mask, scales)                              \
  append_dw(wei_dt, bias_dt, dst_dt, kernel, stride, padding)
#define APPEND_ELTWISE(scale, alg, alpha, beta) \
  append_eltwise(alg, alpha, beta);             \
  (void)scale
#define GET_DATA_TYPE get_data_type()
#define SET_FUSE_ACTIVATION_FOR_RELU6 \
  set_fuse_activation(true, dnnl::algorithm::eltwise_clip, 0.0, 6.0)
#define SET_MKL_LAYOUT(md) SetMklLayout(md)
#define OUTPUT_SCALE_DCHECK                  \
  (post_op_param.name == "src_scale") ||     \
      (post_op_param.name == "wei_scale") || \
      (post_op_param.name == "dst_scale")
#define TSCALED_BIAS float
#define SCALE wei_scale
#define SUMMAND_SCALE_U8(summand_range, output_range) summand_range / 255.0f
#define SUMMAND_SCALE_S8(summand_range, output_range) summand_range / 127.0f
#endif  // !ENABLE_ONEDNN_V3

#if !defined(ENABLE_ONEDNN_OPENMP) && !defined(ENABLE_ONEDNN_V3)
#define FWD_STREAM , *fwd_stream
#else
#define FWD_STREAM
#endif

namespace quantized_fusions {
string none[] = {""};
string bias[] = {"BiasAdd"};
string relu[] = {"Relu"};
string requantize[] = {"Requantize"};
string bias_relu[] = {"BiasAdd", "Relu"};
string bias_requantize[] = {"BiasAdd", "Requantize"};
string relu_requantize[] = {"Relu", "Requantize"};
string bias_relu_requantize[] = {"BiasAdd", "Relu", "Requantize"};
string bias_sum_relu[] = {"BiasAdd", "Sum", "Relu"};
string bias_sum_relu_requantize[] = {"BiasAdd", "Sum", "Relu", "Requantize"};
}  // namespace quantized_fusions

struct MklConvFwdParams {
  memory::dims src_dims;
  memory::dims filter_dims;
  memory::dims bias_dims;
  memory::dims dst_dims;
  memory::dims strides;
  memory::dims dilations;
  memory::dims padding_left;
  memory::dims padding_right;
  memory::dims fuse_bn_dims;
  MklTensorFormat tf_fmt;
  bool native_format;
  bool is_depthwise;
  bool is_filter_const = false;
  string dtypes = string("");
  struct PostOpParam {
    string name;
    dnnl::algorithm alg;
    std::vector<float> param;
    std::string partial_key;
    DataType dtype = DT_INVALID;
  };
  std::vector<PostOpParam> post_op_params;

  MklConvFwdParams(memory::dims src_dims, memory::dims filter_dims,
                   memory::dims bias_dims,
                   memory::dims dst_dims, memory::dims strides,
                   memory::dims dilations, memory::dims padding_left,
                   memory::dims padding_right, memory::dims fuse_bn_dims,
                   MklTensorFormat tf_fmt, bool native_format,
                   bool is_depthwise, bool is_filter_const)
      : src_dims(src_dims),
        filter_dims(filter_dims),
        bias_dims(bias_dims),
        dst_dims(dst_dims),
        strides(strides),
        dilations(dilations),
        padding_left(padding_left),
        padding_right(padding_right),
        fuse_bn_dims(fuse_bn_dims),
        tf_fmt(tf_fmt),
        native_format(native_format),
        is_depthwise(is_depthwise),
        is_filter_const(is_filter_const) {}
};

template <typename Tinput, typename Tfilter, typename Tbias, typename Toutput>
class MklConvFwdPrimitive : public MklPrimitive {
 public:
  explicit MklConvFwdPrimitive(const MklConvFwdParams& convFwdDims)
      : MklPrimitive(engine(engine::kind::cpu, 0)) {
    if (context_.conv_fwd == nullptr) {
      Setup(convFwdDims);
    }
  }

  ~MklConvFwdPrimitive() {}

  dnnl::memory::desc GetScratchPadDesc() {
    return context_.fwd_pd->scratchpad_desc();
  }

  void Execute(const Tinput* src_data, const Tfilter* filter_data,
               const void* bias_data, const Toutput* dst_data,
               const MklConvFwdParams& convFwdDims,
               std::shared_ptr<stream> fwd_stream, void* sp_data = nullptr) {
    Execute(src_data, filter_data, bias_data, dst_data, nullptr, nullptr,
            nullptr, nullptr, convFwdDims, fwd_stream, sp_data);
  }

  void Execute(const Tinput* src_data, const Tfilter* filter_data,
               const void* bias_data, const Toutput* dst_data,
               const Tinput* bn_scale_data, const Tinput* bn_mean_data,
               const Tinput* bn_offset_data, const Tinput* bn_rsqrt_data,
               const MklConvFwdParams& convFwdDims,
               std::shared_ptr<stream> fwd_stream, void* sp_data) {
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
    mutex_lock lock(primitive_execution_mu_);
#endif
    context_.src_mem->set_data_handle(
        static_cast<void*>(const_cast<Tinput*>(src_data)) FWD_STREAM);
    context_.filter_mem->set_data_handle(
        static_cast<void*>(const_cast<Tfilter*>(filter_data)) FWD_STREAM);
    if (bias_data != nullptr) {
      context_.bias_mem->set_data_handle(const_cast<void*>(bias_data)
                                             FWD_STREAM);
    }
    auto const& post_op_params = convFwdDims.post_op_params;
    if (!post_op_params.empty()) {
      for (auto const& post_op_param : post_op_params) {
        if (post_op_param.name == "src_scale") {
          context_.src_scale_mem->set_data_handle(static_cast<void*>(
              const_cast<float*>(post_op_param.param.data())) FWD_STREAM);
        } else if (post_op_param.name == "wei_scale") {
          context_.wei_scale_mem->set_data_handle(static_cast<void*>(
              const_cast<float*>(post_op_param.param.data())) FWD_STREAM);
        } else if (post_op_param.name == "dst_scale") {
          context_.dst_scale_mem->set_data_handle(static_cast<void*>(
              const_cast<float*>(post_op_param.param.data())) FWD_STREAM);
        }
      }
    }
    if (bn_scale_data != nullptr) {
      context_.bn_scale_mem->set_data_handle(
          static_cast<void*>(const_cast<Tinput*>(bn_scale_data)) FWD_STREAM);
      context_.bn_mean_mem->set_data_handle(
          static_cast<void*>(const_cast<Tinput*>(bn_mean_data)) FWD_STREAM);
      context_.bn_rsqrt_mem->set_data_handle(
          static_cast<void*>(const_cast<Tinput*>(bn_rsqrt_data)) FWD_STREAM);
      context_.bn_offset_mem->set_data_handle(
          static_cast<void*>(const_cast<Tinput*>(bn_offset_data)) FWD_STREAM);
    }
    context_.dst_mem->set_data_handle(
        static_cast<void*>(const_cast<Toutput*>(dst_data)) FWD_STREAM);
    if (sp_data) {
      context_.sp_mem->set_data_handle(static_cast<void*>(sp_data)
                                           FWD_STREAM);
    }
    DCHECK_EQ(context_.fwd_primitives.size(),
              context_.fwd_primitives_args.size());
    for (size_t i = 0; i < context_.fwd_primitives.size(); ++i) {
      context_.fwd_primitives.at(i).execute(
          *fwd_stream, context_.fwd_primitives_args.at(i));
    }
    // Reset data handles back to the dummy placeholder after execution.
    context_.src_mem->set_data_handle(DummyData);
    context_.filter_mem->set_data_handle(DummyData);
    if (bias_data != nullptr) {
      context_.bias_mem->set_data_handle(DummyData);
    }
    if (bn_scale_data != nullptr) {
      context_.bn_scale_mem->set_data_handle(DummyData);
      context_.bn_mean_mem->set_data_handle(DummyData);
      context_.bn_rsqrt_mem->set_data_handle(DummyData);
      context_.bn_offset_mem->set_data_handle(DummyData);
    }
    context_.dst_mem->set_data_handle(DummyData);
    if (sp_data) {
      context_.sp_mem->set_data_handle(DummyData);
    }
  }

  void Execute(const Tinput* src_data, const Tfilter* filter_data,
               const Toutput* dst_data, const MklConvFwdParams& convFwdDims,
               std::shared_ptr<stream> fwd_stream, void* sp_data) {
    Execute(src_data, filter_data, nullptr, dst_data, nullptr, nullptr,
            nullptr, nullptr, convFwdDims, fwd_stream, sp_data);
  }

  std::shared_ptr<ConvFwdPd> GetPrimitiveDesc() const {
    return context_.fwd_pd;
  }

 private:
  struct ConvFwdContext {
    std::shared_ptr<dnnl::memory> src_mem;
    std::shared_ptr<dnnl::memory> filter_mem;
    std::shared_ptr<dnnl::memory> bias_mem;
    std::shared_ptr<dnnl::memory> dst_mem;
    std::shared_ptr<dnnl::memory> sp_mem;
    std::shared_ptr<dnnl::memory> bn_scale_mem;
    std::shared_ptr<dnnl::memory> bn_mean_mem;
    std::shared_ptr<dnnl::memory> bn_rsqrt_mem;
    std::shared_ptr<dnnl::memory> bn_offset_mem;
    std::shared_ptr<dnnl::memory> src_scale_mem;
    std::shared_ptr<dnnl::memory> wei_scale_mem;
    std::shared_ptr<dnnl::memory> dst_scale_mem;
#ifndef ENABLE_ONEDNN_V3
    std::shared_ptr<dnnl::convolution_forward::desc> fwd_desc;
#endif
    std::shared_ptr<ConvFwdPd> fwd_pd;
    std::shared_ptr<dnnl::memory::desc> src_md;
    std::shared_ptr<dnnl::memory::desc> filter_md;
    std::shared_ptr<dnnl::memory::desc> bias_md;
    std::shared_ptr<dnnl::memory::desc> dst_md;
    std::shared_ptr<dnnl::memory::desc> bn_scale_md;
    std::shared_ptr<dnnl::memory::desc> bn_mean_md;
    std::shared_ptr<dnnl::memory::desc> bn_rsqrt_md;
    std::shared_ptr<dnnl::memory::desc> bn_offset_md;
    std::shared_ptr<dnnl::memory::desc> src_scale_md;
    std::shared_ptr<dnnl::memory::desc> wei_scale_md;
    std::shared_ptr<dnnl::memory::desc> dst_scale_md;
    std::shared_ptr<dnnl::primitive> conv_fwd;
    std::vector<dnnl::primitive> fwd_primitives;
    std::vector<std::unordered_map<int, memory>> fwd_primitives_args;

    ConvFwdContext()
        : src_mem(nullptr),
          filter_mem(nullptr),
          bias_mem(nullptr),
          dst_mem(nullptr),
          sp_mem(nullptr),
          bn_scale_mem(nullptr),
          bn_mean_mem(nullptr),
          bn_rsqrt_mem(nullptr),
          bn_offset_mem(nullptr),
          src_scale_mem(nullptr),
          wei_scale_mem(nullptr),
          dst_scale_mem(nullptr),
#ifndef ENABLE_ONEDNN_V3
          fwd_desc(nullptr),
#endif
          fwd_pd(nullptr),
          src_md(nullptr),
          filter_md(nullptr),
          bias_md(nullptr),
          dst_md(nullptr),
          bn_scale_md(nullptr),
          bn_mean_md(nullptr),
          bn_rsqrt_md(nullptr),
          bn_offset_md(nullptr),
          src_scale_md(nullptr),
          wei_scale_md(nullptr),
          dst_scale_md(nullptr),
          conv_fwd(nullptr) {}
  };

  void Setup(const MklConvFwdParams& convFwdDims) {
    memory::format_tag user_data_fmt;
    if (convFwdDims.native_format) {
      user_data_fmt = MklTensorFormatToMklDnnDataFormat(convFwdDims.tf_fmt);
    } else {
      user_data_fmt = memory::format_tag::any;
    }
    context_.src_md.reset(new memory::desc(
        {convFwdDims.src_dims}, MklDnnType<Tinput>(), user_data_fmt));
    if (convFwdDims.filter_dims.size() == 4 && !convFwdDims.is_filter_const &&
        std::is_same<Tfilter, float>::value &&
        convFwdDims.src_dims[MklDnnDims::Dim_N] == 1) {
      context_.filter_md.reset(new memory::desc({convFwdDims.filter_dims},
                                                MklDnnType<Tfilter>(),
                                                memory::format_tag::hwio));
    } else {
      context_.filter_md.reset(new memory::desc({convFwdDims.filter_dims},
                                                MklDnnType<Tfilter>(),
                                                memory::format_tag::any));
    }
    context_.dst_md.reset(new memory::desc(
        {convFwdDims.dst_dims}, MklDnnType<Toutput>(), user_data_fmt));
    if (!convFwdDims.bias_dims.empty()) {
      if (std::is_same<Tbias, qint32>::value) {
        context_.bias_md.reset(new memory::desc({convFwdDims.bias_dims},
                                                MklDnnType<TSCALED_BIAS>(),
                                                memory::format_tag::any));
      } else {
        context_.bias_md.reset(new memory::desc({convFwdDims.bias_dims},
                                                MklDnnType<Tbias>(),
                                                memory::format_tag::any));
      }
#ifndef ENABLE_ONEDNN_V3
      context_.fwd_desc.reset(new convolution_forward::desc(
          prop_kind::forward, dnnl::algorithm::convolution_direct,
          *context_.src_md, *context_.filter_md, *context_.bias_md,
          *context_.dst_md, convFwdDims.strides, convFwdDims.dilations,
          convFwdDims.padding_left, convFwdDims.padding_right));
    } else {
      context_.fwd_desc.reset(new convolution_forward::desc(
          prop_kind::forward, dnnl::algorithm::convolution_direct,
          *context_.src_md, *context_.filter_md, *context_.dst_md,
          convFwdDims.strides, convFwdDims.dilations,
          convFwdDims.padding_left, convFwdDims.padding_right));
#endif
    }
    if (!convFwdDims.fuse_bn_dims.empty()) {
      const memory::format_tag fused_bn_arg_fmt =
          convFwdDims.native_format
              ? user_data_fmt
              : MklTensorFormatToMklDnnDataFormat(convFwdDims.tf_fmt);
      context_.bn_scale_md.reset(new memory::desc(
          {convFwdDims.fuse_bn_dims}, MklDnnType<Tinput>(), fused_bn_arg_fmt));
      context_.bn_mean_md.reset(new memory::desc(
          {convFwdDims.fuse_bn_dims}, MklDnnType<Tinput>(), fused_bn_arg_fmt));
      context_.bn_rsqrt_md.reset(new memory::desc(
          {convFwdDims.fuse_bn_dims}, MklDnnType<Tinput>(), fused_bn_arg_fmt));
      context_.bn_offset_md.reset(new memory::desc(
          {convFwdDims.fuse_bn_dims}, MklDnnType<Tinput>(), fused_bn_arg_fmt));
    }
    auto const& post_op_params = convFwdDims.post_op_params;
    dnnl::primitive_attr post_ops_attr;
    dnnl::post_ops post_ops;
    post_ops_attr.set_scratchpad_mode(dnnl::scratchpad_mode::user);
    std::unordered_map<string, bool> is_scale_set;
    if (!post_op_params.empty()) {
      for (auto const& post_op_param : post_op_params) {
        if (post_op_param.name == "activation") {
          DCHECK_EQ(post_op_param.param.size(), 3);
          float op_scale = post_op_param.param[0];
          float op_alpha = post_op_param.param[1];
          float op_beta = post_op_param.param[2];
          post_ops.APPEND_ELTWISE(op_scale, post_op_param.alg, op_alpha,
                                  op_beta);
        } else if (post_op_param.name == "sum") {
          DCHECK_EQ(post_op_param.param.size(), 1);
          float op_scale = post_op_param.param[0];
#ifndef ENABLE_ONEDNN_V3
          post_ops.append_sum(op_scale);
#else
          if (post_op_param.dtype != DT_INVALID) {
            if (post_op_param.dtype == DT_FLOAT) {
              post_ops.append_sum(op_scale, 0, MklDnnType<float>());
            } else {
              TF_CHECK_OK(absl::FailedPreconditionError(
                  "Summand data type is expected to be float"));
            }
          } else {
            post_ops.append_sum(op_scale);
          }
#endif
#ifndef ENABLE_ONEDNN_V3
        } else if (post_op_param.name == "output_scale") {
          if (post_op_param.param.size() == 1) {
            post_ops_attr.set_output_scales(0, post_op_param.param);
          } else {
            post_ops_attr.set_output_scales(2, post_op_param.param);
          }
#else
        } else if (post_op_param.name == "src_scale") {
          is_scale_set.insert({"src", true});
          post_ops_attr.set_scales_mask(DNNL_ARG_SRC, 0);
          context_.src_scale_md.reset(new memory::desc(
              {1}, MklDnnType<float>(), memory::format_tag::x));
          context_.src_scale_mem.reset(
              new memory(*context_.src_scale_md, cpu_engine_, DummyData));
        } else if (post_op_param.name == "wei_scale") {
          is_scale_set.insert({"wei", true});
          const int
              scale_size = post_op_param.param.size();
          const int mask =
              scale_size == 1 ? 0 : convFwdDims.is_depthwise ? 3 : 1;
          post_ops_attr.set_scales_mask(DNNL_ARG_WEIGHTS, mask);
          context_.wei_scale_md.reset(new memory::desc(
              {scale_size}, MklDnnType<float>(), memory::format_tag::x));
          context_.wei_scale_mem.reset(
              new memory(*context_.wei_scale_md, cpu_engine_, DummyData));
        } else if (post_op_param.name == "dst_scale") {
          is_scale_set.insert({"dst", true});
          post_ops_attr.set_scales_mask(DNNL_ARG_DST, 0);
          context_.dst_scale_md.reset(new memory::desc(
              {1}, MklDnnType<float>(), memory::format_tag::x));
          context_.dst_scale_mem.reset(
              new memory(*context_.dst_scale_md, cpu_engine_, DummyData));
#endif
        } else if (post_op_param.name == "fuse_bn") {
          post_ops.append_binary(dnnl::algorithm::binary_sub,
                                 *context_.bn_mean_md);
          post_ops.append_binary(dnnl::algorithm::binary_mul,
                                 *context_.bn_rsqrt_md);
          post_ops.append_binary(dnnl::algorithm::binary_mul,
                                 *context_.bn_scale_md);
          post_ops.append_binary(dnnl::algorithm::binary_add,
                                 *context_.bn_offset_md);
        } else {
          DCHECK((post_op_param.name == "activation") ||
                 (post_op_param.name == "sum") || OUTPUT_SCALE_DCHECK ||
                 (post_op_param.name == "fuse_bn"));
        }
      }
      post_ops_attr.set_post_ops(post_ops);
    }
#ifndef ENABLE_ONEDNN_V3
    context_.fwd_pd.reset(
        new ConvFwdPd(*context_.fwd_desc, post_ops_attr, cpu_engine_));
#else
    if (!convFwdDims.bias_dims.empty()) {
      context_.fwd_pd.reset(new ConvFwdPd(
          cpu_engine_, prop_kind::forward, dnnl::algorithm::convolution_direct,
          *context_.src_md, *context_.filter_md, *context_.bias_md,
          *context_.dst_md, convFwdDims.strides, convFwdDims.dilations,
          convFwdDims.padding_left, convFwdDims.padding_right, post_ops_attr));
    } else {
      context_.fwd_pd.reset(new ConvFwdPd(
          cpu_engine_, prop_kind::forward, dnnl::algorithm::convolution_direct,
          *context_.src_md, *context_.filter_md, *context_.dst_md,
          convFwdDims.strides, convFwdDims.dilations,
          convFwdDims.padding_left, convFwdDims.padding_right, post_ops_attr));
    }
#endif
    context_.src_mem.reset(
        new memory(context_.fwd_pd.get()->src_desc(), cpu_engine_, DummyData));
    context_.filter_mem.reset(new memory(context_.fwd_pd.get()->weights_desc(),
                                         cpu_engine_, DummyData));
    context_.dst_mem.reset(
        new memory(context_.fwd_pd.get()->dst_desc(), cpu_engine_, DummyData));
    context_.conv_fwd.reset(new convolution_forward(*context_.fwd_pd));
    auto scratchpad_md = context_.fwd_pd->scratchpad_desc();
    context_.sp_mem.reset(
        new dnnl::memory(scratchpad_md, cpu_engine_, DummyData));
    std::unordered_map<int, memory> net_args;
    if (!convFwdDims.bias_dims.empty()) {
      context_.bias_mem.reset(new memory(context_.fwd_pd.get()->bias_desc(),
                                         cpu_engine_, DummyData));
      net_args = {{DNNL_ARG_SRC, *context_.src_mem},
                  {DNNL_ARG_WEIGHTS, *context_.filter_mem},
                  {DNNL_ARG_BIAS, *context_.bias_mem},
                  {DNNL_ARG_SCRATCHPAD, *context_.sp_mem},
                  {DNNL_ARG_DST, *context_.dst_mem}};
#ifdef ENABLE_ONEDNN_V3
      if (is_scale_set["src"] && is_scale_set["wei"] && is_scale_set["dst"]) {
        net_args.insert(
            {{DNNL_ARG_ATTR_SCALES | DNNL_ARG_SRC, *context_.src_scale_mem},
             {DNNL_ARG_ATTR_SCALES | DNNL_ARG_WEIGHTS,
              *context_.wei_scale_mem},
             {DNNL_ARG_ATTR_SCALES | DNNL_ARG_DST, *context_.dst_scale_mem}});
      }
#endif
    } else if (!convFwdDims.fuse_bn_dims.empty()) {
      context_.bn_scale_mem.reset(
          new memory(*context_.bn_scale_md, cpu_engine_, DummyData));
      context_.bn_mean_mem.reset(
          new memory(*context_.bn_mean_md, cpu_engine_, DummyData));
      context_.bn_offset_mem.reset(
          new memory(*context_.bn_offset_md, cpu_engine_, DummyData));
      context_.bn_rsqrt_mem.reset(new
          memory(*context_.bn_rsqrt_md, cpu_engine_, DummyData));
      net_args = {{DNNL_ARG_SRC, *context_.src_mem},
                  {DNNL_ARG_WEIGHTS, *context_.filter_mem},
                  {DNNL_ARG_DST, *context_.dst_mem},
                  {DNNL_ARG_SCRATCHPAD, *context_.sp_mem},
                  {DNNL_ARG_ATTR_MULTIPLE_POST_OP(0) | DNNL_ARG_SRC_1,
                   *context_.bn_mean_mem},
                  {DNNL_ARG_ATTR_MULTIPLE_POST_OP(1) | DNNL_ARG_SRC_1,
                   *context_.bn_rsqrt_mem},
                  {DNNL_ARG_ATTR_MULTIPLE_POST_OP(2) | DNNL_ARG_SRC_1,
                   *context_.bn_scale_mem},
                  {DNNL_ARG_ATTR_MULTIPLE_POST_OP(3) | DNNL_ARG_SRC_1,
                   *context_.bn_offset_mem}};
    } else {
      net_args = {{DNNL_ARG_SRC, *context_.src_mem},
                  {DNNL_ARG_WEIGHTS, *context_.filter_mem},
                  {DNNL_ARG_SCRATCHPAD, *context_.sp_mem},
                  {DNNL_ARG_DST, *context_.dst_mem}};
#ifdef ENABLE_ONEDNN_V3
      if (is_scale_set["src"] && is_scale_set["wei"] && is_scale_set["dst"]) {
        net_args.insert(
            {{DNNL_ARG_ATTR_SCALES | DNNL_ARG_SRC, *context_.src_scale_mem},
             {DNNL_ARG_ATTR_SCALES | DNNL_ARG_WEIGHTS,
              *context_.wei_scale_mem},
             {DNNL_ARG_ATTR_SCALES | DNNL_ARG_DST, *context_.dst_scale_mem}});
      }
#endif
    }
    context_.fwd_primitives_args.push_back(net_args);
    context_.fwd_primitives.push_back(*context_.conv_fwd);
  }

  struct ConvFwdContext context_;

#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
  mutex primitive_execution_mu_;
#endif
};

template <typename Tinput, typename Tfilter, typename Tbias, typename Toutput>
class MklConvFwdPrimitiveFactory : public MklPrimitiveFactory<float> {
 public:
  static MklConvFwdPrimitive<Tinput, Tfilter, Tbias, Toutput>* Get(
      const MklConvFwdParams& convFwdDims, bool do_not_cache) {
    MklConvFwdPrimitive<Tinput, Tfilter, Tbias, Toutput>* conv_fwd = nullptr;
    if (do_not_cache) {
      conv_fwd = new MklConvFwdPrimitive<Tinput, Tfilter, Tbias, Toutput>(
          convFwdDims);
    } else {
      conv_fwd =
          dynamic_cast<MklConvFwdPrimitive<Tinput, Tfilter, Tbias, Toutput>*>(
              MklConvFwdPrimitiveFactory<Tinput, Tfilter, Tbias,
                                         Toutput>::GetInstance()
                  .GetConvFwd(convFwdDims));
      if (conv_fwd == nullptr) {
        conv_fwd = new MklConvFwdPrimitive<Tinput, Tfilter, Tbias, Toutput>(
            convFwdDims);
        MklConvFwdPrimitiveFactory<Tinput, Tfilter, Tbias,
                                   Toutput>::GetInstance()
            .SetConvFwd(convFwdDims, conv_fwd);
      }
    }
    return conv_fwd;
  }

 private:
  MklConvFwdPrimitiveFactory() {}
  ~MklConvFwdPrimitiveFactory() {}

  static const int kDilationH = 0, kDilationW = 1;

  static MklConvFwdPrimitiveFactory& GetInstance() {
    static MklConvFwdPrimitiveFactory instance_;
    return instance_;
  }

  static string CreateKey(const MklConvFwdParams& convFwdDims) {
    string prefix = "conv_fwd_";
    FactoryKeyCreator key_creator;
    key_creator.AddAsKey(prefix);
    key_creator.AddAsKey(convFwdDims.src_dims);
    key_creator.AddAsKey(convFwdDims.filter_dims);
    key_creator.AddAsKey(convFwdDims.bias_dims);
    key_creator.AddAsKey(convFwdDims.dst_dims);
    key_creator.AddAsKey(convFwdDims.strides);
    key_creator.AddAsKey(convFwdDims.dilations);
    key_creator.AddAsKey(convFwdDims.padding_left);
    key_creator.AddAsKey(convFwdDims.padding_right);
    key_creator.AddAsKey(convFwdDims.dtypes);
    if (convFwdDims.native_format) {
      key_creator.AddAsKey(convFwdDims.tf_fmt);
    }
    for (auto const& post_op_param : convFwdDims.post_op_params) {
      key_creator.AddAsKey(post_op_param.name);
      if (post_op_param.name == "activation") {
        key_creator.AddAsKey(post_op_param.alg);
        DCHECK_EQ(post_op_param.param.size(), 3);
        for (auto& param : post_op_param.param) {
          key_creator.AddAsKey(param);
        }
      } else if (post_op_param.name == "sum") {
        DCHECK_EQ(post_op_param.param.size(), 1);
        for (auto& param : post_op_param.param) {
          key_creator.AddAsKey(param);
        }
#ifndef ENABLE_ONEDNN_V3
      } else if
(post_op_param.name == "output_scale") { #else } else if (post_op_param.name == "src_scale" || post_op_param.name == "wei_scale" || post_op_param.name == "dst_scale") { #endif key_creator.AddAsKey(post_op_param.partial_key); } else if (post_op_param.name == "fuse_bn") { key_creator.AddAsKey(post_op_param.name); key_creator.AddAsKey(convFwdDims.fuse_bn_dims); } else { return string("not_a_key"); } } return key_creator.GetKey(); } MklPrimitive* GetConvFwd(const MklConvFwdParams& convFwdDims) { string key = CreateKey(convFwdDims); return this->GetOp(key); } void SetConvFwd(const MklConvFwdParams& convFwdDims, MklPrimitive* op) { string key = CreateKey(convFwdDims); this->SetOp(key, op); } }; template <typename Device, typename Tinput, typename Tfilter, typename Tbias, typename Toutput, typename Ttemp_output, typename Tpadding, bool bias_enabled, bool pad_enabled, bool is_depthwise, bool native_format> class MklConvOp : public OpKernel { public: ~MklConvOp() {} explicit MklConvOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilations_)); OP_REQUIRES( context, !(context->HasAttr("padding_list") && context->HasAttr("explicit_paddings")), absl::InvalidArgumentError("Can only have 1 `padding` list at most")); if (context->HasAttr("padding_list")) { OP_REQUIRES_OK(context, context->GetAttr("padding_list", &padding_list_)); } if (context->HasAttr("explicit_paddings")) { OP_REQUIRES_OK(context, context->GetAttr("explicit_paddings", &padding_list_)); } OP_REQUIRES_OK(context, context->GetAttr("strides", &strides_)); OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format_str_)); OP_REQUIRES(context, FormatFromString(data_format_str_, &data_format_), absl::InvalidArgumentError("Invalid data format")); OP_REQUIRES(context, (strides_.size() == 4 || strides_.size() == 5), absl::InvalidArgumentError("Sliding window strides field must " "specify 4 or 5 dimensions")); const int64 stride_n = GetTensorDim(strides_, data_format_, 'N'); const int64 stride_c = GetTensorDim(strides_, data_format_, 'C'); OP_REQUIRES( context, stride_n == 1 && stride_c == 1, absl::UnimplementedError("Current implementation does not yet support " "strides in the batch and depth dimensions.")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); is_filter_const_ = false; if (AreWeightsFrozen()) { is_filter_const_ = true; } else if (context->HasAttr("is_filter_const")) { OP_REQUIRES_OK(context, context->GetAttr("is_filter_const", &is_filter_const_)); } if (strides_.size() == 4) { OP_REQUIRES( context, dilations_.size() == 4, absl::InvalidArgumentError("Sliding window dilations field must " "specify 4 dimensions")); const int64 dilation_n = GetTensorDim(dilations_, data_format_, 'N'); const int64 dilation_c = GetTensorDim(dilations_, data_format_, 'C'); const int64 dilation_h = GetTensorDim(dilations_, data_format_, 'H'); const int64 dilation_w = GetTensorDim(dilations_, data_format_, 'W'); OP_REQUIRES(context, dilation_n == 1 && dilation_c == 1, absl::InvalidArgumentError( "Current implementation does not yet support " "dilations in the batch and depth dimensions.")); OP_REQUIRES( context, dilation_h > 0 && dilation_w > 0, absl::InvalidArgumentError("Dilated rates should be larger than 0.")); } else if (strides_.size() == 5) { OP_REQUIRES(context, dilations_.size() == 5, absl::InvalidArgumentError("Dilation rates field must " "specify 5 dimensions")); OP_REQUIRES(context, (GetTensorDim(dilations_, data_format_, 'N') == 1 && GetTensorDim(dilations_, 
                                data_format_, 'C') == 1),
                  absl::InvalidArgumentError(
                      "Current implementation does not yet support "
                      "dilations rates in the batch and depth dimensions."));
      OP_REQUIRES(
          context,
          (GetTensorDim(dilations_, data_format_, '0') > 0 &&
           GetTensorDim(dilations_, data_format_, '1') > 0 &&
           GetTensorDim(dilations_, data_format_, '2') > 0),
          absl::InvalidArgumentError("Dilated rates should be larger than 0."));
    }
  }

  void Compute(OpKernelContext* context) override {
    try {
      const Tensor& src_tensor = MklGetInput(context, kInputIndex_Src);
      const Tensor& filter_tensor = MklGetInput(context, kInputIndex_Filter);
      OP_REQUIRES(
          context, filter_tensor.NumElements() > 0,
          absl::InvalidArgumentError("filter must not have zero elements "
                                     "(i.e. all dimensions must be non-zero)"));
      if (std::is_same<Tinput, float>::value) {
        (void)SetFPMathMode();
      }
      MklDnnShape src_mkl_shape, filter_mkl_shape;
      GetMklShape(context, kInputIndex_Src, &src_mkl_shape, native_format);
      GetMklShape(context, kInputIndex_Filter, &filter_mkl_shape,
                  native_format);
      OP_REQUIRES(context, !filter_mkl_shape.IsMklTensor(),
                  absl::InvalidArgumentError("Filter should not be in "
                                             "Mkl Layout"));
      MklDnnData<Tinput> src(&cpu_engine_);
      MklDnnData<Tfilter> filter(&cpu_engine_);
      memory::dims src_dims, filter_dims, padding_left, padding_right,
          dilations, strides;
      memory::dims dst_dims_tf_order, dst_dims_mkl_order;
      bool pad_attr_enabled = false;
      for (auto const& padding_val : padding_list_) {
        if (padding_val) {
          pad_attr_enabled = true;
          break;
        }
      }
      if (fuse_pad_ || pad_attr_enabled) {
        PadWithConvFusion(context, padding_left, padding_right,
                          pad_attr_enabled, data_format_str_);
      }
      MklDnnConvUtil conv_utl(context, strides_, padding_, data_format_,
                              dilations_);
      auto src_tf_shape = GetTfShape(context, kInputIndex_Src, native_format);
      auto filter_tf_shape =
          GetTfShape(context, kInputIndex_Filter, native_format);
      bool is_grouped_convolution = false;
      conv_utl.GetConvFwdSizesInMklOrder(
          src_tf_shape, filter_tf_shape, &src_dims, &filter_dims, &strides,
          &dilations, &dst_dims_tf_order, &dst_dims_mkl_order, &padding_left,
          &padding_right, &is_grouped_convolution,
          (fuse_pad_ || pad_attr_enabled), is_depthwise);
      if (!context->status().ok()) return;
      TensorShape dst_tf_shape = MklDnnDimsToTFShape(dst_dims_tf_order);
      Tensor* dst_tensor = nullptr;
      bool emit_filter_output = (typeid(Tinput) == typeid(Tfilter) &&
                                 typeid(Tinput) == typeid(Toutput) &&
                                 (typeid(Tinput) == typeid(float) ||
                                  typeid(Tinput) == typeid(bfloat16))) &&
                                !native_format;
      if (dst_tf_shape.num_elements() == 0 || dst_dims_tf_order[0] == 0) {
        MklDnnShape dst_mkl_shape;
        dst_mkl_shape.SetMklTensor(false);
        AllocateOutputSetMklShape(context, kOutputIndex_Dst, &dst_tensor,
                                  src_tf_shape, dst_mkl_shape, native_format);
        filter_mkl_shape.SetMklTensor(false);
        Tensor* output_filter_tensor = nullptr;
        if (emit_filter_output) {
          filter_mkl_shape.SetMklTensor(false);
          AllocateOutputSetMklShape(context, kOutputIndex_Filter,
                                    &output_filter_tensor, filter_tf_shape,
                                    filter_mkl_shape);
        }
        return;
      }
      bool is_conv2d = (strides_.size() == 4);
      bool is_conv3d = (strides_.size() == 5);
      if (!is_conv2d && !is_conv3d) {
        OP_REQUIRES(context, !pad_enabled,
                    absl::InvalidArgumentError(
                        "Pad + Conv fusion only works for 2D/3D"));
        OP_REQUIRES(
            context, !fuse_pad_,
            absl::InvalidArgumentError("Pad+Conv fusion only works for 2D/3D"));
      }
      if (is_depthwise) {
        OP_REQUIRES(context, is_conv2d,
                    absl::InvalidArgumentError(
                        "Only 2D convolution is supported for depthwise."));
      }
      auto tf_fmt = is_conv2d ?
                        TFDataFormatToMklDnnDataFormat(data_format_)
                        : TFDataFormatToMklDnn3DDataFormat(data_format_);
      auto mkl_fmt_tag = MklTensorFormatToMklDnnDataFormat(tf_fmt);
      OP_REQUIRES(context, mkl_fmt_tag != memory::format_tag::undef,
                  absl::InvalidArgumentError("Invalid data format"));
      auto src_md =
          src_mkl_shape.IsMklTensor()
              ? src_mkl_shape.GetMklLayout()
              : memory::desc(src_dims, MklDnnType<Tinput>(), mkl_fmt_tag);
      src.SetUsrMem(src_md, &src_tensor);
      auto filter_format =
          is_conv2d ? ((is_depthwise || is_grouped_convolution)
                           ? memory::format_tag::hwigo
                           : memory::format_tag::hwio)
                    : memory::format_tag::dhwio;
      DCHECK(!filter_mkl_shape.IsMklTensor());
      auto filter_md =
          filter_mkl_shape.IsMklTensor()
              ? filter_mkl_shape.GetMklLayout()
              : memory::desc(filter_dims, MklDnnType<Tfilter>(),
                             filter_format);
      filter.SetUsrMem(filter_md, &filter_tensor);
      for (int i = 0; i < dilations.size(); ++i) --dilations[i];
      bool do_not_cache =
          MklPrimitiveFactory<Tinput>::IsPrimitiveMemOptEnabled() &&
          (src_dims[MklDnnDims::Dim_N] > kSmallBatchSize) &&
          (MklPrimitiveFactory<Tinput>::IsLegacyPlatform() ||
           IsConv1x1StrideNot1(filter_dims, strides));
      MklConvFwdPrimitive<Tinput, Tfilter, Tbias, Ttemp_output>* conv_fwd =
          nullptr;
      memory::dims bias_dims = {};
      if (fuse_biasadd_) {
        conv_utl.GetBiasSizeInMklOrder(kInputIndex_Bias, &bias_dims);
      }
      memory::dims fuse_bn_dims = {};
      TensorShape fuse_bn_shape;
      if (fuse_bn_) {
        fuse_bn_shape = MklGetInput(context, kInputIndex_BN_Mean).shape();
        OP_REQUIRES(context, fuse_bn_shape.dims() == 1,
                    absl::InvalidArgumentError(
                        absl::StrCat("FusedBatchNorm must be 1D, not: ",
                                     fuse_bn_shape.DebugString())));
        fuse_bn_dims = {1, fuse_bn_shape.dim_size(0), 1, 1};
      }
      MklConvFwdParams convFwdDims(
          src_dims, filter_dims, fuse_biasadd_ ? bias_dims : NONE_DIMS,
          dst_dims_mkl_order, strides, dilations, padding_left, padding_right,
          fuse_bn_dims, tf_fmt, native_format, is_depthwise, is_filter_const_);
      this->ExtendConvFwdParams(context, convFwdDims);
      Eigen::ThreadPoolInterface* eigen_interface =
          EigenThreadPoolFromTfContext(context);
      tsl::OneDnnThreadPool eigen_tp(eigen_interface,
                                     ThreadPoolUseCallerThread());
      conv_fwd = MklConvFwdPrimitiveFactory<Tinput, Tfilter, Tbias,
                                            Ttemp_output>::Get(convFwdDims,
                                                               do_not_cache);
      MklDnnShape output_mkl_shape;
      std::shared_ptr<ConvFwdPd> conv_fwd_pd = conv_fwd->GetPrimitiveDesc();
      AllocateOutputTensor(context, *conv_fwd_pd, dst_dims_mkl_order, tf_fmt,
                           &output_mkl_shape, &dst_tensor);
      Tensor* filter_out_tensor = nullptr;
      if (emit_filter_output) {
        AllocateFilterOutputTensor(context, *conv_fwd_pd,
                                   TFShapeToMklDnnDims(filter_tf_shape),
                                   &filter_out_tensor);
      }
      Ttemp_output* dst_data =
          reinterpret_cast<Ttemp_output*>(dst_tensor->flat<Toutput>().data());
      Tinput* src_data = nullptr;
      if (src_md != conv_fwd_pd->src_desc()) {
        src.SetUsrMem(src_md, &src_tensor);
        src.CheckReorderToOpMem(conv_fwd_pd->src_desc(), cpu_engine_, context);
        src_data = static_cast<Tinput*>(src.GetOpMem().get_data_handle());
      } else {
        src_data = static_cast<Tinput*>(
            const_cast<Tinput*>(src_tensor.flat<Tinput>().data()));
      }
      Tfilter* filter_data = nullptr;
      if (filter_md != conv_fwd_pd->weights_desc()) {
        bool is_filter_cached = false;
        if (is_filter_const_) {
          if (IsFilterCacheEmpty(context)) {
            CacheFilter(context, conv_fwd_pd, filter_data, filter_tensor,
                        filter, filter_md, filter_mkl_shape);
          }
          filter_data = GetCachedFilter(context, conv_fwd_pd->weights_desc());
          is_filter_cached = (filter_data != nullptr);
        }
        if (!is_filter_cached) {
          filter.SetUsrMem(filter_md, &filter_tensor);
          if (filter_out_tensor == nullptr) {
            filter.CheckReorderToOpMem(conv_fwd_pd->weights_desc(),
                                       cpu_engine_, context);
          } else {
            filter.CheckReorderToOpMem(
                conv_fwd_pd->weights_desc(),
                filter.GetTensorBuffer(filter_out_tensor), cpu_engine_,
                context);
          }
          filter_data =
              static_cast<Tfilter*>(filter.GetOpMem().get_data_handle());
        }
      } else {
        filter_data = static_cast<Tfilter*>(
            const_cast<Tfilter*>(filter_tensor.flat<Tfilter>().data()));
      }
      UserScratchPad<unsigned char> scratch_pad;
      scratch_pad.AllocateSPTensor(conv_fwd, context);
      std::shared_ptr<stream> fwd_cpu_stream;
      fwd_cpu_stream.reset(CreateStream(&eigen_tp, conv_fwd->GetEngine()));
      if (fuse_biasadd_) {
        const Tensor& bias_tensor = MklGetInput(context, kInputIndex_Bias);
        void* bias_data =
            this->GetBiasHandle(context, conv_fwd_pd, bias_tensor);
        conv_fwd->Execute(src_data, filter_data, bias_data, dst_data,
                          convFwdDims, fwd_cpu_stream, scratch_pad.Get());
      } else if (fuse_bn_) {
        const Tensor& bn_scale_tensor =
            MklGetInput(context, kInputIndex_BN_Scale);
        Tinput* bn_scale_data = static_cast<Tinput*>(
            const_cast<Tinput*>(bn_scale_tensor.flat<Tinput>().data()));
        const Tensor& bn_mean_tensor =
            MklGetInput(context, kInputIndex_BN_Mean);
        Tinput* bn_mean_data = static_cast<Tinput*>(
            const_cast<Tinput*>(bn_mean_tensor.flat<Tinput>().data()));
        const Tensor& bn_offset_tensor =
            MklGetInput(context, kInputIndex_BN_Offset);
        Tinput* bn_offset_data = static_cast<Tinput*>(
            const_cast<Tinput*>(bn_offset_tensor.flat<Tinput>().data()));
        Tensor bn_rsqrt_tensor;
        OP_REQUIRES_OK(context,
                       context->allocate_temp(DataTypeToEnum<Tinput>::v(),
                                              fuse_bn_shape,
                                              &bn_rsqrt_tensor));
        Tinput* bn_rsqrt_data = static_cast<Tinput*>(
            const_cast<Tinput*>(bn_rsqrt_tensor.flat<Tinput>().data()));
        this->ComputeBNScale(context, epsilon_, kInputIndex_BN_Variance,
                             bn_rsqrt_data);
        conv_fwd->Execute(src_data, filter_data, nullptr, dst_data,
                          bn_scale_data, bn_mean_data, bn_offset_data,
                          bn_rsqrt_data, convFwdDims, fwd_cpu_stream,
                          scratch_pad.Get());
      } else {
        conv_fwd->Execute(src_data, filter_data, dst_data, convFwdDims,
                          fwd_cpu_stream, scratch_pad.Get());
      }
      if (do_not_cache) delete conv_fwd;
    } catch (dnnl::error& e) {
      string error_msg = tensorflow::strings::StrCat(
          "Status: ", e.status, ", message: ", string(e.message), ", in file ",
          __FILE__, ":", __LINE__);
      OP_REQUIRES_OK(context,
                     absl::AbortedError(absl::StrCat(
                         "Operation received an exception:", error_msg)));
    }
  }

  void PadWithConvFusion(OpKernelContext* context, memory::dims& padding_left,
                         memory::dims& padding_right, bool pad_attr_enabled,
                         string data_format_str_) {
    Tpadding* paddings = nullptr;
    if (pad_attr_enabled) {
      paddings = padding_list_.data();
    } else {
      const Tensor& paddings_tf = MklGetInput(context, input_index_pad_);
      OP_REQUIRES(context, paddings_tf.dims() == 2,
                  absl::InvalidArgumentError(
                      absl::StrCat("paddings must be 2-dimensional: ",
                                   paddings_tf.shape().DebugString())));
      paddings = static_cast<Tpadding*>(
          const_cast<Tpadding*>(paddings_tf.flat<Tpadding>().data()));
    }
    int64 pad_top = 0, pad_left = 0, pad_front = 0;
    int64 pad_bottom = 0, pad_right = 0, pad_back = 0;
    if (data_format_str_ == "NHWC") {
      pad_top = paddings[2];
      pad_bottom = paddings[3];
      pad_left = paddings[4];
      pad_right = paddings[5];
    } else if (data_format_str_ == "NCHW") {
      pad_top = paddings[4];
      pad_bottom = paddings[5];
      pad_left = paddings[6];
      pad_right = paddings[7];
    } else if (data_format_str_ == "NDHWC") {
      pad_front = paddings[2];
      pad_back = paddings[3];
      pad_top = paddings[4];
      pad_bottom = paddings[5];
      pad_left = paddings[6];
      pad_right = paddings[7];
    } else if (data_format_str_ == "NCDHW") {
      pad_front = paddings[4];
      pad_back = paddings[5];
      pad_top = paddings[6];
      pad_bottom = paddings[7];
      pad_left = paddings[8];
      pad_right = paddings[9];
    }
    if (data_format_str_ == "NHWC" || data_format_str_ == "NCHW") {
      padding_left = {static_cast<int>(pad_top), static_cast<int>(pad_left)};
      padding_right = {static_cast<int>(pad_bottom),
                       static_cast<int>(pad_right)};
    } else if (data_format_str_ == "NDHWC" || data_format_str_ == "NCDHW") {
      padding_left = {static_cast<int>(pad_front), static_cast<int>(pad_top),
                      static_cast<int>(pad_left)};
      padding_right = {static_cast<int>(pad_back),
                       static_cast<int>(pad_bottom),
                       static_cast<int>(pad_right)};
    }
  }

 protected:
  void set_input_add_idx(int input_add_idx) {
    input_index_add_ = input_add_idx;
  }
  int get_input_add_idx() { return input_index_add_; }
  void set_fuse_biasadd(bool fuse_biasadd) { fuse_biasadd_ = fuse_biasadd; }
  bool get_fuse_biasadd() { return fuse_biasadd_; }
  void set_fuse_activation(bool fuse_activation,
                           dnnl::algorithm activation_alg,
                           float alpha_or_upbound = 0.0, float beta = 0.0) {
    fuse_activation_ = fuse_activation;
    activation_alg_ = activation_alg;
    alpha_or_upbound_ = alpha_or_upbound;
    beta_ = beta;
  }
  void set_fuse_pad(bool fuse_pad) {
    fuse_pad_ = fuse_pad;
    if (fuse_bn_) {
      input_index_pad_ = 6;
    } else if (fuse_add_ && fuse_biasadd_) {
      input_index_pad_ = 4;
    } else {
      input_index_pad_ = 3;
    }
  }
  void set_fuse_add(bool fuse_add) { fuse_add_ = fuse_add; }
  bool get_fuse_add() { return fuse_add_; };
  void set_fuse_bn(bool fuse_bn, float epsilon) {
    fuse_bn_ = fuse_bn;
    epsilon_ = epsilon;
  }

  virtual void ComputeBNScale(OpKernelContext* context, float epsilon,
                              int bn_variance_index, Tinput* scale_buf_ptr) {
    OP_REQUIRES(context, false,
                absl::UnimplementedError(
                    "Compute BN scale not expected in base class"));
    return;
  }

  virtual void ExtendConvFwdParams(OpKernelContext* context,
                                   MklConvFwdParams& params) {
    params.dtypes.append(typeid(Tinput).name());
    params.dtypes.append(typeid(Tfilter).name());
    params.dtypes.append(typeid(Tbias).name());
    params.dtypes.append(typeid(Toutput).name());
    bool is_quantized_input = std::is_same<Tinput, quint8>::value ||
                              std::is_same<Tinput, qint8>::value;
    if (!is_quantized_input) {
      if (fuse_add_) {
        params.post_op_params.push_back(
            {"sum", dnnl::algorithm::undef, {1.0}, ""});
      }
      if (fuse_bn_) {
        params.post_op_params.push_back(
            {"fuse_bn", dnnl::algorithm::undef, {1.0}, ""});
      }
      if (fuse_activation_) {
        params.post_op_params.push_back(
            {"activation", activation_alg_,
             {1.0, alpha_or_upbound_, beta_}, ""});
      }
    }
  }

  virtual void* GetBiasHandle(OpKernelContext* context,
                              std::shared_ptr<ConvFwdPd>& conv2d_fwd_pd,
                              const Tensor& bias_tensor) {
    if (fuse_biasadd_) {
      return static_cast<Tbias*>(
          const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
    }
    return nullptr;
  }

  virtual void AllocateOutputTensor(OpKernelContext* context,
                                    const ConvFwdPd& conv_prim_desc,
                                    const memory::dims& output_dims_mkl_order,
                                    MklTensorFormat output_tf_format,
                                    MklDnnShape* output_mkl_shape,
                                    Tensor** output_tensor) {
    DCHECK(output_tensor);
#ifndef ENABLE_ONEDNN_V3
    auto dst_md = conv_prim_desc.dst_desc();
    if (!std::is_same<Ttemp_output, Toutput>::value) {
#ifndef ENABLE_ONEDNN_V3
      dst_md.data.data_type =
          static_cast<dnnl_data_type_t>(MklDnnType<Toutput>());
#else
      dst_md = memory::desc(output_dims_mkl_order, MklDnnType<Toutput>(),
                            MklTensorFormatToMklDnnDataFormat(
                                output_tf_format));
#endif
    }
#else
    auto dst_md =
        std::is_same<Ttemp_output, Toutput>::value ?
            conv_prim_desc.dst_desc()
            : memory::desc(conv_prim_desc.dst_desc().get_dims(),
                           MklDnnType<Toutput>(),
                           MklTensorFormatToMklDnnDataFormat(output_tf_format));
#endif
    output_mkl_shape->SetMklTensor(true);
    output_mkl_shape->SET_MKL_LAYOUT(dst_md);
    output_mkl_shape->SetElemType(MklDnnType<Toutput>());
    output_mkl_shape->SetTfLayout(output_dims_mkl_order.size(),
                                  output_dims_mkl_order, output_tf_format);
    TensorShape output_tf_shape;
    output_tf_shape.AddDim((dst_md.get_size() / sizeof(Toutput)));
    if (native_format) {
      output_tf_shape = output_mkl_shape->GetTfShape();
    }
    bool is_quantized_input = std::is_same<Tinput, quint8>::value ||
                              std::is_same<Tinput, qint8>::value;
    if (fuse_add_ && !is_quantized_input) {
      const Tensor& add_tensor = MklGetInput(context, input_index_add_);
      MklDnnShape add_mkl_shape;
      GetMklShape(context, input_index_add_, &add_mkl_shape, native_format);
      if (native_format && context->forward_input_to_output_with_shape(
                               input_index_add_, kOutputIndex_Dst,
                               output_tf_shape, output_tensor)) {
        return;
      }
      if (!native_format && add_mkl_shape == *output_mkl_shape &&
          ForwardMklTensorInToOutWithMklShape(context, input_index_add_,
                                              kOutputIndex_Dst, output_tensor,
                                              add_mkl_shape, false)) {
        return;
      } else {
        AllocateOutputSetMklShape(context, kOutputIndex_Dst, output_tensor,
                                  output_tf_shape, *output_mkl_shape,
                                  native_format);
        auto output_format_tag = MklTensorFormatToMklDnnDataFormat(
            output_mkl_shape->GetTfDataFormat());
        OP_REQUIRES(context, output_format_tag != memory::format_tag::undef,
                    absl::InvalidArgumentError(
                        "MklConvOp: AddN fusion: Invalid data format"));
        auto add_md =
            add_mkl_shape.IsMklTensor()
                ? add_mkl_shape.GetMklLayout()
                : memory::desc(output_dims_mkl_order, MklDnnType<Toutput>(),
                               output_format_tag);
        void* add_buf = static_cast<void*>(
            const_cast<Toutput*>(add_tensor.flat<Toutput>().data()));
        void* dst_buf =
            static_cast<void*>((*output_tensor)->flat<Ttemp_output>().data());
        if (native_format) {
          add_md = dst_md = memory::desc({add_tensor.NumElements()},
                                         MklDnnType<Toutput>(),
                                         dnnl::memory::format_tag::x);
        }
        fuse_add_src_.reset(new memory(add_md, this->cpu_engine_, add_buf));
        fuse_add_dst_.reset(new memory(dst_md, this->cpu_engine_, dst_buf));
        auto reorder_desc =
            ReorderPd(this->cpu_engine_, add_md, this->cpu_engine_, dst_md);
        CreateAndExecuteReorder(reorder_desc, *fuse_add_src_, *fuse_add_dst_,
                                this->cpu_engine_, context);
      }
    } else {
      AllocateOutputSetMklShape(context, kOutputIndex_Dst, output_tensor,
                                output_tf_shape, *output_mkl_shape,
                                native_format);
    }
  }

  engine cpu_engine_ = engine(engine::kind::cpu, 0);

 private:
  std::shared_ptr<dnnl::memory> fuse_add_src_;
  std::shared_ptr<dnnl::memory> fuse_add_dst_;
  std::vector<int32> strides_;
  std::vector<int32> dilations_;
  std::vector<Tpadding> padding_list_;
  bool is_filter_const_;
  mutex mu_;
  Padding padding_;
  string data_format_str_;
  TensorFormat data_format_;
  Tensor cached_filter_data_ TF_GUARDED_BY(mu_);
#ifndef ENABLE_ONEDNN_V3
  Tensor cached_filter_md_ TF_GUARDED_BY(mu_);
#else
  FilterMemoryDesc cached_filter_md_ TF_GUARDED_BY(mu_);
#endif
  bool fuse_biasadd_ = bias_enabled;
  bool fuse_activation_ = false;
  bool fuse_pad_ = pad_enabled;
  bool fuse_add_ = false;
  bool fuse_bn_ = false;
  float epsilon_ = 0.0001;
  float alpha_or_upbound_ = 0.0;
  float beta_ = 0.0;
  dnnl::algorithm activation_alg_ = dnnl::algorithm::undef;
  int input_index_pad_ = 2;
  int input_index_add_ = 3;
  const int kInputIndex_Src = 0, kInputIndex_Filter = 1, kInputIndex_Bias = 2;
  const int kOutputIndex_Dst = 0, kOutputIndex_Filter = 1;
  const int kDilationH = 0, kDilationW = 1;
  const int
      kInputIndex_BN_Scale = 2, kInputIndex_BN_Offset = 3;
  const int kInputIndex_BN_Mean = 4, kInputIndex_BN_Variance = 5;

  MklTensorFormat GetFilterTfDataFormat(
      const MklDnnShape* filter_mkl_shape,
      const ConvFwdPd& conv_prim_desc) const {
    DCHECK(filter_mkl_shape);
    return filter_mkl_shape->GetTfDataFormat();
  }

  void AllocateTensor(OpKernelContext* context,
                      const ConvFwdPd& conv_prim_desc, Tensor** filter_tensor,
                      const MklDnnShape* filter_mkl_shape)
      TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
    DCHECK(filter_tensor);
    TensorShape filter_tf_shape;
    filter_tf_shape.AddDim(
        (conv_prim_desc.weights_desc().get_size() / sizeof(Tfilter)));
    OP_REQUIRES_OK(
        context, context->allocate_temp(DataTypeToEnum<Tfilter>::value,
                                        filter_tf_shape,
                                        &cached_filter_data_));
    *filter_tensor = &cached_filter_data_;
    memory::desc weights_desc = conv_prim_desc.weights_desc();
#ifndef ENABLE_ONEDNN_V3
    TensorShape cached_filter_md_shape;
    cached_filter_md_shape.AddDim(sizeof(weights_desc) / sizeof(uint8));
    OP_REQUIRES_OK(context,
                   context->allocate_temp(DT_UINT8, cached_filter_md_shape,
                                          &cached_filter_md_));
    *reinterpret_cast<memory::desc*>(cached_filter_md_.flat<uint8>().data()) =
        weights_desc;
#else
    cached_filter_md_ = FilterMemoryDesc(
        weights_desc.get_ndims(), weights_desc.get_inner_nblks(),
        weights_desc.get_data_type(), weights_desc.get_dims(),
        weights_desc.get_inner_blks(), weights_desc.get_inner_idxs(),
        weights_desc.get_strides());
#endif
  }

  void AllocateTensor(OpKernelContext* context,
                      const ConvFwdPd& conv_prim_desc,
                      Tensor** filter_tensor) {
    AllocateTensor(context, conv_prim_desc, filter_tensor, nullptr);
  }

  void AllocateFilterOutputTensor(OpKernelContext* context,
                                  const ConvFwdPd& conv_prim_desc,
                                  const memory::dims& filter_dims_tf_order,
                                  Tensor** filter_tensor) {
    DCHECK(filter_tensor);
    auto filter_md = conv_prim_desc.weights_desc();
    MklDnnShape filter_mkl_shape;
    filter_mkl_shape.SetMklTensor(true);
    filter_mkl_shape.SET_MKL_LAYOUT(filter_md);
    filter_mkl_shape.SetElemType(MklDnnType<Tfilter>());
    filter_mkl_shape.SetTfLayout(filter_dims_tf_order.size(),
                                 filter_dims_tf_order,
                                 MklTensorFormat::FORMAT_BLOCKED);
    TensorShape filter_tf_shape;
    filter_tf_shape.AddDim((filter_md.get_size() / sizeof(Tfilter)));
    AllocateOutputSetMklShape(context, kOutputIndex_Filter, filter_tensor,
                              filter_tf_shape, filter_mkl_shape);
  }

  inline bool IsFilterCacheEmpty(OpKernelContext* context)
      TF_LOCKS_EXCLUDED(mu_) {
    tf_shared_lock lock(mu_);
    const Tensor& cached_filter_data_tensor = cached_filter_data_;
    return (cached_filter_data_tensor.NumElements() == 0);
  }

  void CacheFilter(OpKernelContext* context,
                   const std::shared_ptr<ConvFwdPd>& conv_fwd_pd,
                   Tfilter* filter_data, const Tensor& filter_tensor,
                   MklDnnData<Tfilter>& filter, const memory::desc& filter_md,
                   const MklDnnShape& filter_mkl_shape)
      TF_LOCKS_EXCLUDED(mu_) {
    mutex_lock lock(mu_);
    const Tensor& cached_filter_data_tensor = cached_filter_data_;
    if (cached_filter_data_tensor.NumElements() > 0) {
      return;
    }
#ifdef ENABLE_ONEDNN_V3
    if (filter_md.get_format_kind() != memory::format_kind::blocked) {
      return;
    }
#endif
    filter.SetUsrMem(filter_md, &filter_tensor);
    filter.CheckReorderToOpMem(conv_fwd_pd.get()->weights_desc(),
                               this->cpu_engine_, context);
    filter_data = static_cast<Tfilter*>(filter.GetOpMem().get_data_handle());
    Tensor* filter_tensor_ptr = nullptr;
    AllocateTensor(context, *conv_fwd_pd, &filter_tensor_ptr,
                   &filter_mkl_shape);
    void* cached_filter_data = filter.GetTensorBuffer(filter_tensor_ptr);
    size_t cached_filter_data_size = filter.GetOpMem().get_desc().get_size();
    memcpy(cached_filter_data, filter_data,
           cached_filter_data_size);
  }

#ifndef ENABLE_ONEDNN_V3
  bool AreMemoryDescriptorsEqual(const memory::desc& filter_md,
                                 const Tensor& cached_filter_md) {
    auto filter_md_data = filter_md.data;
    const char* filter_data = reinterpret_cast<const char*>(&filter_md_data);
    auto cached_filter_md_data = cached_filter_md.scalar<int64_t>()();
    const char* cached_filter_data =
        reinterpret_cast<const char*>(&cached_filter_md_data);
    for (size_t i = 0; i < sizeof(filter_md_data); ++i) {
      if (*filter_data++ != *cached_filter_data++) {
        return false;
      }
    }
    return true;
  }
#endif

  Tfilter* GetCachedFilter(OpKernelContext* context,
                           const memory::desc& filter_md)
      TF_LOCKS_EXCLUDED(mu_) {
    tf_shared_lock lock(mu_);
    const Tensor& cached_filter_data = cached_filter_data_;
#ifndef ENABLE_ONEDNN_V3
    const Tensor& cached_filter_md = cached_filter_md_;
    if (filter_md == *static_cast<memory::desc*>(cached_filter_md.data())) {
      return static_cast<Tfilter*>(
          const_cast<Tfilter*>(cached_filter_data.flat<Tfilter>().data()));
    }
    return nullptr;
#else
    if (cached_filter_md_ ==
        FilterMemoryDesc(filter_md.get_ndims(), filter_md.get_inner_nblks(),
                         filter_md.get_data_type(), filter_md.get_dims(),
                         filter_md.get_inner_blks(),
                         filter_md.get_inner_idxs(),
                         filter_md.get_strides())) {
      return static_cast<Tfilter*>(
          const_cast<Tfilter*>(cached_filter_data.flat<Tfilter>().data()));
    }
    return nullptr;
#endif
  }
};

template <typename Device, typename Tinput, typename Tfilter, typename Tbias,
          typename Toutput, typename Ttemp_output, typename Tpadding,
          bool pad_enabled, bool native_format>
class MklFusedConvOp
    : public MklConvOp<Device, Tinput, Tfilter, Tbias, Toutput, Ttemp_output,
                       Tpadding, false, false, false, native_format> {
 public:
  explicit MklFusedConvOp(OpKernelConstruction* context)
      : MklConvOp<Device, Tinput, Tfilter, Tbias, Toutput, Ttemp_output,
                  Tpadding, false, false, false, native_format>(context) {
    std::vector<string> fused_ops;
    OP_REQUIRES_OK(context, context->GetAttr("fused_ops", &fused_ops));
    int num_args;
    OP_REQUIRES_OK(context, context->GetAttr("num_args", &num_args));
    OP_REQUIRES(context, !fused_ops.empty(),
                absl::InvalidArgumentError(
                    "Fused Conv2D must have at least one fused op."));
    if (fused_ops == std::vector<string>{"BiasAdd"}) {
      this->set_fuse_biasadd(true);
      OP_REQUIRES(context, num_args == 1,
                  absl::InvalidArgumentError(
                      "Fused Conv2D must have one extra argument: bias."));
    } else if (fused_ops == std::vector<string>{"Relu"}) {
      this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);
    } else if (fused_ops == std::vector<string>{"Relu6"}) {
      this->SET_FUSE_ACTIVATION_FOR_RELU6;
    } else if (fused_ops == std::vector<string>{"Elu"}) {
      this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);
    } else if (fused_ops == std::vector<string>{"LeakyRelu"}) {
      float leakyrelu_alpha;
      OP_REQUIRES_OK(context,
                     context->GetAttr("leakyrelu_alpha", &leakyrelu_alpha));
      this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu,
                                leakyrelu_alpha);
    } else if (fused_ops == std::vector<string>{"FusedBatchNorm"}) {
      float epsilon;
      OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon));
      OP_REQUIRES(
          context, num_args == 4,
          absl::InvalidArgumentError(
              "Fused Conv2D with batchnorm must have 4 extra argument"));
      this->set_fuse_bn(true, epsilon);
    } else if (fused_ops == std::vector<string>{"BiasAdd", "Relu"}) {
      this->set_fuse_biasadd(true);
      this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);
      OP_REQUIRES(context, num_args == 1,
                  absl::InvalidArgumentError(
                      "Fused Conv2D must have one extra argument: bias."));
    } else if (fused_ops
== std::vector<string>{"BiasAdd", "Relu6"}) { this->set_fuse_biasadd(true); this->SET_FUSE_ACTIVATION_FOR_RELU6; OP_REQUIRES(context, num_args == 1, absl::InvalidArgumentError( "Fused Conv2D must have one extra argument: bias.")); } else if (fused_ops == std::vector<string>{"BiasAdd", "Elu"}) { this->set_fuse_biasadd(true); this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0); OP_REQUIRES(context, num_args == 1, absl::InvalidArgumentError( "Fused Conv2D must have one extra argument: bias.")); } else if (fused_ops == std::vector<string>{"BiasAdd", "LeakyRelu"}) { this->set_fuse_biasadd(true); float leakyrelu_alpha; OP_REQUIRES_OK(context, context->GetAttr("leakyrelu_alpha", &leakyrelu_alpha)); this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu, leakyrelu_alpha); OP_REQUIRES(context, num_args == 1, absl::InvalidArgumentError( "Fused Conv2D must have one extra argument: bias.")); } else if (fused_ops == std::vector<string>{"BiasAdd", "_FusedHardSwish"}) { this->set_fuse_biasadd(true); this->set_fuse_activation(true, dnnl::algorithm::eltwise_hardswish, 1.0 / 6.0, 0.5); } else if (fused_ops == std::vector<string>{"BiasAdd", "Add"}) { this->set_fuse_biasadd(true); this->set_fuse_add(true); OP_REQUIRES( context, num_args == 2, absl::InvalidArgumentError( "Fused Conv2D must have two extra arguments: bias and add.")); } else if (fused_ops == std::vector<string>{"FusedBatchNorm", "Relu"}) { float epsilon; OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon)); OP_REQUIRES( context, num_args == 4, absl::InvalidArgumentError( "Fused Conv2D with batchnorm must have 4 extra argument")); this->set_fuse_bn(true, epsilon); this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu); } else if (fused_ops == std::vector<string>{"FusedBatchNorm", "Relu6"}) { float epsilon; OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon)); OP_REQUIRES( context, num_args == 4, absl::InvalidArgumentError( "Fused Conv2D with batchnorm must have 4 extra argument")); this->set_fuse_bn(true, epsilon); this->SET_FUSE_ACTIVATION_FOR_RELU6; } else if (fused_ops == std::vector<string>{"FusedBatchNorm", "Elu"}) { float epsilon; OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon)); OP_REQUIRES( context, num_args == 4, absl::InvalidArgumentError( "Fused Conv2D with batchnorm must have 4 extra argument")); this->set_fuse_bn(true, epsilon); this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0); } else if (fused_ops == std::vector<string>{"FusedBatchNorm", "LeakyRelu"}) { float epsilon, leakyrelu_alpha; OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon)); OP_REQUIRES_OK(context, context->GetAttr("leakyrelu_alpha", &leakyrelu_alpha)); OP_REQUIRES( context, num_args == 4, absl::InvalidArgumentError( "Fused Conv2D with batchnorm must have 4 extra argument")); this->set_fuse_bn(true, epsilon); this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu, leakyrelu_alpha); } else if (fused_ops == std::vector<string>{"FusedBatchNorm", "_MklSwish"}) { float epsilon; OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon)); OP_REQUIRES( context, num_args == 4, absl::InvalidArgumentError( "Fused Conv2D with batchnorm must have 4 extra argument")); this->set_fuse_bn(true, epsilon); this->set_fuse_activation(true, dnnl::algorithm::eltwise_swish, 1.0); } else if (fused_ops == std::vector<string>{"BiasAdd", "Add", "Relu"}) { this->set_fuse_biasadd(true); this->set_fuse_add(true); this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu); OP_REQUIRES( context, 
          num_args == 2,
          absl::InvalidArgumentError(
              "Fused Conv2D must have two extra arguments: bias and add."));
    } else if (fused_ops == std::vector<string>{"BiasAdd", "Add", "Relu6"}) {
      this->set_fuse_biasadd(true);
      this->set_fuse_add(true);
      this->SET_FUSE_ACTIVATION_FOR_RELU6;
      OP_REQUIRES(
          context, num_args == 2,
          absl::InvalidArgumentError(
              "Fused Conv2D must have two extra arguments: bias and add."));
    } else if (fused_ops == std::vector<string>{"BiasAdd", "Add", "Elu"}) {
      this->set_fuse_biasadd(true);
      this->set_fuse_add(true);
      this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);
      OP_REQUIRES(
          context, num_args == 2,
          absl::InvalidArgumentError(
              "Fused Conv2D must have two extra arguments: bias and add."));
    } else if (fused_ops ==
               std::vector<string>{"BiasAdd", "Add", "LeakyRelu"}) {
      this->set_fuse_biasadd(true);
      this->set_fuse_add(true);
      float leakyrelu_alpha;
      OP_REQUIRES_OK(context,
                     context->GetAttr("leakyrelu_alpha", &leakyrelu_alpha));
      this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu,
                                leakyrelu_alpha);
      OP_REQUIRES(
          context, num_args == 2,
          absl::InvalidArgumentError(
              "Fused Conv2D must have two extra arguments: bias and add."));
    } else if (fused_ops == std::vector<string>{"BiasAdd", "Mish"}) {
      this->set_fuse_biasadd(true);
      this->set_fuse_activation(true, dnnl::algorithm::eltwise_mish, 1.0);
      OP_REQUIRES(context, num_args == 1,
                  absl::InvalidArgumentError(
                      "_FusedConv2D must have one extra argument: bias."));
    } else if (fused_ops == std::vector<string>{"BiasAdd", "_MklSwish"}) {
      this->set_fuse_biasadd(true);
      this->set_fuse_activation(true, dnnl::algorithm::eltwise_swish, 1.0);
      OP_REQUIRES(context, num_args == 1,
                  absl::InvalidArgumentError(
                      "Fused Conv2D must have one extra argument: bias."));
    } else {
      OP_REQUIRES(context, false,
                  absl::UnimplementedError(
                      absl::StrCat("Fusion is not implemented: [",
                                   absl::StrJoin(fused_ops, ","), "]")));
    }
    if (pad_enabled) {
      this->set_fuse_pad(true);
    }
  }

  void ComputeBNScale(OpKernelContext* context, float epsilon,
                      int bn_variance_index, Tinput* scale_buf_ptr) override {
    const Tensor& bn_var_tensor = MklGetInput(context, bn_variance_index);
    Eigen::Tensor<Tinput, 1, Eigen::RowMajor> bn_rsqrt =
        (bn_var_tensor.flat<Tinput>() + static_cast<Tinput>(epsilon)).rsqrt();
    Tinput* bn_rsqrt_data = bn_rsqrt.data();
    int64_t num_elem = bn_var_tensor.shape().dim_size(0);
    for (int64_t i = 0; i < num_elem; i++) {
      scale_buf_ptr[i] = bn_rsqrt_data[i];
    }
    return;
  }

  virtual ~MklFusedConvOp() {}
};

template <typename Device, typename Tinput, typename Tfilter, typename Tbias,
          typename Toutput, typename Ttemp_output, typename Tpadding,
          bool pad_enabled, bool bias_enabled, bool is_depthwise,
          bool native_format>
class MklFusedDepthwiseConvOp
    : public MklConvOp<Device, Tinput, Tfilter, Tbias, Toutput, Ttemp_output,
                       Tpadding, bias_enabled, false, is_depthwise,
                       native_format> {
 public:
  explicit MklFusedDepthwiseConvOp(OpKernelConstruction* context)
      : MklConvOp<Device, Tinput, Tfilter, Tbias, Toutput, Ttemp_output,
                  Tpadding, bias_enabled, false, is_depthwise, native_format>(
            context) {
    std::vector<string> fused_ops;
    OP_REQUIRES_OK(context, context->GetAttr("fused_ops", &fused_ops));
    int num_args;
    OP_REQUIRES_OK(context, context->GetAttr("num_args", &num_args));
    OP_REQUIRES(
        context, !fused_ops.empty(),
        absl::InvalidArgumentError(
            "Fused DepthwiseConv2D must have at least one fused op."));
    if (fused_ops == std::vector<string>{"BiasAdd"}) {
      this->set_fuse_biasadd(true);
    } else if (fused_ops == std::vector<string>{"BiasAdd", "Relu"}) {
      this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu); } else if (fused_ops == std::vector<string>{"BiasAdd", "Relu6"}) { this->set_fuse_biasadd(true); this->SET_FUSE_ACTIVATION_FOR_RELU6; } else if (fused_ops == std::vector<string>{"BiasAdd", "Elu"}) { this->set_fuse_biasadd(true); this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0); } else if (fused_ops == std::vector<string>{"BiasAdd", "_FusedHardSwish"}) { this->set_fuse_biasadd(true); this->set_fuse_activation(true, dnnl::algorithm::eltwise_hardswish, 1.0 / 6.0, 0.5); } else { OP_REQUIRES(context, false, absl::InvalidArgumentError( absl::StrCat("Fusion is not implemented: [", absl::StrJoin(fused_ops, ","), "]"))); } OP_REQUIRES( context, num_args == 1, absl::InvalidArgumentError( "Fused DepthwiseConv2D must have one extra argument: bias.")); if (pad_enabled) { this->set_fuse_pad(true); } } virtual ~MklFusedDepthwiseConvOp() {} }; enum class oneDNNFusedOps { kBias = 1, kSum = 2, kRelu = 4, kRequantize = 8 }; template <typename Device, typename Tinput, typename Tbias, typename Toutput, typename Ttemp_output, bool is_depthwise, string legacy_fused_ops[], int num_fused_ops> class MklQuantizedConvOp : public MklConvOp< Device, Tinput, qint8, Tbias, Toutput, Ttemp_output, int32, false, false, is_depthwise, true> { public: virtual ~MklQuantizedConvOp() { if (this->input_bias_ != nullptr) { delete this->input_bias_; input_bias_ = nullptr; } if (this->scaled_bias_ != nullptr) { delete this->scaled_bias_; scaled_bias_ = nullptr; } } explicit MklQuantizedConvOp(OpKernelConstruction* context) : MklConvOp<Device, Tinput, qint8, Tbias, Toutput, Ttemp_output, int32, false, false, is_depthwise, true>(context) { std::vector<std::vector<string>> supported_fusions = { {"BiasAdd"}, {"Relu"}, {"Requantize"}, {"BiasAdd", "Relu"}, {"BiasAdd", "Requantize"}, {"Relu", "Requantize"}, {"BiasAdd", "Relu", "Requantize"}, {"BiasAdd", "Sum", "Relu"}, {"BiasAdd", "Sum", "Relu", "Requantize"}}; std::vector<string> fused_ops_attr; if (context->HasAttr("fused_ops")) { OP_REQUIRES_OK(context, context->GetAttr("fused_ops", &fused_ops_attr)); } OP_REQUIRES(context, !(fused_ops_attr.size() > 0 && num_fused_ops > 0), absl::InvalidArgumentError( "QuantizedConv fused ops should be only available through " "either new API or old API, got both.")); if (fused_ops_attr.size() > 0) { fused_ops_ = fused_ops_attr; } else if (num_fused_ops > 0) { for (int i = 0; i < num_fused_ops; ++i) { fused_ops_.push_back(legacy_fused_ops[i]); } } if (fused_ops_.size() > 0) { bool is_fusion_supported = std::find(supported_fusions.begin(), supported_fusions.end(), fused_ops_) != supported_fusions.end(); OP_REQUIRES(context, is_fusion_supported, absl::InvalidArgumentError( absl::StrCat("Unsupported QuantizedConv fusion: [", absl::StrJoin(fused_ops_, ","), "]"))); } for (const auto& op : fused_ops_) { fused_op_flags_ ^= static_cast<int64_t>(StrToEnum(op)); } DataType bias_dt, summand_dt, out_dt; if (IsFused(oneDNNFusedOps::kBias)) { this->set_fuse_biasadd(true); OP_REQUIRES_OK(context, context->GetAttr("is_bias_const", &is_bias_const_)); if (context->HasAttr("Tbias")) { OP_REQUIRES_OK(context, context->GetAttr("Tbias", &bias_dt)); } } if (IsFused(oneDNNFusedOps::kSum)) { this->set_fuse_add(true); } const bool fuse_requantize = IsFused(oneDNNFusedOps::kRequantize); OP_REQUIRES_OK(context, context->GetAttr("out_type", &out_dt)); if (fuse_requantize) { OP_REQUIRES( context, out_dt == DT_QINT8 || out_dt == DT_QUINT8, absl::InvalidArgumentError("QuantizedConv: unsupported output " 
"type when Requantize is fused.")); } if (context->HasAttr("Tsummand")) { OP_REQUIRES_OK(context, context->GetAttr("Tsummand", &summand_dt)); if (!this->get_fuse_add()) { OP_REQUIRES( context, summand_dt == out_dt, absl::InvalidArgumentError( "QuantizedConv: incorrect summand data type. When Sum is not " "fused, Tsummand attribute must have same value as out_type.")); } } #ifndef ENABLE_ONEDNN_V3 int idx = fuse_requantize ? 1 : 0; #else post_op_to_idx_["src_scale"] = 0; post_op_to_idx_["wei_scale"] = 1; post_op_to_idx_["dst_scale"] = 2; int idx = 3; #endif for (int i = 0; i < fused_ops_.size(); ++i) { if (fused_ops_[i] == "Requantize") { #ifndef ENABLE_ONEDNN_V3 post_op_to_idx_["output_scale"] = 0; #endif } else if (fused_ops_[i] == "Sum") { post_op_to_idx_["sum"] = idx++; } else if (fused_ops_[i] == "Relu") { post_op_to_idx_["activation"] = idx++; } } bool is_filter_const; OP_REQUIRES_OK(context, context->GetAttr("is_filter_const", &is_filter_const)); OP_REQUIRES( context, is_filter_const, absl::InvalidArgumentError("QuantizedConv: filter must be a constant")); if (num_fused_ops == -1) { int non_minmax_arg_idx_base = 2; int minmax_arg_idx_base = 6; int bias_idx_offset = this->get_fuse_biasadd() ? 1 : 0; int summand_idx_offset = this->get_fuse_add() ? 1 : 0; int bias_min_max_idx_offset = this->get_fuse_biasadd() && !(bias_dt == DT_FLOAT || bias_dt == DT_QINT32) ? 2 : 0; min_input_idx_ = non_minmax_arg_idx_base + bias_idx_offset + summand_idx_offset; max_input_idx_ = min_input_idx_ + 1; min_filter_idx_ = min_input_idx_ + 2; max_filter_idx_ = min_input_idx_ + 3; if (this->get_fuse_biasadd()) { min_bias_idx_ = minmax_arg_idx_base + bias_idx_offset + summand_idx_offset; max_bias_idx_ = min_bias_idx_ + 1; } if (this->get_fuse_add()) { this->set_input_add_idx(non_minmax_arg_idx_base + bias_idx_offset); if (summand_dt == DT_QINT8 || summand_dt == DT_QUINT8) { min_summand_idx_ = minmax_arg_idx_base + bias_idx_offset + summand_idx_offset + bias_min_max_idx_offset; max_summand_idx_ = min_summand_idx_ + 1; } } if (fuse_requantize) { min_freezed_output_idx_ = context->num_inputs() - 2; max_freezed_output_idx_ = min_freezed_output_idx_ + 1; } } else { int bias_idx_offset = this->get_fuse_biasadd() ? 1 : 0; min_input_idx_ = 2 + bias_idx_offset; max_input_idx_ = 3 + bias_idx_offset; min_filter_idx_ = 4 + bias_idx_offset; max_filter_idx_ = 5 + bias_idx_offset; if (fuse_requantize) { min_freezed_output_idx_ = 6 + bias_idx_offset; max_freezed_output_idx_ = 7 + bias_idx_offset; } if (this->get_fuse_add()) { int input_add_idx = std::is_same<Toutput, quint8>::value ? 
context->num_inputs() - 1 - 2 : context->num_inputs() - 1; this->set_input_add_idx(input_add_idx); if (summand_dt == DT_QINT8 || summand_dt == DT_QUINT8) { min_summand_idx_ = 9 + bias_idx_offset; max_summand_idx_ = 10 + bias_idx_offset; } } } } void Compute(OpKernelContext* context) override { MklConvOp<Device, Tinput, qint8, Tbias, Toutput, Ttemp_output, int32, false, false, is_depthwise, true>::Compute(context); const float min_input = context->input(min_input_idx_).template scalar<float>()(); const float max_input = context->input(max_input_idx_).template scalar<float>()(); Tensor* output_min = nullptr; Tensor* output_max = nullptr; if (std::is_same<Toutput, quint8>::value || std::is_same<Toutput, qint8>::value) { OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min)); OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max)); output_min->flat<float>()(0) = context->input(min_freezed_output_idx_).template scalar<float>()(); output_max->flat<float>()(0) = context->input(max_freezed_output_idx_).template scalar<float>()(); } else { const Tensor& min_filter = context->input(min_filter_idx_); const Tensor& max_filter = context->input(max_filter_idx_); if (min_filter.dims() == 0) { float min_output_value; float max_output_value; MklQuantizationRangeForMultiplication<Tinput, qint8, qint32>( min_input, max_input, min_filter.scalar<float>()(), max_filter.scalar<float>()(), &min_output_value, &max_output_value); OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min)); OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max)); output_min->flat<float>()(0) = min_output_value; output_max->flat<float>()(0) = max_output_value; } else { size_t depth = min_filter.NumElements(); OP_REQUIRES_OK(context, context->allocate_output( 1, {static_cast<ptrdiff_t>(depth)}, &output_min)); OP_REQUIRES_OK(context, context->allocate_output( 2, {static_cast<ptrdiff_t>(depth)}, &output_max)); MklQuantizationRangeForMultiplication<Tinput, qint8, qint32>( min_input, max_input, min_filter, max_filter, &output_min, &output_max); } } } protected: void ExtendConvFwdParams(OpKernelContext* context, MklConvFwdParams& params) override { MklConvOp<Device, Tinput, qint8, Tbias, Toutput, Ttemp_output, int32, false, false, is_depthwise, true>::ExtendConvFwdParams(context, params); params.post_op_params.resize(post_op_to_idx_.size()); const float min_input = context->input(min_input_idx_).template scalar<float>()(); const float max_input = context->input(max_input_idx_).template scalar<float>()(); const Tensor& min_filter_vector = context->input(min_filter_idx_); const Tensor& max_filter_vector = context->input(max_filter_idx_); OP_REQUIRES( context, ((min_filter_vector.NumElements() > 0) && (max_filter_vector.NumElements() > 0) && (min_filter_vector.shape() == max_filter_vector.shape())), absl::InvalidArgumentError("`min_ and max_filter` must have same" "shape and contain at least one element.")); size_t depth = min_filter_vector.NumElements(); const float* min_filter = min_filter_vector.flat<float>().data(); const float* max_filter = max_filter_vector.flat<float>().data(); std::vector<float> SCALE(depth); float float_input_range = std::max(std::abs(min_input), std::abs(max_input)); #ifdef ENABLE_ONEDNN_V3 float int_input_limit = std::is_same<Tinput, quint8>::value ? 
255.0f : 127.0f; const float src_scale = float_input_range / int_input_limit; #endif if (std::is_same<Toutput, quint8>::value || std::is_same<Toutput, qint8>::value) { const float min_freezed_output = context->input(min_freezed_output_idx_).template scalar<float>()(); const float max_freezed_output = context->input(max_freezed_output_idx_).template scalar<float>()(); float int_output_limit = std::is_same<Toutput, quint8>::value ? 255.0f : 127.0f; float float_output_range = std::max(std::abs(min_freezed_output), std::abs(max_freezed_output)); #ifndef ENABLE_ONEDNN_V3 const float int_const_scale_limit = (std::is_same<Tinput, quint8>::value) ? 255.0 * 127.0 : 127.0 * 127.0; #endif for (size_t i = 0; i < depth; ++i) { float float_filter_range = std::max(std::abs(min_filter[i]), std::abs(max_filter[i])); #ifndef ENABLE_ONEDNN_V3 scales[i] = int_output_limit * float_input_range * float_filter_range / (int_const_scale_limit * float_output_range); #else wei_scale[i] = float_filter_range / 127.0; #endif } #ifndef ENABLE_ONEDNN_V3 FactoryKeyCreator param_key; param_key.AddAsKey<float>(min_input); param_key.AddAsKey<float>(max_input); param_key.AddAsKey<float>(min_freezed_output); param_key.AddAsKey<float>(max_freezed_output); param_key.AddAsKey<const float*>(min_filter); param_key.AddAsKey<const float*>(max_filter); params.post_op_params[post_op_to_idx_["output_scale"]] = { "output_scale", dnnl::algorithm::undef, scales, param_key.GetKey()}; #else const float dst_scale = float_output_range / int_output_limit; FactoryKeyCreator dst_param_key; dst_param_key.AddAsKey<float>(min_freezed_output); dst_param_key.AddAsKey<float>(max_freezed_output); params.post_op_params[post_op_to_idx_["dst_scale"]] = { "dst_scale", dnnl::algorithm::undef, {dst_scale}, dst_param_key.GetKey()}; #endif } else { #ifdef ENABLE_ONEDNN_V3 if (!std::is_same<Toutput, qint32>::value) TF_CHECK_OK(absl::FailedPreconditionError( "Output datatype is expected to be qint32.")); float min_min_filter = min_filter[0]; float max_max_filter = max_filter[0]; for (size_t i = 0; i < depth; ++i) { float float_filter_range = std::max(std::abs(min_filter[i]), std::abs(max_filter[i])); wei_scale[i] = float_filter_range / 127.0; if (min_filter[i] < min_min_filter) min_min_filter = min_filter[i]; if (max_filter[i] > max_max_filter) max_max_filter = max_filter[i]; } const float single_wei_scale = std::max(std::abs(min_min_filter), std::abs(max_max_filter)) / 127.0; const float dst_scale = single_wei_scale * src_scale; FactoryKeyCreator dst_param_key; dst_param_key.AddAsKey<float>(dst_scale); params.post_op_params[post_op_to_idx_["dst_scale"]] = { "dst_scale", dnnl::algorithm::undef, {dst_scale}, dst_param_key.GetKey()}; #endif } #ifdef ENABLE_ONEDNN_V3 FactoryKeyCreator src_param_key; src_param_key.AddAsKey<float>(min_input); src_param_key.AddAsKey<float>(max_input); FactoryKeyCreator wei_param_key; wei_param_key.AddAsKey<const float*>(min_filter); wei_param_key.AddAsKey<const float*>(max_filter); params.post_op_params[post_op_to_idx_["src_scale"]] = { "src_scale", dnnl::algorithm::undef, {src_scale}, src_param_key.GetKey()}; params.post_op_params[post_op_to_idx_["wei_scale"]] = { "wei_scale", dnnl::algorithm::undef, wei_scale, wei_param_key.GetKey()}; #endif if (this->get_fuse_add()) { DataType summand_dt = this->input_type(this->get_input_add_idx()); if (std::is_same<Toutput, quint8>::value) { bool summand_condition = (summand_dt == DT_QINT8) || (summand_dt == DT_QUINT8); DCHECK((summand_condition)); const Tensor& min_freezed_output_tensor = 
context->input(min_freezed_output_idx_); const Tensor& max_freezed_output_tensor = context->input(max_freezed_output_idx_); OP_REQUIRES( context, TensorShapeUtils::IsScalar(min_freezed_output_tensor.shape()), absl::InvalidArgumentError( absl::StrCat("`min_freezed_output` must be rank 0 but is rank ", min_freezed_output_tensor.dims()))); OP_REQUIRES( context, TensorShapeUtils::IsScalar(max_freezed_output_tensor.shape()), absl::InvalidArgumentError( absl::StrCat("`max_freezed_output` must be rank 0 but is rank ", max_freezed_output_tensor.dims()))); const Tensor& min_freezed_summand_tensor = context->input(min_summand_idx_); const Tensor& max_freezed_summand_tensor = context->input(max_summand_idx_); OP_REQUIRES( context, TensorShapeUtils::IsScalar(min_freezed_summand_tensor.shape()), absl::InvalidArgumentError(absl::StrCat( "`min_freezed_summand` must be rank 0 but is rank ", min_freezed_summand_tensor.dims()))); OP_REQUIRES( context, TensorShapeUtils::IsScalar(max_freezed_summand_tensor.shape()), absl::InvalidArgumentError(absl::StrCat( "`max_freezed_summand` must be rank 0 but is rank ", max_freezed_summand_tensor.dims()))); #ifndef ENABLE_ONEDNN_V3 const float min_freezed_output = min_freezed_output_tensor.template scalar<float>()(); const float max_freezed_output = max_freezed_output_tensor.template scalar<float>()(); float output_range = std::max(std::abs(min_freezed_output), std::abs(max_freezed_output)); #endif const float min_freezed_summand = min_freezed_summand_tensor.template scalar<float>()(); const float max_freezed_summand = max_freezed_summand_tensor.template scalar<float>()(); float summand_range = std::max(std::abs(min_freezed_summand), std::abs(max_freezed_summand)); if (summand_dt == DT_QUINT8) { params.post_op_params[post_op_to_idx_["sum"]] = { "sum", dnnl::algorithm::undef, {SUMMAND_SCALE_U8(summand_range, output_range)}, ""}; } else { params.post_op_params[post_op_to_idx_["sum"]] = { "sum", dnnl::algorithm::undef, {SUMMAND_SCALE_S8(summand_range, output_range)}, ""}; } } else { params.post_op_params[post_op_to_idx_["sum"]] = {"sum", dnnl::algorithm::undef, {1.0}, "", #ifdef ENABLE_ONEDNN_V3 summand_dt #endif }; } } if (IsFused(oneDNNFusedOps::kRelu)) { params.post_op_params[post_op_to_idx_["activation"]] = { "activation", dnnl::algorithm::eltwise_relu, {1.0, 0.0, 0.0}, ""}; } } void AllocateOutputTensor(OpKernelContext* context, const ConvFwdPd& conv_prim_desc, const memory::dims& output_dims_mkl_order, MklTensorFormat output_tf_format, MklDnnShape* output_mkl_shape, Tensor** output_tensor) override { if (!this->get_fuse_add()) { MklConvOp< Device, Tinput, qint8, Tbias, Toutput, Ttemp_output, int32, false, false, is_depthwise, true>::AllocateOutputTensor(context, conv_prim_desc, output_dims_mkl_order, output_tf_format, output_mkl_shape, output_tensor); } else { if (std::is_same<Toutput, quint8>::value) { int summand_idx = this->get_input_add_idx(); DataType summand_dt = this->input_type(summand_idx); bool summand_condition = (summand_dt == DT_QINT8) || (summand_dt == DT_QUINT8); DCHECK((summand_condition)); Tensor& summand = const_cast<Tensor&>(context->input(summand_idx)); if (summand_dt == DT_QINT8) { OP_REQUIRES_OK(context, summand.BitcastFrom(summand, DT_QUINT8, summand.shape())); } OP_REQUIRES(context, context->forward_input_to_output_with_shape( summand_idx, 0, summand.shape(), output_tensor), absl::InvalidArgumentError( "Summand cannot be forwarded in the current fusion.")); return; } #ifndef ENABLE_ONEDNN_V3 MklConvOp< Device, Tinput, qint8, Tbias, Toutput, 
Ttemp_output, int32, false, false, is_depthwise, true>::AllocateOutputTensor(context, conv_prim_desc, output_dims_mkl_order, output_tf_format, output_mkl_shape, output_tensor); const Tensor& summand = context->input(this->get_input_add_idx()); if (summand.dtype() != DT_FLOAT) TF_CHECK_OK(absl::FailedPreconditionError( "Current fusion requires summand to be float")); const float min_input = context->input(min_input_idx_).template scalar<float>()(); const float max_input = context->input(max_input_idx_).template scalar<float>()(); const Tensor& min_filter_vector = context->input(min_filter_idx_); const Tensor& max_filter_vector = context->input(max_filter_idx_); const float* min_filter = min_filter_vector.flat<float>().data(); const float* max_filter = max_filter_vector.flat<float>().data(); const float int_const_scale_limit = (std::is_same<Tinput, quint8>::value) ? 255.0 * 127.0 : 127.0 * 127.0; size_t depth = min_filter_vector.NumElements(); std::vector<float> scales(depth); for (size_t i = 0; i < depth; ++i) { scales[i] = int_const_scale_limit / (std::max(std::abs(max_input), std::abs(min_input)) * std::max(std::abs(max_filter[i]), std::abs(min_filter[i]))); } dnnl::primitive_attr reorder_attr; #ifndef ENABLE_ONEDNN_V3 if (depth == 1) { reorder_attr.set_output_scales(0, scales); } else { reorder_attr.set_output_scales(2, scales); } #else DCHECK_EQ(depth, 1); reorder_attr.set_scales_mask(DNNL_ARG_SRC, 0); reorder_attr.set_scales_mask(DNNL_ARG_WEIGHTS, 0); reorder_attr.set_scales_mask(DNNL_ARG_DST, 0); #endif auto summand_md = memory::desc(output_dims_mkl_order, MklDnnType<Tbias>(), memory::format_tag::nhwc); void* summand_buf = static_cast<void*>(const_cast<Tbias*>(summand.flat<Tbias>().data())); void* dst_buf = static_cast<void*>((*output_tensor)->flat<Ttemp_output>().data()); summand_.reset(new memory(summand_md, this->cpu_engine_, summand_buf)); dst_.reset( new memory(conv_prim_desc.dst_desc(), this->cpu_engine_, dst_buf)); auto reorder_desc = ReorderPd(this->cpu_engine_, summand_md, this->cpu_engine_, conv_prim_desc.dst_desc(), reorder_attr); CreateAndExecuteReorder(reorder_desc, *summand_, *dst_, this->cpu_engine_, context); #else int summand_idx = this->get_input_add_idx(); DataType summand_dt = this->input_type(summand_idx); if (summand_dt != DT_FLOAT) TF_CHECK_OK(absl::FailedPreconditionError( "Summand datatype is expected to be float.")); Tensor& summand_float = const_cast<Tensor&>(context->input(summand_idx)); OP_REQUIRES_OK(context, summand_float.BitcastFrom(summand_float, DT_QINT32, summand_float.shape())); OP_REQUIRES(context, context->forward_input_to_output_with_shape( summand_idx, 0, summand_float.shape(), output_tensor), absl::InvalidArgumentError( "Summand cannot be forwarded in the current fusion.")); #endif } } void* GetBiasHandle(OpKernelContext* context, std::shared_ptr<ConvFwdPd>& conv_fwd_pd, const Tensor& bias_tensor) override { if (!this->get_fuse_biasadd()) { return nullptr; } #ifndef ENABLE_ONEDNN_V3 if (std::is_same<Tbias, qint32>::value) { return static_cast<Tbias*>( const_cast<Tbias*>(bias_tensor.flat<Tbias>().data())); } const float min_input = context->input(min_input_idx_).template scalar<float>()(); const float max_input = context->input(max_input_idx_).template scalar<float>()(); const Tensor& min_filter_vector = context->input(min_filter_idx_); const Tensor& max_filter_vector = context->input(max_filter_idx_); const float* min_filter = min_filter_vector.flat<float>().data(); const float* max_filter = max_filter_vector.flat<float>().data(); const float 
int_const_scale_limit = (std::is_same<Tinput, quint8>::value) ? 255.0 * 127.0 : 127.0 * 127.0; size_t depth = min_filter_vector.NumElements(); bool scales_are_valid = (depth == scales_.size()); scales_.resize(depth); for (size_t i = 0; i < depth; ++i) { float tmp_scale = int_const_scale_limit / (std::max(std::abs(max_input), std::abs(min_input)) * std::max(std::abs(max_filter[i]), std::abs(min_filter[i]))); if (scales_are_valid && std::abs(tmp_scale - scales_[i]) > 1e-6) { scales_are_valid = false; } scales_[i] = tmp_scale; } if (!is_bias_const_ || IsBiasCacheEmpty(context) || !scales_are_valid) { dnnl::primitive_attr bias_attr; #ifndef ENABLE_ONEDNN_V3 if (depth == 1) { bias_attr.set_output_scales(0, scales_); } else { bias_attr.set_output_scales(1, scales_); } #else DCHECK_EQ(depth, 1); bias_attr.set_scales_mask(DNNL_ARG_SRC, 0); bias_attr.set_scales_mask(DNNL_ARG_WEIGHTS, 0); bias_attr.set_scales_mask(DNNL_ARG_DST, 0); #endif auto bias_md = memory::desc({static_cast<int>(bias_tensor.NumElements())}, MklDnnType<Tbias>(), memory::format_tag::x); void* bias_buf = static_cast<void*>( const_cast<Tbias*>(bias_tensor.flat<Tbias>().data())); if (!input_bias_) { input_bias_ = new memory(bias_md, this->cpu_engine_, bias_buf); } else { input_bias_->set_data_handle(bias_buf); } if (!scaled_bias_buf_) AllocTmpBuffer<Tbias>(context, &scaled_bias_tensor_, conv_fwd_pd->bias_desc(), &scaled_bias_buf_); if (!scaled_bias_) { scaled_bias_ = new memory(bias_md, this->cpu_engine_, scaled_bias_buf_); } else { scaled_bias_->set_data_handle(scaled_bias_buf_); } auto reorder_desc = ReorderPd(this->cpu_engine_, input_bias_->get_desc(), this->cpu_engine_, scaled_bias_->get_desc(), bias_attr); CreateAndExecuteReorder(reorder_desc, *input_bias_, *scaled_bias_, this->cpu_engine_, context); Tbias* bias_data = reinterpret_cast<Tbias*>(scaled_bias_->get_data_handle()); if (is_bias_const_) CacheBias(context, conv_fwd_pd, bias_data, scaled_bias_); return bias_data; } return GetCachedBias(context); #else if (std::is_same<Tbias, float>::value) { return static_cast<Tbias*>( const_cast<Tbias*>(bias_tensor.flat<Tbias>().data())); } const float min_input = context->input(min_input_idx_).template scalar<float>()(); const float max_input = context->input(max_input_idx_).template scalar<float>()(); const Tensor& min_filter_vector = context->input(min_filter_idx_); const Tensor& max_filter_vector = context->input(max_filter_idx_); if ((min_filter_vector.NumElements() == 0) || (max_filter_vector.NumElements() == 0) || (min_filter_vector.shape() != max_filter_vector.shape())) { TF_CHECK_OK(absl::FailedPreconditionError( "`min_filter and max_filter` must have same" "shape and contain at least one element.")); } const float* min_filter = min_filter_vector.flat<float>().data(); const float* max_filter = max_filter_vector.flat<float>().data(); const float int_const_scale_limit = (std::is_same<Tinput, quint8>::value) ? 
255.0 * 127.0 : 127.0 * 127.0; size_t depth = min_filter_vector.NumElements(); bool scales_are_valid = (depth == scales_.size()); scales_.resize(depth); for (size_t i = 0; i < depth; ++i) { float tmp_scale = int_const_scale_limit / (std::max(std::abs(max_input), std::abs(min_input)) * std::max(std::abs(max_filter[i]), std::abs(min_filter[i]))); if (scales_are_valid && std::abs(tmp_scale - scales_[i]) > 1e-6) { scales_are_valid = false; } scales_[i] = tmp_scale; } if (!is_bias_const_ || IsBiasCacheEmpty(context) || !scales_are_valid) { dnnl::primitive_attr reorder_attr; if (depth == 1) { reorder_attr.set_scales_mask(DNNL_ARG_DST, 0); } else { reorder_attr.set_scales_mask(DNNL_ARG_DST, 1); } auto bias_md = memory::desc({static_cast<int>(bias_tensor.NumElements())}, MklDnnType<Tbias>(), memory::format_tag::x); void* bias_buf = static_cast<void*>( const_cast<Tbias*>(bias_tensor.flat<Tbias>().data())); if (!input_bias_) { input_bias_ = new memory(bias_md, this->cpu_engine_, bias_buf); } else { input_bias_->set_data_handle(bias_buf); } if (!scaled_bias_buf_) { AllocTmpBuffer<float>(context, &scaled_bias_tensor_, conv_fwd_pd->bias_desc(), &scaled_bias_buf_); } if (!scaled_bias_) { scaled_bias_ = new memory(conv_fwd_pd->bias_desc(), this->cpu_engine_, scaled_bias_buf_); } else { scaled_bias_->set_data_handle(scaled_bias_buf_); } std::unique_ptr<memory> scale_mem( new memory({{static_cast<int64_t>(depth)}, MklDnnType<float>(), memory::format_tag::x}, this->cpu_engine_, scales_.data())); auto reorder_desc = ReorderPd(this->cpu_engine_, input_bias_->get_desc(), this->cpu_engine_, scaled_bias_->get_desc(), reorder_attr); CreateAndExecuteReorder(reorder_desc, *input_bias_, *scaled_bias_, this->cpu_engine_, context, scale_mem.get()); float* bias_data = reinterpret_cast<float*>(scaled_bias_->get_data_handle()); if (is_bias_const_) CacheBias(context, conv_fwd_pd, bias_data, scaled_bias_); return bias_data; } return GetCachedBias(context); #endif } bool is_bias_const_; Tensor cached_bias_data_ TF_GUARDED_BY(bias_cache_mu_); memory* input_bias_ = nullptr; memory* scaled_bias_ = nullptr; Tensor scaled_bias_tensor_; void* scaled_bias_buf_ = nullptr; private: std::vector<float> scales_; mutex bias_cache_mu_; std::vector<string> fused_ops_; std::map<string, int> post_op_to_idx_; int64_t fused_op_flags_ = 0; std::unordered_map<string, oneDNNFusedOps> str_to_enum_{ {"BiasAdd", oneDNNFusedOps::kBias}, {"Sum", oneDNNFusedOps::kSum}, {"Relu", oneDNNFusedOps::kRelu}, {"Requantize", oneDNNFusedOps::kRequantize}}; std::shared_ptr<dnnl::memory> summand_; std::shared_ptr<dnnl::memory> dst_; int min_input_idx_ = -1; int max_input_idx_ = -1; int min_filter_idx_ = -1; int max_filter_idx_ = -1; int min_bias_idx_ = -1; int max_bias_idx_ = -1; int min_summand_idx_ = -1; int max_summand_idx_ = -1; int min_freezed_output_idx_ = -1; int max_freezed_output_idx_ = -1; inline bool IsFused(oneDNNFusedOps op) { return fused_op_flags_ & (static_cast<int64_t>(op)); } inline oneDNNFusedOps StrToEnum(const string op) { CHECK_EQ(str_to_enum_.find(op) != str_to_enum_.end(), true) << "Error: Unknown post op: " << op; return str_to_enum_[op]; } void AllocateTensor(OpKernelContext* context, const ConvFwdPd& conv_prim_desc, Tensor** bias_tensor) { DCHECK(bias_tensor); TensorShape bias_tf_shape; bias_tf_shape.AddDim( (conv_prim_desc.bias_desc().get_size() / sizeof(TSCALED_BIAS))); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<TSCALED_BIAS>::value, bias_tf_shape, &cached_bias_data_)); *bias_tensor = &cached_bias_data_; } inline 
bool IsBiasCacheEmpty(OpKernelContext* context) TF_LOCKS_EXCLUDED(bias_cache_mu_) { tf_shared_lock lock(bias_cache_mu_); return (cached_bias_data_.NumElements() == 0); } void CacheBias(OpKernelContext* context, const std::shared_ptr<ConvFwdPd>& conv_fwd_pd, TSCALED_BIAS* bias_data, const memory* scaled_bias) TF_LOCKS_EXCLUDED(bias_cache_mu_) { mutex_lock lock(bias_cache_mu_); if (cached_bias_data_.NumElements() > 0) { return; } Tensor* bias_tensor_ptr = nullptr; AllocateTensor(context, *conv_fwd_pd, &bias_tensor_ptr); void* cached_bias_data = const_cast<void*>( static_cast<const void*>(bias_tensor_ptr->flat<TSCALED_BIAS>().data())); size_t cached_bias_data_size = scaled_bias->get_desc().get_size(); memcpy(cached_bias_data, bias_data, cached_bias_data_size); } TSCALED_BIAS* GetCachedBias(OpKernelContext* context) TF_LOCKS_EXCLUDED(bias_cache_mu_) { tf_shared_lock lock(bias_cache_mu_); const Tensor& cached_bias_data = cached_bias_data_; return static_cast<TSCALED_BIAS*>(const_cast<TSCALED_BIAS*>( cached_bias_data.flat<TSCALED_BIAS>().data())); } }; template <typename Device, typename Tinput, typename Tfilter, typename Tbias, typename Toutput, typename Ttemp_output, typename Tpadding, bool pad_enabled, bool native_format> class MklFusedConv3DOp : public MklConvOp<Device, Tinput, Tfilter, Tbias, Toutput, Ttemp_output, Tpadding, false, false, false, native_format> { public: explicit MklFusedConv3DOp(OpKernelConstruction* context) : MklConvOp<Device, Tinput, Tfilter, Tbias, Toutput, Ttemp_output, Tpadding, false, false, false, native_format>(context) { std::vector<string> fused_ops; OP_REQUIRES_OK(context, context->GetAttr("fused_ops", &fused_ops)); int num_args; OP_REQUIRES_OK(context, context->GetAttr("num_args", &num_args)); std::vector<int> padding_list; OP_REQUIRES_OK(context, context->GetAttr("padding_list", &padding_list)); if (padding_list.empty()) { OP_REQUIRES( context, !fused_ops.empty(), absl::InvalidArgumentError("Fused Conv3D must have at least one " "fused op when Pad is not fused.")); if (std::find(fused_ops.begin(), fused_ops.end(), "BiasAdd") == fused_ops.end()) { OP_REQUIRES(context, num_args == 1, absl::InvalidArgumentError( "Fused Conv3D must have one extra argument: bias.")); } else if (std::find(fused_ops.begin(), fused_ops.end(), "BiasAdd") == fused_ops.end() && std::find(fused_ops.begin(), fused_ops.end(), "Add") == fused_ops.end()) { OP_REQUIRES( context, num_args == 2, absl::InvalidArgumentError( "Fused Conv3D must have two extra arguments: bias and add.")); } } if (fused_ops == std::vector<string>{"BiasAdd"}) { this->set_fuse_biasadd(true); } else if (fused_ops == std::vector<string>{"BiasAdd", "LeakyRelu"}) { this->set_fuse_biasadd(true); float leakyrelu_alpha; OP_REQUIRES_OK(context, context->GetAttr("leakyrelu_alpha", &leakyrelu_alpha)); this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu, leakyrelu_alpha); } else if (fused_ops == std::vector<string>{"BiasAdd", "Mish"}) { this->set_fuse_biasadd(true); this->set_fuse_activation(true, dnnl::algorithm::eltwise_mish); } else if (fused_ops == std::vector<string>{"BiasAdd", "Relu"}) { this->set_fuse_biasadd(true); this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu); } else if (fused_ops == std::vector<string>{"BiasAdd", "Relu6"}) { this->set_fuse_biasadd(true); this->SET_FUSE_ACTIVATION_FOR_RELU6; } else if (fused_ops == std::vector<string>{"BiasAdd", "Elu"}) { this->set_fuse_biasadd(true); this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0); } else if (fused_ops == 
std::vector<string>{"BiasAdd", "Add"}) { this->set_fuse_biasadd(true); this->set_fuse_add(true); } else if (fused_ops == std::vector<string>{"BiasAdd", "Add", "Relu"}) { this->set_fuse_biasadd(true); this->set_fuse_add(true); this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu); } else if (fused_ops == std::vector<string>{"BiasAdd", "Add", "Relu6"}) { this->set_fuse_biasadd(true); this->set_fuse_add(true); this->SET_FUSE_ACTIVATION_FOR_RELU6; } else if (fused_ops == std::vector<string>{"BiasAdd", "Add", "Elu"}) { this->set_fuse_biasadd(true); this->set_fuse_add(true); this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0); } else if (fused_ops == std::vector<string>{"BiasAdd", "Add", "LeakyRelu"}) { this->set_fuse_biasadd(true); this->set_fuse_add(true); float leakyrelu_alpha; OP_REQUIRES_OK(context, context->GetAttr("leakyrelu_alpha", &leakyrelu_alpha)); this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu, leakyrelu_alpha); } else { if (padding_list.empty()) { OP_REQUIRES(context, false, absl::UnimplementedError( absl::StrCat("Fusion is not implemented: [", absl::StrJoin(fused_ops, ","), "]"))); } } } virtual ~MklFusedConv3DOp() {} }; #define REGISTER_MKL_KERNEL(op, kernel, input_type, bias_type, output_type, \ summand_type, is_depthwise, legacy_fused_ops, \ num_fused_ops) \ REGISTER_KERNEL_BUILDER( \ Name(op) \ .Device(DEVICE_CPU) \ .TypeConstraint<input_type>("Tinput") \ .TypeConstraint<qint8>("Tfilter") BIAS_TYPE_CONSTRAINT(bias_type) \ SUMMAND_TYPE_CONSTRAINT(summand_type) \ .TypeConstraint<output_type>("out_type") LABEL, \ kernel TEMPLATE_ARGS(CPUDevice, input_type, bias_type, output_type, \ summand_type, is_depthwise, legacy_fused_ops, \ num_fused_ops)); #define REGISTER_MKL_KERNEL_ALL_INPUT_TYPES( \ op, kernel, bias_type, output_type, summand_type, is_depthwise, \ legacy_fused_ops, num_fused_ops) \ REGISTER_MKL_KERNEL(op, kernel, qint8, bias_type, output_type, summand_type, \ is_depthwise, legacy_fused_ops, num_fused_ops); \ REGISTER_MKL_KERNEL(op, kernel, quint8, bias_type, output_type, \ summand_type, is_depthwise, legacy_fused_ops, \ num_fused_ops); #define REGISTER_MKL_KERNEL_ALL_BIAS_TYPES( \ op, kernel, input_type, output_type, summand_type, is_depthwise, \ legacy_fused_ops, num_fused_ops) \ REGISTER_MKL_KERNEL(op, kernel, input_type, qint32, output_type, \ summand_type, is_depthwise, legacy_fused_ops, \ num_fused_ops); \ REGISTER_MKL_KERNEL(op, kernel, input_type, float, output_type, \ summand_type, is_depthwise, legacy_fused_ops, \ num_fused_ops); #define REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES( \ op, kernel, output_type, summand_type, is_depthwise, legacy_fused_ops, \ num_fused_ops) \ REGISTER_MKL_KERNEL_ALL_INPUT_TYPES(op, kernel, qint32, output_type, \ summand_type, is_depthwise, \ legacy_fused_ops, num_fused_ops); \ REGISTER_MKL_KERNEL_ALL_INPUT_TYPES(op, kernel, float, output_type, \ summand_type, is_depthwise, \ legacy_fused_ops, num_fused_ops); #define LABEL #define TEMPLATE_ARGS(CPUDevice, input_type, bias_type, output_type, \ summand_type, has_bias, is_depthwise, is_native) #define BIAS_TYPE_CONSTRAINT(bias_type) #define SUMMAND_TYPE_CONSTRAINT(summand_type) REGISTER_MKL_KERNEL("QuantizedConv2D", NoOp, quint8, float, qint32, qint32, false, false, false); REGISTER_MKL_KERNEL_ALL_INPUT_TYPES("QuantizedConv2DWithBias", NoOp, float, qint32, qint32, false, false, false); REGISTER_MKL_KERNEL_ALL_INPUT_TYPES("QuantizedConv2DWithBiasAndRelu", NoOp, float, qint32, qint32, false, false, false); 
REGISTER_MKL_KERNEL("QuantizedConv2DWithBiasSumAndRelu", NoOp, quint8, float, qint32, qint32, false, false, false); REGISTER_MKL_KERNEL("QuantizedConv2DAndRequantize", NoOp, quint8, float, qint8, qint8, false, false, false); REGISTER_MKL_KERNEL("QuantizedConv2DPerChannel", NoOp, quint8, float, qint32, qint32, false, false, false); REGISTER_MKL_KERNEL("QuantizedConv2DAndRelu", NoOp, quint8, float, qint32, qint32, false, false, false); REGISTER_MKL_KERNEL("QuantizedConv2DAndReluAndRequantize", NoOp, quint8, float, quint8, quint8, false, false, false); REGISTER_MKL_KERNEL("QuantizedDepthwiseConv2D", NoOp, quint8, float, qint32, qint32, false, false, false); REGISTER_MKL_KERNEL("QuantizedDepthwiseConv2DWithBias", NoOp, quint8, float, qint32, qint32, false, false, false); REGISTER_MKL_KERNEL("QuantizedDepthwiseConv2DWithBiasAndRelu", NoOp, quint8, float, qint32, qint32, false, false, false); #undef SUMMAND_TYPE_CONSTRAINT #undef BIAS_TYPE_CONSTRAINT #define BIAS_TYPE_CONSTRAINT(bias_type) .TypeConstraint<bias_type>("Tbias") #define SUMMAND_TYPE_CONSTRAINT(summand_type) REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES( "QuantizedConv2DWithBiasAndRequantize", NoOp, qint8, qint8, false, false, false); REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES( "QuantizedConv2DWithBiasAndReluAndRequantize", NoOp, quint8, quint8, false, false, false); REGISTER_MKL_KERNEL_ALL_BIAS_TYPES( "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", NoOp, quint8, quint8, quint8, false, false, false); #undef SUMMAND_TYPE_CONSTRAINT #define SUMMAND_TYPE_CONSTRAINT(summand_type) \ .TypeConstraint<summand_type>("Tsummand") REGISTER_MKL_KERNEL_ALL_BIAS_TYPES( "QuantizedConv2DWithBiasSumAndReluAndRequantize", NoOp, quint8, quint8, quint8, false, false, false); REGISTER_MKL_KERNEL_ALL_BIAS_TYPES( "QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", NoOp, quint8, quint8, qint8, false, false, false); #undef SUMMAND_TYPE_CONSTRAINT #undef BIAS_TYPE_CONSTRAINT #undef TEMPLATE_ARGS #undef LABEL #define TEMPLATE_ARGS(CPUDevice, input_type, bias_type, output_type, \ summand_type, is_depthwise, legacy_fused_ops, \ num_fused_ops) \ <CPUDevice, input_type, bias_type, output_type, summand_type, is_depthwise, \ legacy_fused_ops, num_fused_ops> #define BIAS_TYPE_CONSTRAINT(bias_type) #define SUMMAND_TYPE_CONSTRAINT(summand_type) #define LABEL .Label(mkl_op_registry::kMklQuantizedOpLabel) REGISTER_MKL_KERNEL_ALL_INPUT_TYPES("_MklQuantizedConv2D", MklQuantizedConvOp, float, qint32, qint32, false, quantized_fusions::none, 0); REGISTER_MKL_KERNEL_ALL_INPUT_TYPES("_MklQuantizedConv2DPerChannel", MklQuantizedConvOp, float, qint32, qint32, false, quantized_fusions::none, 0); REGISTER_MKL_KERNEL_ALL_INPUT_TYPES("_MklQuantizedConv2DWithBias", MklQuantizedConvOp, float, qint32, qint32, false, quantized_fusions::bias, 1); REGISTER_MKL_KERNEL_ALL_INPUT_TYPES("_MklQuantizedConv2DWithBiasAndRelu", MklQuantizedConvOp, float, qint32, qint32, false, quantized_fusions::bias_relu, 2); REGISTER_MKL_KERNEL("_MklQuantizedConv2DWithBiasSumAndRelu", MklQuantizedConvOp, quint8, float, qint32, qint32, false, quantized_fusions::bias_sum_relu, 3); REGISTER_MKL_KERNEL("_MklQuantizedConv2DAndRequantize", MklQuantizedConvOp, quint8, float, qint8, qint8, false, quantized_fusions::requantize, 1); REGISTER_MKL_KERNEL("_MklQuantizedConv2DAndRelu", MklQuantizedConvOp, quint8, float, qint32, qint32, false, quantized_fusions::relu, 1); REGISTER_MKL_KERNEL("_MklQuantizedConv2DAndReluAndRequantize", MklQuantizedConvOp, quint8, float, quint8, quint8, false, 
quantized_fusions::relu_requantize, 2); REGISTER_MKL_KERNEL("_MklQuantizedDepthwiseConv2D", MklQuantizedConvOp, quint8, float, qint32, qint32, true, quantized_fusions::none, 0); REGISTER_MKL_KERNEL("_MklQuantizedDepthwiseConv2DWithBias", MklQuantizedConvOp, quint8, float, qint32, qint32, true, quantized_fusions::bias, 1); REGISTER_MKL_KERNEL("_MklQuantizedDepthwiseConv2DWithBiasAndRelu", MklQuantizedConvOp, quint8, float, qint32, qint32, true, quantized_fusions::bias_relu, 2); #undef SUMMAND_TYPE_CONSTRAINT #undef BIAS_TYPE_CONSTRAINT #define BIAS_TYPE_CONSTRAINT(bias_type) .TypeConstraint<bias_type>("Tbias") #define SUMMAND_TYPE_CONSTRAINT(summand_type) REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES( "_MklQuantizedConv2DWithBiasAndRequantize", MklQuantizedConvOp, qint8, qint8, false, quantized_fusions::bias_requantize, 2); REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES( "_MklQuantizedConv2DWithBiasAndReluAndRequantize", MklQuantizedConvOp, quint8, quint8, false, quantized_fusions::bias_relu_requantize, 3); REGISTER_MKL_KERNEL_ALL_BIAS_TYPES( "_MklQuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", MklQuantizedConvOp, quint8, quint8, quint8, true, quantized_fusions::bias_relu_requantize, 3); #undef LABEL #define LABEL REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedConv2D", MklQuantizedConvOp, qint32, qint32, false, quantized_fusions::none, -1) REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedDepthwiseConv2D", MklQuantizedConvOp, qint32, qint32, true, quantized_fusions::none, -1) #undef LABEL #define LABEL .Label(mkl_op_registry::kMklQuantizedOpLabel) #undef SUMMAND_TYPE_CONSTRAINT #define SUMMAND_TYPE_CONSTRAINT(summand_type) \ .TypeConstraint<summand_type>("Tsummand") REGISTER_MKL_KERNEL_ALL_BIAS_TYPES( "_MklQuantizedConv2DWithBiasSumAndReluAndRequantize", MklQuantizedConvOp, quint8, quint8, quint8, false, quantized_fusions::bias_sum_relu_requantize, 4); REGISTER_MKL_KERNEL_ALL_BIAS_TYPES( "_MklQuantizedConv2DWithBiasSignedSumAndReluAndRequantize", MklQuantizedConvOp, quint8, quint8, qint8, false, quantized_fusions::bias_sum_relu_requantize, 4); #undef LABEL #define LABEL REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedConv2D", MklQuantizedConvOp, qint8, qint8, false, quantized_fusions::none, -1); REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedConv2D", MklQuantizedConvOp, quint8, qint8, false, quantized_fusions::none, -1); REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedConv2D", MklQuantizedConvOp, quint8, quint8, false, quantized_fusions::none, -1); REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedConv2D", MklQuantizedConvOp, qint8, quint8, false, quantized_fusions::none, -1); REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedDepthwiseConv2D", MklQuantizedConvOp, qint8, qint8, true, quantized_fusions::none, -1); REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedDepthwiseConv2D", MklQuantizedConvOp, quint8, qint8, true, quantized_fusions::none, -1); REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedDepthwiseConv2D", MklQuantizedConvOp, quint8, quint8, true, quantized_fusions::none, -1); REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedDepthwiseConv2D", MklQuantizedConvOp, qint8, quint8, true, quantized_fusions::none, -1); #undef LABEL #undef SUMMAND_TYPE_CONSTRAINT #undef BIAS_TYPE_CONSTRAINT #undef TEMPLATE_ARGS #define REGISTER_NO_OP_CPU_2D_DEPTHWISE(T) \ REGISTER_KERNEL_BUILDER(Name("_FusedDepthwiseConv2dNative") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T"), \ 
NoOp); TF_CALL_float(REGISTER_NO_OP_CPU_2D_DEPTHWISE); TF_CALL_bfloat16(REGISTER_NO_OP_CPU_2D_DEPTHWISE); TF_CALL_half(REGISTER_NO_OP_CPU_2D_DEPTHWISE); #define REGISTER_MKL_CPU_2D(T) \ REGISTER_KERNEL_BUILDER( \ Name("_MklConv2D") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \ MklConvOp<CPUDevice, T, T, T, T, T, int32, false, false, false, false>); \ REGISTER_KERNEL_BUILDER( \ Name("_MklConv2DWithBias") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \ MklConvOp<CPUDevice, T, T, T, T, T, int32, true, false, false, false>); \ REGISTER_KERNEL_BUILDER( \ Name("__MklDummyConv2DWithBias") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \ MklDummyOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER( \ Name("_MklPadWithConv2D") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<int32>("Tpaddings") \ .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \ MklConvOp<CPUDevice, T, T, T, T, T, int32, false, true, false, false>); \ REGISTER_KERNEL_BUILDER( \ Name("_MklPadWithConv2D") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<int64_t>("Tpaddings") \ .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \ MklConvOp<CPUDevice, T, T, T, T, T, int64, false, true, false, false>); \ REGISTER_KERNEL_BUILDER( \ Name("__MklDummyPadWithConv2D") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<int32>("Tpaddings") \ .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \ MklDummyOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER( \ Name("_MklNativeConv2D") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .Label(mkl_op_registry::kMklNameChangeOpLabel), \ MklConvOp<CPUDevice, T, T, T, T, T, int32, false, false, false, true>); \ REGISTER_KERNEL_BUILDER( \ Name("_MklNativeConv2DWithBias") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .Label(mkl_op_registry::kMklNameChangeOpLabel), \ MklConvOp<CPUDevice, T, T, T, T, T, int32, true, false, false, true>); \ REGISTER_KERNEL_BUILDER( \ Name("_MklNativePadWithConv2D") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<int32>("Tpaddings") \ .Label(mkl_op_registry::kMklNameChangeOpLabel), \ MklConvOp<CPUDevice, T, T, T, T, T, int32, false, true, false, true>); \ REGISTER_KERNEL_BUILDER( \ Name("_MklNativePadWithConv2D") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<int64_t>("Tpaddings") \ .Label(mkl_op_registry::kMklNameChangeOpLabel), \ MklConvOp<CPUDevice, T, T, T, T, T, int64, false, true, false, true>); TF_CALL_float(REGISTER_MKL_CPU_2D); TF_CALL_bfloat16(REGISTER_MKL_CPU_2D); TF_CALL_half(REGISTER_MKL_CPU_2D); #define REGISTER_MKL_CPU_2D_DEPTHWISE(T) \ REGISTER_KERNEL_BUILDER( \ Name("_MklDepthwiseConv2dNative") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \ MklConvOp<CPUDevice, T, T, T, T, T, int32, false, false, true, false>); \ REGISTER_KERNEL_BUILDER( \ Name("_MklFusedDepthwiseConv2dNative") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \ MklFusedDepthwiseConvOp<CPUDevice, T, T, T, T, T, int32, false, true, \ true, false>); \ REGISTER_KERNEL_BUILDER( \ Name("_MklNativeFusedDepthwiseConv2dNative") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .Label(mkl_op_registry::kMklNameChangeOpLabel), \ MklFusedDepthwiseConvOp<CPUDevice, T, T, T, T, T, int32, false, true, \ true, true>); \ REGISTER_KERNEL_BUILDER( \ 
Name("_MklNativeDepthwiseConv2dNative") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .Label(mkl_op_registry::kMklNameChangeOpLabel), \ MklConvOp<CPUDevice, T, T, T, T, T, int32, false, false, true, true>); TF_CALL_float(REGISTER_MKL_CPU_2D_DEPTHWISE); TF_CALL_bfloat16(REGISTER_MKL_CPU_2D_DEPTHWISE); TF_CALL_half(REGISTER_MKL_CPU_2D_DEPTHWISE); #define REGISTER_MKL_CPU_2D_FUSED(T) \ REGISTER_KERNEL_BUILDER( \ Name("_MklFusedConv2D") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \ MklFusedConvOp<CPUDevice, T, T, T, T, T, int32, false, false>); \ REGISTER_KERNEL_BUILDER( \ Name("_MklPadWithFusedConv2D") \ .Device(DEVICE_CPU) \ .TypeConstraint<int32>("Tpaddings") \ .TypeConstraint<T>("T") \ .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \ MklFusedConvOp<CPUDevice, T, T, T, T, T, int32, true, false>); \ REGISTER_KERNEL_BUILDER( \ Name("_MklPadWithFusedConv2D") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<int64_t>("Tpaddings") \ .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \ MklFusedConvOp<CPUDevice, T, T, T, T, T, int64, true, false>); \ REGISTER_KERNEL_BUILDER( \ Name("__MklDummyPadWithFusedConv2D") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<int32>("Tpaddings") \ .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \ MklDummyOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER( \ Name("_MklNativeFusedConv2D") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .Label(mkl_op_registry::kMklNameChangeOpLabel), \ MklFusedConvOp<CPUDevice, T, T, T, T, T, int32, false, true>); \ REGISTER_KERNEL_BUILDER( \ Name("_MklNativePadWithFusedConv2D") \ .Device(DEVICE_CPU) \ .TypeConstraint<int32>("Tpaddings") \ .TypeConstraint<T>("T") \ .Label(mkl_op_registry::kMklNameChangeOpLabel), \ MklFusedConvOp<CPUDevice, T, T, T, T, T, int32, true, true>); \ REGISTER_KERNEL_BUILDER( \ Name("_MklNativePadWithFusedConv2D") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<int64_t>("Tpaddings") \ .Label(mkl_op_registry::kMklNameChangeOpLabel), \ MklFusedConvOp<CPUDevice, T, T, T, T, T, int64, true, true>); TF_CALL_float(REGISTER_MKL_CPU_2D_FUSED); TF_CALL_bfloat16(REGISTER_MKL_CPU_2D_FUSED); TF_CALL_half(REGISTER_MKL_CPU_2D_FUSED); #define REGISTER_MKL_CPU_3D(T) \ REGISTER_KERNEL_BUILDER( \ Name("_MklConv3D") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .Label(mkl_op_registry::kMklLayoutDependentOpLabel), \ MklConvOp<CPUDevice, T, T, T, T, T, int32, false, false, false, false>); \ REGISTER_KERNEL_BUILDER( \ Name("_MklNativeConv3D") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .Label(mkl_op_registry::kMklNameChangeOpLabel), \ MklConvOp<CPUDevice, T, T, T, T, T, int32, false, false, false, true>); \ REGISTER_KERNEL_BUILDER( \ Name("_MklNativeFusedConv3D") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .Label(mkl_op_registry::kMklNameChangeOpLabel), \ MklFusedConv3DOp<CPUDevice, T, T, T, T, T, int32, false, true>); TF_CALL_float(REGISTER_MKL_CPU_3D); TF_CALL_bfloat16(REGISTER_MKL_CPU_3D); TF_CALL_half(REGISTER_MKL_CPU_3D); #undef APPEND_DEPTHWISE #undef APPEND_ELTWISE #undef GET_DATA_TYPE #undef SET_FUSE_ACTIVATION_FOR_RELU6 #undef SET_MKL_LAYOUT #undef OUTPUT_SCALE_DCHECK #undef TSCALED_BIAS #undef SCALE #undef SUMMAND_SCALE_U8 #undef SUMMAND_SCALE_S8 } #endif
#include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/public/session.h" #include "tensorflow/core/util/mkl_util.h" namespace tensorflow { struct Conv2DDimensions { Conv2DDimensions(int n, int h, int w, int c, int fc, int fh, int fw) : input_batches(n), input_height(h), input_width(w), input_depth(c), filter_count(fc), filter_height(fh), filter_width(fw) {} int input_batches; int input_height; int input_width; int input_depth; int filter_count; int filter_height; int filter_width; }; static Tensor GetRandomTensor(const TensorShape& shape) { Tensor tensor(DT_FLOAT, TensorShape(shape)); tensor.flat<float>() = tensor.flat<float>().setRandom(); return tensor; } static Tensor GetRandomInputTensor(const Conv2DDimensions& dims) { return GetRandomTensor({dims.input_batches, dims.input_height, dims.input_width, dims.input_depth}); } static Tensor GetRandomFilterTensor(const Conv2DDimensions& dims) { return GetRandomTensor({dims.filter_height, dims.filter_width, dims.input_depth, dims.filter_count}); } static Tensor GetRandomOutputTensor(const Conv2DDimensions& dims) { return GetRandomTensor({dims.input_batches, dims.input_height, dims.input_width, dims.filter_count}); } static Tensor GetInputSizesTensor(const Conv2DDimensions& dims) { return test::AsTensor<int32>({dims.input_batches, dims.input_height, dims.input_width, dims.input_depth}); } static Tensor GetFilterSizesTensor(const Conv2DDimensions& dims) { return test::AsTensor<int32>({dims.filter_height, dims.filter_width, dims.input_depth, dims.filter_count}); } static Graph* DefaultConv2D(const Conv2DDimensions& dims) { auto* graph = new Graph(OpRegistry::Global()); Tensor input_t = GetRandomInputTensor(dims); Tensor filter_t = GetRandomFilterTensor(dims); Node* input = test::graph::Constant(graph, input_t, "input"); Node* filter = test::graph::Constant(graph, filter_t, "filter"); Node* conv2d; TF_CHECK_OK(NodeBuilder(graph->NewName("conv_2d"), "Conv2D") .Input(input) .Input(filter) .Attr("T", DT_FLOAT) .Attr("strides", {1, 1, 1, 1}) .Attr("padding", "SAME") .Finalize(graph, &conv2d)); return graph; } static Graph* MklConv2D(const Conv2DDimensions& dims) { auto* graph = new Graph(OpRegistry::Global()); Tensor input_t = GetRandomInputTensor(dims); Tensor filter_t = GetRandomFilterTensor(dims); Node* input = test::graph::Constant(graph, input_t, "input"); Node* filter = test::graph::Constant(graph, filter_t, "filter"); Node* not_mkl_shape = test::graph::Constant(graph, GetMklMetaTensor(), "not_mkl"); Node* conv2d; TF_CHECK_OK(NodeBuilder(graph->NewName("mkl_conv_2d"), "_MklConv2D") .Input(input) .Input(filter) .Input(not_mkl_shape) .Input(not_mkl_shape) .Attr("T", DT_FLOAT) .Attr("strides", {1, 1, 1, 1}) .Attr("padding", "SAME") .Attr("_kernel", "MklOp") .Finalize(graph, &conv2d)); return graph; } static Graph* DefaultConv2DBwdInput(const Conv2DDimensions& dims) { auto* graph = new Graph(OpRegistry::Global()); Tensor input_sizes_t = GetInputSizesTensor(dims); Tensor filter_t = 
GetRandomFilterTensor(dims); Tensor out_backprop_t = GetRandomOutputTensor(dims); Node* input_sizes = test::graph::Constant(graph, input_sizes_t, "input_sizes"); Node* filter = test::graph::Constant(graph, filter_t, "filter"); Node* out_backprop = test::graph::Constant(graph, out_backprop_t, "out_backprop"); Node* conv2d_bwd_input; TF_CHECK_OK( NodeBuilder(graph->NewName("conv_2d_bwd_input"), "Conv2DBackpropInput") .Input(input_sizes) .Input(filter) .Input(out_backprop) .Attr("T", DT_FLOAT) .Attr("strides", {1, 1, 1, 1}) .Attr("padding", "SAME") .Finalize(graph, &conv2d_bwd_input)); return graph; } static Graph* MklConv2DBwdInput(const Conv2DDimensions& dims) { auto* graph = new Graph(OpRegistry::Global()); Tensor input_sizes_t = GetInputSizesTensor(dims); Tensor filter_t = GetRandomFilterTensor(dims); Tensor out_backprop_t = GetRandomOutputTensor(dims); Node* input_sizes = test::graph::Constant(graph, input_sizes_t, "input_sizes"); Node* filter = test::graph::Constant(graph, filter_t, "filter"); Node* out_backprop = test::graph::Constant(graph, out_backprop_t, "out_backprop"); Node* not_mkl_shape = test::graph::Constant(graph, GetMklMetaTensor(), "not_mkl"); Node* conv2d_bwd_input; TF_CHECK_OK(NodeBuilder(graph->NewName("conv_2d_bwd_input"), "_MklConv2DBackpropInput") .Input(input_sizes) .Input(filter) .Input(out_backprop) .Input(not_mkl_shape) .Input(not_mkl_shape) .Input(not_mkl_shape) .Attr("T", DT_FLOAT) .Attr("strides", {1, 1, 1, 1}) .Attr("padding", "SAME") .Attr("_kernel", "MklOp") .Finalize(graph, &conv2d_bwd_input)); return graph; } static Graph* DefaultConv2DBwdFilter(const Conv2DDimensions& dims) { auto* graph = new Graph(OpRegistry::Global()); Tensor input_t = GetRandomInputTensor(dims); Tensor filter_sizes_t = GetFilterSizesTensor(dims); Tensor filter_t = GetRandomFilterTensor(dims); Tensor out_backprop_t = GetRandomOutputTensor(dims); Node* input = test::graph::Constant(graph, input_t, "input"); Node* filter_sizes = test::graph::Constant(graph, filter_sizes_t, "filter_sizes"); Node* out_backprop = test::graph::Constant(graph, out_backprop_t, "out_backprop"); Node* conv2d_bwd_filter; TF_CHECK_OK( NodeBuilder(graph->NewName("conv_2d_bwd_filter"), "Conv2DBackpropFilter") .Input(input) .Input(filter_sizes) .Input(out_backprop) .Attr("T", DT_FLOAT) .Attr("strides", {1, 1, 1, 1}) .Attr("padding", "SAME") .Finalize(graph, &conv2d_bwd_filter)); return graph; } static Graph* MklConv2DBwdFilter(const Conv2DDimensions& dims) { Graph* graph = new Graph(OpRegistry::Global()); Tensor input_t = GetRandomInputTensor(dims); Tensor filter_sizes_t = GetFilterSizesTensor(dims); Tensor filter_t = GetRandomFilterTensor(dims); Tensor out_backprop_t = GetRandomOutputTensor(dims); Node* input = test::graph::Constant(graph, input_t, "input"); Node* filter_sizes = test::graph::Constant(graph, filter_sizes_t, "filter_sizes"); Node* out_backprop = test::graph::Constant(graph, out_backprop_t, "out_backprop"); Node* not_mkl_shape = test::graph::Constant(graph, GetMklMetaTensor(), "not_mkl"); Node* conv2d_bwd_filter; TF_CHECK_OK(NodeBuilder(graph->NewName("conv_2d_bwd_filter"), "_MklConv2DBackpropFilter") .Input(input) .Input(filter_sizes) .Input(out_backprop) .Input(not_mkl_shape) .Input(not_mkl_shape) .Input(not_mkl_shape) .Attr("T", DT_FLOAT) .Attr("strides", {1, 1, 1, 1}) .Attr("padding", "SAME") .Attr("_kernel", "MklOp") .Finalize(graph, &conv2d_bwd_filter)); return graph; } #define BM_CONCAT(a, b) a##b #define BM_NAME(p, type, N, H, W, C, FC, FH, FW) \ 
BM_CONCAT(BM_##p##_##type##_in_##N##_##H##_##W##_##C, _f_##FC##_##FH##_##FW) #define BM_Conv2DT(kind, N, H, W, C, FC, FH, FW, type, LABEL) \ static void BM_NAME(Conv2D_##kind, type, N, H, W, C, FC, FH, \ FW)(::testing::benchmark::State & state) { \ state.SetLabel(LABEL); \ \ int64 num_computed_elements = (N) * (H) * (W) * (FC); \ int64 flops_per_iter = num_computed_elements * ((C) * (FH) * (FW)); \ \ Conv2DDimensions dims(N, H, W, C, FC, FW, FH); \ test::Benchmark(#type, BM_CONCAT(kind, Conv2D)(dims), \ false) \ .Run(state); \ state.SetItemsProcessed(state.iterations() * flops_per_iter); \ } \ BENCHMARK(BM_NAME(Conv2D_##kind, type, N, H, W, C, FC, FH, FW)) #define BM_Conv2D(N, H, W, C, FC, FH, FW, type, LABEL) \ BM_Conv2DT(Default, N, H, W, C, FC, FH, FW, type, LABEL); \ BM_Conv2DT(Mkl, N, H, W, C, FC, FH, FW, type, LABEL); #define BM_Conv2DBwdInputT(kind, N, H, W, C, FC, FH, FW, type, LABEL) \ static void BM_NAME(Conv2DBwdInput_##kind, type, N, H, W, C, FC, FH, \ FW)(::testing::benchmark::State & state) { \ state.SetLabel(LABEL); \ \ int64 num_computed_elements = (N) * (H) * (W) * (C); \ int64 flops_per_iter = num_computed_elements * ((C) * (FH) * (FW)); \ \ Conv2DDimensions dims(N, H, W, C, FC, FW, FH); \ test::Benchmark(#type, BM_CONCAT(kind, Conv2DBwdInput)(dims), \ false) \ .Run(state); \ state.SetItemsProcessed(state.iterations() * flops_per_iter); \ } \ BENCHMARK(BM_NAME(Conv2DBwdInput_##kind, type, N, H, W, C, FC, FH, FW)) #define BM_Conv2DBwdInput(N, H, W, C, FC, FH, FW, type, LABEL) \ BM_Conv2DBwdInputT(Default, N, H, W, C, FC, FH, FW, type, LABEL); \ BM_Conv2DBwdInputT(Mkl, N, H, W, C, FC, FH, FW, type, LABEL); #define BM_Conv2DBwdFilterT(kind, N, H, W, C, FC, FH, FW, type, LABEL) \ static void BM_NAME(Conv2DBwdFilter_##kind, type, N, H, W, C, FC, FH, \ FW)(::testing::benchmark::State & state) { \ state.SetLabel(LABEL); \ \ int64 num_computed_elements = (FH) * (FW) * (C) * (FC); \ int64 flops_per_iter = num_computed_elements * ((N) * (H) * (W)); \ \ Conv2DDimensions dims(N, H, W, C, FC, FW, FH); \ test::Benchmark(#type, BM_CONCAT(kind, Conv2DBwdFilter)(dims), \ false) \ .Run(state); \ state.SetItemsProcessed(state.iterations() * flops_per_iter); \ } \ BENCHMARK(BM_NAME(Conv2DBwdFilter_##kind, type, N, H, W, C, FC, FH, FW)) #define BM_Conv2DBwdFilter(N, H, W, C, FC, FH, FW, type, LABEL) \ BM_Conv2DBwdFilterT(Default, N, H, W, C, FC, FH, FW, type, LABEL); \ BM_Conv2DBwdFilterT(Mkl, N, H, W, C, FC, FH, FW, type, LABEL); BM_Conv2D(32, 28, 28, 96, 128, 3, 3, cpu, "conv3a_00_3x3"); BM_Conv2D(32, 28, 28, 16, 32, 5, 5, cpu, "conv3a_00_5x5"); BM_Conv2D(32, 28, 28, 128, 192, 3, 3, cpu, "conv3_00_3x3"); BM_Conv2D(32, 28, 28, 32, 96, 5, 5, cpu, "conv3_00_5x5"); BM_Conv2D(32, 14, 14, 96, 204, 3, 3, cpu, "conv4a_00_3x3"); BM_Conv2D(32, 14, 14, 16, 48, 5, 5, cpu, "conv4a_00_5x5"); BM_Conv2D(32, 14, 14, 112, 224, 3, 3, cpu, "conv4b_00_3x3"); BM_Conv2DBwdInput(32, 28, 28, 96, 128, 3, 3, cpu, "conv3a_00_3x3"); BM_Conv2DBwdInput(32, 28, 28, 16, 32, 5, 5, cpu, "conv3a_00_5x5"); BM_Conv2DBwdInput(32, 28, 28, 128, 192, 3, 3, cpu, "conv3_00_3x3"); BM_Conv2DBwdInput(32, 28, 28, 32, 96, 5, 5, cpu, "conv3_00_5x5"); BM_Conv2DBwdInput(32, 14, 14, 96, 204, 3, 3, cpu, "conv4a_00_3x3"); BM_Conv2DBwdInput(32, 14, 14, 16, 48, 5, 5, cpu, "conv4a_00_5x5"); BM_Conv2DBwdInput(32, 14, 14, 112, 224, 3, 3, cpu, "conv4b_00_3x3"); BM_Conv2DBwdFilter(32, 28, 28, 96, 128, 3, 3, cpu, "conv3a_00_3x3"); BM_Conv2DBwdFilter(32, 28, 28, 16, 32, 5, 5, cpu, "conv3a_00_5x5"); BM_Conv2DBwdFilter(32, 28, 28, 128, 192, 3, 3, cpu, 
"conv3_00_3x3"); BM_Conv2DBwdFilter(32, 28, 28, 32, 96, 5, 5, cpu, "conv3_00_5x5"); BM_Conv2DBwdFilter(32, 14, 14, 96, 204, 3, 3, cpu, "conv4a_00_3x3"); BM_Conv2DBwdFilter(32, 14, 14, 16, 48, 5, 5, cpu, "conv4a_00_5x5"); BM_Conv2DBwdFilter(32, 14, 14, 112, 224, 3, 3, cpu, "conv4b_00_3x3"); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_conv_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_conv_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
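The benchmark macros in the test above set items-processed analytically rather than by measurement: one multiply-accumulate per computed output element per element of the reduction window. A minimal standalone sketch (not part of the TensorFlow sources; all names here are illustrative) that mirrors those formulas exactly as written, evaluated for the conv3a_00_3x3 configuration (N=32, H=W=28, C=96, FC=128, FH=FW=3):

#include <cstdint>
#include <cstdio>

// Mirrors the FLOP accounting in BM_Conv2DT, BM_Conv2DBwdInputT and
// BM_Conv2DBwdFilterT as written in the benchmark file above (the
// backward-input variant multiplies by C * FH * FW, per the source).
int64_t ForwardFlops(int64_t N, int64_t H, int64_t W, int64_t C, int64_t FC,
                     int64_t FH, int64_t FW) {
  return (N * H * W * FC) * (C * FH * FW);
}
int64_t BwdInputFlops(int64_t N, int64_t H, int64_t W, int64_t C, int64_t FC,
                      int64_t FH, int64_t FW) {
  return (N * H * W * C) * (C * FH * FW);
}
int64_t BwdFilterFlops(int64_t N, int64_t H, int64_t W, int64_t C, int64_t FC,
                       int64_t FH, int64_t FW) {
  return (FH * FW * C * FC) * (N * H * W);
}

int main() {
  // conv3a_00_3x3: BM_Conv2D(32, 28, 28, 96, 128, 3, 3, cpu, ...).
  std::printf("forward:    %lld\n",
              static_cast<long long>(ForwardFlops(32, 28, 28, 96, 128, 3, 3)));
  std::printf("bwd input:  %lld\n",
              static_cast<long long>(BwdInputFlops(32, 28, 28, 96, 128, 3, 3)));
  std::printf("bwd filter: %lld\n",
              static_cast<long long>(BwdFilterFlops(32, 28, 28, 96, 128, 3, 3)));
  return 0;
}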
beb94efe-689c-4864-a58d-c462f72b8745
cpp
google/leveldb
filter_block
table/filter_block.cc
table/filter_block_test.cc
#include "table/filter_block.h" #include "leveldb/filter_policy.h" #include "util/coding.h" namespace leveldb { static const size_t kFilterBaseLg = 11; static const size_t kFilterBase = 1 << kFilterBaseLg; FilterBlockBuilder::FilterBlockBuilder(const FilterPolicy* policy) : policy_(policy) {} void FilterBlockBuilder::StartBlock(uint64_t block_offset) { uint64_t filter_index = (block_offset / kFilterBase); assert(filter_index >= filter_offsets_.size()); while (filter_index > filter_offsets_.size()) { GenerateFilter(); } } void FilterBlockBuilder::AddKey(const Slice& key) { Slice k = key; start_.push_back(keys_.size()); keys_.append(k.data(), k.size()); } Slice FilterBlockBuilder::Finish() { if (!start_.empty()) { GenerateFilter(); } const uint32_t array_offset = result_.size(); for (size_t i = 0; i < filter_offsets_.size(); i++) { PutFixed32(&result_, filter_offsets_[i]); } PutFixed32(&result_, array_offset); result_.push_back(kFilterBaseLg); return Slice(result_); } void FilterBlockBuilder::GenerateFilter() { const size_t num_keys = start_.size(); if (num_keys == 0) { filter_offsets_.push_back(result_.size()); return; } start_.push_back(keys_.size()); tmp_keys_.resize(num_keys); for (size_t i = 0; i < num_keys; i++) { const char* base = keys_.data() + start_[i]; size_t length = start_[i + 1] - start_[i]; tmp_keys_[i] = Slice(base, length); } filter_offsets_.push_back(result_.size()); policy_->CreateFilter(&tmp_keys_[0], static_cast<int>(num_keys), &result_); tmp_keys_.clear(); keys_.clear(); start_.clear(); } FilterBlockReader::FilterBlockReader(const FilterPolicy* policy, const Slice& contents) : policy_(policy), data_(nullptr), offset_(nullptr), num_(0), base_lg_(0) { size_t n = contents.size(); if (n < 5) return; base_lg_ = contents[n - 1]; uint32_t last_word = DecodeFixed32(contents.data() + n - 5); if (last_word > n - 5) return; data_ = contents.data(); offset_ = data_ + last_word; num_ = (n - 5 - last_word) / 4; } bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) { uint64_t index = block_offset >> base_lg_; if (index < num_) { uint32_t start = DecodeFixed32(offset_ + index * 4); uint32_t limit = DecodeFixed32(offset_ + index * 4 + 4); if (start <= limit && limit <= static_cast<size_t>(offset_ - data_)) { Slice filter = Slice(data_ + start, limit - start); return policy_->KeyMayMatch(key, filter); } else if (start == limit) { return false; } } return true; } }
#include "table/filter_block.h" #include "gtest/gtest.h" #include "leveldb/filter_policy.h" #include "util/coding.h" #include "util/hash.h" #include "util/logging.h" #include "util/testutil.h" namespace leveldb { class TestHashFilter : public FilterPolicy { public: const char* Name() const override { return "TestHashFilter"; } void CreateFilter(const Slice* keys, int n, std::string* dst) const override { for (int i = 0; i < n; i++) { uint32_t h = Hash(keys[i].data(), keys[i].size(), 1); PutFixed32(dst, h); } } bool KeyMayMatch(const Slice& key, const Slice& filter) const override { uint32_t h = Hash(key.data(), key.size(), 1); for (size_t i = 0; i + 4 <= filter.size(); i += 4) { if (h == DecodeFixed32(filter.data() + i)) { return true; } } return false; } }; class FilterBlockTest : public testing::Test { public: TestHashFilter policy_; }; TEST_F(FilterBlockTest, EmptyBuilder) { FilterBlockBuilder builder(&policy_); Slice block = builder.Finish(); ASSERT_EQ("\\x00\\x00\\x00\\x00\\x0b", EscapeString(block)); FilterBlockReader reader(&policy_, block); ASSERT_TRUE(reader.KeyMayMatch(0, "foo")); ASSERT_TRUE(reader.KeyMayMatch(100000, "foo")); } TEST_F(FilterBlockTest, SingleChunk) { FilterBlockBuilder builder(&policy_); builder.StartBlock(100); builder.AddKey("foo"); builder.AddKey("bar"); builder.AddKey("box"); builder.StartBlock(200); builder.AddKey("box"); builder.StartBlock(300); builder.AddKey("hello"); Slice block = builder.Finish(); FilterBlockReader reader(&policy_, block); ASSERT_TRUE(reader.KeyMayMatch(100, "foo")); ASSERT_TRUE(reader.KeyMayMatch(100, "bar")); ASSERT_TRUE(reader.KeyMayMatch(100, "box")); ASSERT_TRUE(reader.KeyMayMatch(100, "hello")); ASSERT_TRUE(reader.KeyMayMatch(100, "foo")); ASSERT_TRUE(!reader.KeyMayMatch(100, "missing")); ASSERT_TRUE(!reader.KeyMayMatch(100, "other")); } TEST_F(FilterBlockTest, MultiChunk) { FilterBlockBuilder builder(&policy_); builder.StartBlock(0); builder.AddKey("foo"); builder.StartBlock(2000); builder.AddKey("bar"); builder.StartBlock(3100); builder.AddKey("box"); builder.StartBlock(9000); builder.AddKey("box"); builder.AddKey("hello"); Slice block = builder.Finish(); FilterBlockReader reader(&policy_, block); ASSERT_TRUE(reader.KeyMayMatch(0, "foo")); ASSERT_TRUE(reader.KeyMayMatch(2000, "bar")); ASSERT_TRUE(!reader.KeyMayMatch(0, "box")); ASSERT_TRUE(!reader.KeyMayMatch(0, "hello")); ASSERT_TRUE(reader.KeyMayMatch(3100, "box")); ASSERT_TRUE(!reader.KeyMayMatch(3100, "foo")); ASSERT_TRUE(!reader.KeyMayMatch(3100, "bar")); ASSERT_TRUE(!reader.KeyMayMatch(3100, "hello")); ASSERT_TRUE(!reader.KeyMayMatch(4100, "foo")); ASSERT_TRUE(!reader.KeyMayMatch(4100, "bar")); ASSERT_TRUE(!reader.KeyMayMatch(4100, "box")); ASSERT_TRUE(!reader.KeyMayMatch(4100, "hello")); ASSERT_TRUE(reader.KeyMayMatch(9000, "box")); ASSERT_TRUE(reader.KeyMayMatch(9000, "hello")); ASSERT_TRUE(!reader.KeyMayMatch(9000, "foo")); ASSERT_TRUE(!reader.KeyMayMatch(9000, "bar")); } }
https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/table/filter_block.cc
https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/table/filter_block_test.cc
23e35d792b9154f922b8b575b12596a4d8664c65
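FilterBlockBuilder and FilterBlockReader above are normally driven by the table builder/reader, but they can be exercised directly. A minimal round-trip sketch, assuming it is compiled inside the leveldb source tree and using the real NewBloomFilterPolicy from leveldb/filter_policy.h (this file itself is illustrative, not part of the repository):

// Build a filter block for one data block, then query it back.
#include <cassert>
#include "leveldb/filter_policy.h"
#include "table/filter_block.h"

int main() {
  const leveldb::FilterPolicy* policy = leveldb::NewBloomFilterPolicy(10);

  leveldb::FilterBlockBuilder builder(policy);
  builder.StartBlock(0);  // data block starting at file offset 0
  builder.AddKey("alpha");
  builder.AddKey("beta");
  // Finish() appends the offset array, its start offset, and kFilterBaseLg.
  leveldb::Slice block = builder.Finish();

  leveldb::FilterBlockReader reader(policy, block);
  assert(reader.KeyMayMatch(0, "alpha"));  // added keys never miss
  // "gamma" was never added; a Bloom filter may still (rarely) say true.
  (void)reader.KeyMayMatch(0, "gamma");

  delete policy;  // NewBloomFilterPolicy returns a caller-owned pointer
  return 0;
}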
464b7a60-847b-4ac6-bf92-ce88e1c0c36b
cpp
tensorflow/tensorflow
conditional_canonicalizer
third_party/xla/xla/service/conditional_canonicalizer.cc
third_party/xla/xla/service/conditional_canonicalizer_test.cc
#include "xla/service/conditional_canonicalizer.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/status_macros.h" namespace xla { namespace { absl::Status CanonicalizeNonTupleConditional(HloInstruction* conditional) { TF_RET_CHECK(conditional->opcode() == HloOpcode::kConditional); for (auto* branch : conditional->called_computations()) { HloInstruction* root = branch->root_instruction(); TF_RET_CHECK(!root->shape().IsTuple()); HloInstruction* tuple = branch->AddInstruction(HloInstruction::CreateTuple({root})); branch->set_root_instruction(tuple, true); } auto parent = conditional->parent(); const Shape& root_shape = conditional->shape(); auto new_shape = ShapeUtil::MakeTupleShape(absl::MakeSpan(&root_shape, 1)); auto new_conditional = parent->AddInstruction(conditional->CloneWithNewShape(new_shape)); auto gte = parent->AddInstruction( HloInstruction::CreateGetTupleElement(root_shape, new_conditional, 0)); TF_RETURN_IF_ERROR(parent->ReplaceInstruction(conditional, gte)); return absl::OkStatus(); } } absl::StatusOr<bool> ConditionalCanonicalizer::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { XLA_VLOG_LINES( 2, "ConditionalCanonicalizer::Run(), before:\n" + module->ToString()); bool changed = false; for (auto* comp : module->MakeNonfusionComputations(execution_threads)) { for (auto* inst : comp->MakeInstructionPostOrder()) { if (inst->opcode() == HloOpcode::kConditional && !inst->shape().IsTuple()) { TF_RETURN_IF_ERROR(CanonicalizeNonTupleConditional(inst)); changed = true; } } } XLA_VLOG_LINES( 2, "ConditionalCanonicalizer::Run(), after:\n" + module->ToString()); return changed; } }
#include "xla/service/conditional_canonicalizer.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/service/hlo_parser.h" #include "xla/shape_util.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/literal_test_util.h" #include "xla/tests/test_utils.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/types.h" #include "xla/util.h" namespace xla { namespace { namespace op = xla::testing::opcode_matchers; class ConditionalCanonicalizerTest : public HloTestBase { protected: ConditionalCanonicalizerTest() {} }; TEST_F(ConditionalCanonicalizerTest, DenseArrayConditionalRewrite) { auto module = ParseAndReturnVerifiedModule(R"( HloModule _ true_branch { true_param = (s32[3,2]) parameter(0) ROOT root = s32[] constant(0) } false_branch { false_param = (s32[3,2]) parameter(0) ROOT root = s32[] constant(1) } ENTRY entry { param0 = s32[3,2] parameter(0) branch = pred[] constant(false) param_tuple = (s32[3 ,2]) tuple(param0) ROOT conditional = s32[] conditional(branch, param_tuple, param_tuple), true_computation=true_branch, false_computation=false_branch } )") .value(); ConditionalCanonicalizer pass; EXPECT_TRUE(pass.Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), op::GetTupleElement(op::Conditional())); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_canonicalizer.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_canonicalizer_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
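Outside of HloTestBase, the same canonicalization can be applied to a module parsed from HLO text. A hedged sketch, assuming the ParseAndReturnUnverifiedModule entry point from the xla/service/hlo_parser.h header the test already includes (the helper name below is illustrative):

#include <memory>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/conditional_canonicalizer.h"
#include "xla/service/hlo_parser.h"

// Parses `hlo_text` and rewrites every non-tuple-shaped conditional into a
// tuple-shaped conditional followed by a get-tuple-element, as the pass does.
absl::StatusOr<bool> CanonicalizeFromText(absl::string_view hlo_text) {
  absl::StatusOr<std::unique_ptr<xla::HloModule>> module =
      xla::ParseAndReturnUnverifiedModule(hlo_text);
  if (!module.ok()) return module.status();
  xla::ConditionalCanonicalizer pass;
  return pass.Run(module->get());  // true iff any conditional was rewritten
}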
cf805915-470c-4ec5-9114-22bba8f23da3
cpp
tensorflow/tensorflow
io
tensorflow/compiler/mlir/quantization/stablehlo/cc/io.cc
tensorflow/compiler/mlir/quantization/stablehlo/cc/io_test.cc
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h" #include <string> #include <vector> #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" #include "tsl/platform/env.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace stablehlo::quantization::io { absl::StatusOr<std::string> GetLocalTmpFileName(tsl::Env* const env) { std::string tmp_fname{}; if (!env->LocalTempFilename(&tmp_fname)) { return absl::InternalError("Failed to create tmp file name."); } return tmp_fname; } absl::StatusOr<std::string> GetLocalTmpFileName() { return GetLocalTmpFileName(tsl::Env::Default()); } absl::StatusOr<std::string> CreateTmpDir(tsl::Env* const env) { TF_ASSIGN_OR_RETURN(std::string tmp_dir, GetLocalTmpFileName(env)); if (!env->RecursivelyCreateDir(tmp_dir).ok()) { return absl::InternalError( absl::StrFormat("Failed to create tmp dir: '%s'", tmp_dir)); } return tmp_dir; } absl::StatusOr<std::string> CreateTmpDir() { return CreateTmpDir(tsl::Env::Default()); } absl::Status WriteStringToFile(const absl::string_view file_path, const absl::string_view data) { auto* env = tsl::Env::Default(); return WriteStringToFile(env, std::string(file_path), data); } absl::StatusOr<std::string> ReadFileToString( const absl::string_view file_path) { auto* env = tsl::Env::Default(); std::string data{}; absl::Status read_status = ReadFileToString(env, std::string(file_path), &data); if (read_status.ok()) { return data; } else { return read_status; } } absl::StatusOr<std::vector<std::string>> ListDirectory( absl::string_view directory) { std::vector<std::string> children; TF_RETURN_IF_ERROR( tsl::Env::Default()->GetChildren(std::string(directory), &children)); return children; } }
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h" #include <cstdint> #include <fstream> #include <string> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/functional/any_invocable.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "tsl/platform/env.h" #include "tsl/platform/file_system.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/types.h" namespace stablehlo::quantization::io { namespace { using ::testing::Eq; using ::testing::HasSubstr; using ::testing::IsEmpty; using ::testing::Not; using ::testing::SizeIs; using ::testing::UnorderedElementsAre; using ::tsl::testing::IsOk; using ::tsl::testing::StatusIs; class TestEnvBrokenFileSystem : public tsl::Env { public: TestEnvBrokenFileSystem() = default; bool MatchPath(const tsl::string& path, const tsl::string& pattern) override { return false; } void SleepForMicroseconds(int64_t micros) override {} tsl::string GetRunfilesDir() override { return tsl::string("dummy_path"); } int32_t GetCurrentThreadId() override { return 0; } tsl::Thread* StartThread(const tsl::ThreadOptions& thread_options, const tsl::string& name, absl::AnyInvocable<void()> fn) override { return nullptr; } bool GetCurrentThreadName(tsl::string* name) override { return false; } void SchedClosure(absl::AnyInvocable<void()> closure) override {} void SchedClosureAfter(int64_t micros, absl::AnyInvocable<void()> closure) override {} absl::Status LoadDynamicLibrary(const char* library_filename, void** handle) override { return absl::OkStatus(); } absl::Status GetSymbolFromLibrary(void* handle, const char* symbol_name, void** symbol) override { return absl::OkStatus(); } tsl::string FormatLibraryFileName(const tsl::string& name, const tsl::string& version) override { return tsl::string("dummy_path"); } absl::Status GetFileSystemForFile(const std::string& fname, tsl::FileSystem** result) override { return absl::InternalError("Broken file system"); } private: void GetLocalTempDirectories(std::vector<tsl::string>* list) override { list->push_back("/tmp"); } }; class TestEnvBrokenFileSystemAndNoLocalTempDirs : public TestEnvBrokenFileSystem { private: void GetLocalTempDirectories(std::vector<tsl::string>* list) override {} }; TEST(IoTest, GetLocalTmpFileNameGivesValidFileName) { absl::StatusOr<std::string> tmp_file_name = GetLocalTmpFileName(); ASSERT_THAT(tmp_file_name, IsOk()); EXPECT_THAT(*tmp_file_name, Not(IsEmpty())); } TEST(IoTest, GetLocalTmpFileNameWhenNoTempDirsReturnsInternalError) { TestEnvBrokenFileSystemAndNoLocalTempDirs broken_env; absl::StatusOr<std::string> tmp_file_name = GetLocalTmpFileName(&broken_env); EXPECT_THAT(tmp_file_name, StatusIs(absl::StatusCode::kInternal, HasSubstr("Failed to create tmp file name"))); } TEST(IoTest, CreateTmpDirReturnsValidTmpPath) { absl::StatusOr<std::string> tmp_dir = CreateTmpDir(); ASSERT_THAT(tmp_dir, IsOk()); auto* const env = tsl::Env::Default(); EXPECT_THAT(env->FileExists(*tmp_dir), IsOk()); } TEST(IoTest, CreateTmpDirWhenInvalidPathReturnsInternalError) { TestEnvBrokenFileSystem test_env{}; absl::StatusOr<std::string> tmp_dir = CreateTmpDir(&test_env); EXPECT_THAT(tmp_dir, StatusIs(absl::StatusCode::kInternal, HasSubstr("Failed to create tmp dir"))); } TEST(IoTest, WriteStringToFile) { const std::string dst_file_path = absl::StrCat(testing::TempDir(), "/tmp_file"); const absl::Status write_status = WriteStringToFile(dst_file_path, "test_string"); ASSERT_THAT(write_status, IsOk()); auto* const 
env = tsl::Env::Default(); ASSERT_THAT(env->FileExists(dst_file_path), IsOk()); std::string data{}; ASSERT_THAT(tsl::ReadFileToString(env, dst_file_path, &data), IsOk()); EXPECT_THAT(data, Eq("test_string")); } TEST(IoTest, ReadFileToString) { const std::string src_file_path = absl::StrCat(testing::TempDir(), "/tmp_file"); { std::ofstream ofs(src_file_path); ofs << "test_string"; } const absl::StatusOr<std::string> read_status = ReadFileToString(src_file_path); ASSERT_THAT(read_status, IsOk()); EXPECT_THAT(*read_status, Eq("test_string")); } TEST(IoTest, ListChildrenInDirectory) { absl::StatusOr<std::string> tmp_dir = CreateTmpDir(); ASSERT_THAT(tmp_dir, IsOk()); auto* const env = tsl::Env::Default(); EXPECT_THAT(env->FileExists(*tmp_dir), IsOk()); ASSERT_THAT( WriteStringToFile(absl::StrCat(*tmp_dir, "/tmp_file1"), "test_string"), IsOk()); ASSERT_THAT( WriteStringToFile(absl::StrCat(*tmp_dir, "/tmp_file2"), "test_string"), IsOk()); ASSERT_THAT(env->RecursivelyCreateDir(absl::StrCat(*tmp_dir, "/subdir")), IsOk()); absl::StatusOr<std::vector<std::string>> children = ListDirectory(*tmp_dir); EXPECT_THAT(children, IsOk()); EXPECT_THAT(children.value(), SizeIs(3)); EXPECT_THAT(children.value(), UnorderedElementsAre("subdir", "tmp_file1", "tmp_file2")); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/io.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/io_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
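Taken together, the helpers above form a small Status-based file API: every operation returns absl::Status or absl::StatusOr rather than crashing on failure. A minimal usage sketch (standalone main, illustrative; error handling reduced to exit codes):

#include <string>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"

namespace io = stablehlo::quantization::io;

int main() {
  // Create a scratch directory; failures surface as absl::InternalError.
  absl::StatusOr<std::string> dir = io::CreateTmpDir();
  if (!dir.ok()) return 1;

  const std::string path = absl::StrCat(*dir, "/example.txt");
  if (!io::WriteStringToFile(path, "hello").ok()) return 1;

  // Round-trip the contents and list the directory we just populated.
  absl::StatusOr<std::string> contents = io::ReadFileToString(path);
  absl::StatusOr<std::vector<std::string>> children = io::ListDirectory(*dir);
  return (contents.ok() && children.ok() && *contents == "hello") ? 0 : 1;
}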
7aae7bdd-3782-4a7a-a22a-065ac38df147
cpp
google/quiche
quic_unacked_packet_map
quiche/quic/core/quic_unacked_packet_map.cc
quiche/quic/core/quic_unacked_packet_map_test.cc
#include "quiche/quic/core/quic_unacked_packet_map.h" #include <cstddef> #include <limits> #include <type_traits> #include <utility> #include "absl/container/inlined_vector.h" #include "quiche/quic/core/quic_connection_stats.h" #include "quiche/quic/core/quic_packet_number.h" #include "quiche/quic/core/quic_types.h" #include "quiche/quic/core/quic_utils.h" #include "quiche/quic/platform/api/quic_bug_tracker.h" #include "quiche/quic/platform/api/quic_flag_utils.h" namespace quic { namespace { bool WillStreamFrameLengthSumWrapAround(QuicPacketLength lhs, QuicPacketLength rhs) { static_assert( std::is_unsigned<QuicPacketLength>::value, "This function assumes QuicPacketLength is an unsigned integer type."); return std::numeric_limits<QuicPacketLength>::max() - lhs < rhs; } enum QuicFrameTypeBitfield : uint32_t { kInvalidFrameBitfield = 0, kPaddingFrameBitfield = 1, kRstStreamFrameBitfield = 1 << 1, kConnectionCloseFrameBitfield = 1 << 2, kGoawayFrameBitfield = 1 << 3, kWindowUpdateFrameBitfield = 1 << 4, kBlockedFrameBitfield = 1 << 5, kStopWaitingFrameBitfield = 1 << 6, kPingFrameBitfield = 1 << 7, kCryptoFrameBitfield = 1 << 8, kHandshakeDoneFrameBitfield = 1 << 9, kStreamFrameBitfield = 1 << 10, kAckFrameBitfield = 1 << 11, kMtuDiscoveryFrameBitfield = 1 << 12, kNewConnectionIdFrameBitfield = 1 << 13, kMaxStreamsFrameBitfield = 1 << 14, kStreamsBlockedFrameBitfield = 1 << 15, kPathResponseFrameBitfield = 1 << 16, kPathChallengeFrameBitfield = 1 << 17, kStopSendingFrameBitfield = 1 << 18, kMessageFrameBitfield = 1 << 19, kNewTokenFrameBitfield = 1 << 20, kRetireConnectionIdFrameBitfield = 1 << 21, kAckFrequencyFrameBitfield = 1 << 22, kResetStreamAtFrameBitfield = 1 << 23, }; QuicFrameTypeBitfield GetFrameTypeBitfield(QuicFrameType type) { switch (type) { case PADDING_FRAME: return kPaddingFrameBitfield; case RST_STREAM_FRAME: return kRstStreamFrameBitfield; case CONNECTION_CLOSE_FRAME: return kConnectionCloseFrameBitfield; case GOAWAY_FRAME: return kGoawayFrameBitfield; case WINDOW_UPDATE_FRAME: return kWindowUpdateFrameBitfield; case BLOCKED_FRAME: return kBlockedFrameBitfield; case STOP_WAITING_FRAME: return kStopWaitingFrameBitfield; case PING_FRAME: return kPingFrameBitfield; case CRYPTO_FRAME: return kCryptoFrameBitfield; case HANDSHAKE_DONE_FRAME: return kHandshakeDoneFrameBitfield; case STREAM_FRAME: return kStreamFrameBitfield; case ACK_FRAME: return kAckFrameBitfield; case MTU_DISCOVERY_FRAME: return kMtuDiscoveryFrameBitfield; case NEW_CONNECTION_ID_FRAME: return kNewConnectionIdFrameBitfield; case MAX_STREAMS_FRAME: return kMaxStreamsFrameBitfield; case STREAMS_BLOCKED_FRAME: return kStreamsBlockedFrameBitfield; case PATH_RESPONSE_FRAME: return kPathResponseFrameBitfield; case PATH_CHALLENGE_FRAME: return kPathChallengeFrameBitfield; case STOP_SENDING_FRAME: return kStopSendingFrameBitfield; case MESSAGE_FRAME: return kMessageFrameBitfield; case NEW_TOKEN_FRAME: return kNewTokenFrameBitfield; case RETIRE_CONNECTION_ID_FRAME: return kRetireConnectionIdFrameBitfield; case ACK_FREQUENCY_FRAME: return kAckFrequencyFrameBitfield; case RESET_STREAM_AT_FRAME: return kResetStreamAtFrameBitfield; case NUM_FRAME_TYPES: QUIC_BUG(quic_bug_10518_1) << "Unexpected frame type"; return kInvalidFrameBitfield; } QUIC_BUG(quic_bug_10518_2) << "Unexpected frame type"; return kInvalidFrameBitfield; } } QuicUnackedPacketMap::QuicUnackedPacketMap(Perspective perspective) : perspective_(perspective), least_unacked_(FirstSendingPacketNumber()), bytes_in_flight_(0), 
bytes_in_flight_per_packet_number_space_{0, 0, 0}, packets_in_flight_(0), last_inflight_packet_sent_time_(QuicTime::Zero()), last_inflight_packets_sent_time_{ {QuicTime::Zero()}, {QuicTime::Zero()}, {QuicTime::Zero()}}, last_crypto_packet_sent_time_(QuicTime::Zero()), session_notifier_(nullptr), supports_multiple_packet_number_spaces_(false) {} QuicUnackedPacketMap::~QuicUnackedPacketMap() { for (QuicTransmissionInfo& transmission_info : unacked_packets_) { DeleteFrames(&(transmission_info.retransmittable_frames)); } } const QuicTransmissionInfo& QuicUnackedPacketMap::AddDispatcherSentPacket( const DispatcherSentPacket& packet) { QuicPacketNumber packet_number = packet.packet_number; QUICHE_DCHECK_EQ(least_unacked_, FirstSendingPacketNumber()); QUIC_BUG_IF(quic_unacked_map_dispatcher_packet_num_too_small, largest_sent_packet_.IsInitialized() && largest_sent_packet_ >= packet_number) << "largest_sent_packet_: " << largest_sent_packet_ << ", packet_number: " << packet_number; QUICHE_DCHECK_GE(packet_number, least_unacked_ + unacked_packets_.size()); while (least_unacked_ + unacked_packets_.size() < packet_number) { unacked_packets_.push_back(QuicTransmissionInfo()); unacked_packets_.back().state = NEVER_SENT; } QuicTransmissionInfo& info = unacked_packets_.emplace_back(ENCRYPTION_INITIAL, NOT_RETRANSMISSION, packet.sent_time, packet.bytes_sent, false, false, ECN_NOT_ECT); QUICHE_DCHECK(!info.in_flight); info.state = NOT_CONTRIBUTING_RTT; info.largest_acked = packet.largest_acked; largest_sent_largest_acked_.UpdateMax(packet.largest_acked); largest_sent_packet_ = packet_number; return info; } void QuicUnackedPacketMap::AddSentPacket(SerializedPacket* mutable_packet, TransmissionType transmission_type, QuicTime sent_time, bool set_in_flight, bool measure_rtt, QuicEcnCodepoint ecn_codepoint) { const SerializedPacket& packet = *mutable_packet; QuicPacketNumber packet_number = packet.packet_number; QuicPacketLength bytes_sent = packet.encrypted_length; QUIC_BUG_IF(quic_bug_12645_1, largest_sent_packet_.IsInitialized() && largest_sent_packet_ >= packet_number) << "largest_sent_packet_: " << largest_sent_packet_ << ", packet_number: " << packet_number; QUICHE_DCHECK_GE(packet_number, least_unacked_ + unacked_packets_.size()); while (least_unacked_ + unacked_packets_.size() < packet_number) { unacked_packets_.push_back(QuicTransmissionInfo()); unacked_packets_.back().state = NEVER_SENT; } const bool has_crypto_handshake = packet.has_crypto_handshake == IS_HANDSHAKE; QuicTransmissionInfo info(packet.encryption_level, transmission_type, sent_time, bytes_sent, has_crypto_handshake, packet.has_ack_frequency, ecn_codepoint); info.largest_acked = packet.largest_acked; largest_sent_largest_acked_.UpdateMax(packet.largest_acked); if (!measure_rtt) { QUIC_BUG_IF(quic_bug_12645_2, set_in_flight) << "Packet " << mutable_packet->packet_number << ", transmission type " << TransmissionTypeToString(mutable_packet->transmission_type) << ", retransmittable frames: " << QuicFramesToString(mutable_packet->retransmittable_frames) << ", nonretransmittable_frames: " << QuicFramesToString(mutable_packet->nonretransmittable_frames); info.state = NOT_CONTRIBUTING_RTT; } largest_sent_packet_ = packet_number; if (set_in_flight) { const PacketNumberSpace packet_number_space = GetPacketNumberSpace(info.encryption_level); bytes_in_flight_ += bytes_sent; bytes_in_flight_per_packet_number_space_[packet_number_space] += bytes_sent; ++packets_in_flight_; info.in_flight = true; 
largest_sent_retransmittable_packets_[packet_number_space] = packet_number; last_inflight_packet_sent_time_ = sent_time; last_inflight_packets_sent_time_[packet_number_space] = sent_time; } unacked_packets_.push_back(std::move(info)); if (has_crypto_handshake) { last_crypto_packet_sent_time_ = sent_time; } mutable_packet->retransmittable_frames.swap( unacked_packets_.back().retransmittable_frames); } void QuicUnackedPacketMap::RemoveObsoletePackets() { while (!unacked_packets_.empty()) { if (!IsPacketUseless(least_unacked_, unacked_packets_.front())) { break; } DeleteFrames(&unacked_packets_.front().retransmittable_frames); unacked_packets_.pop_front(); ++least_unacked_; } } bool QuicUnackedPacketMap::HasRetransmittableFrames( QuicPacketNumber packet_number) const { QUICHE_DCHECK_GE(packet_number, least_unacked_); QUICHE_DCHECK_LT(packet_number, least_unacked_ + unacked_packets_.size()); return HasRetransmittableFrames( unacked_packets_[packet_number - least_unacked_]); } bool QuicUnackedPacketMap::HasRetransmittableFrames( const QuicTransmissionInfo& info) const { if (!QuicUtils::IsAckable(info.state)) { return false; } for (const auto& frame : info.retransmittable_frames) { if (session_notifier_->IsFrameOutstanding(frame)) { return true; } } return false; } void QuicUnackedPacketMap::RemoveRetransmittability( QuicTransmissionInfo* info) { DeleteFrames(&info->retransmittable_frames); info->first_sent_after_loss.Clear(); } void QuicUnackedPacketMap::RemoveRetransmittability( QuicPacketNumber packet_number) { QUICHE_DCHECK_GE(packet_number, least_unacked_); QUICHE_DCHECK_LT(packet_number, least_unacked_ + unacked_packets_.size()); QuicTransmissionInfo* info = &unacked_packets_[packet_number - least_unacked_]; RemoveRetransmittability(info); } void QuicUnackedPacketMap::IncreaseLargestAcked( QuicPacketNumber largest_acked) { QUICHE_DCHECK(!largest_acked_.IsInitialized() || largest_acked_ <= largest_acked); largest_acked_ = largest_acked; } void QuicUnackedPacketMap::MaybeUpdateLargestAckedOfPacketNumberSpace( PacketNumberSpace packet_number_space, QuicPacketNumber packet_number) { largest_acked_packets_[packet_number_space].UpdateMax(packet_number); } bool QuicUnackedPacketMap::IsPacketUsefulForMeasuringRtt( QuicPacketNumber packet_number, const QuicTransmissionInfo& info) const { return QuicUtils::IsAckable(info.state) && (!largest_acked_.IsInitialized() || packet_number > largest_acked_) && info.state != NOT_CONTRIBUTING_RTT; } bool QuicUnackedPacketMap::IsPacketUsefulForCongestionControl( const QuicTransmissionInfo& info) const { return info.in_flight; } bool QuicUnackedPacketMap::IsPacketUsefulForRetransmittableData( const QuicTransmissionInfo& info) const { return info.first_sent_after_loss.IsInitialized() && (!largest_acked_.IsInitialized() || info.first_sent_after_loss > largest_acked_); } bool QuicUnackedPacketMap::IsPacketUseless( QuicPacketNumber packet_number, const QuicTransmissionInfo& info) const { return !IsPacketUsefulForMeasuringRtt(packet_number, info) && !IsPacketUsefulForCongestionControl(info) && !IsPacketUsefulForRetransmittableData(info); } bool QuicUnackedPacketMap::IsUnacked(QuicPacketNumber packet_number) const { if (packet_number < least_unacked_ || packet_number >= least_unacked_ + unacked_packets_.size()) { return false; } return !IsPacketUseless(packet_number, unacked_packets_[packet_number - least_unacked_]); } void QuicUnackedPacketMap::RemoveFromInFlight(QuicTransmissionInfo* info) { if (info->in_flight) { QUIC_BUG_IF(quic_bug_12645_3, bytes_in_flight_ < 
info->bytes_sent); QUIC_BUG_IF(quic_bug_12645_4, packets_in_flight_ == 0); bytes_in_flight_ -= info->bytes_sent; --packets_in_flight_; const PacketNumberSpace packet_number_space = GetPacketNumberSpace(info->encryption_level); if (bytes_in_flight_per_packet_number_space_[packet_number_space] < info->bytes_sent) { QUIC_BUG(quic_bug_10518_3) << "bytes_in_flight: " << bytes_in_flight_per_packet_number_space_[packet_number_space] << " is smaller than bytes_sent: " << info->bytes_sent << " for packet number space: " << PacketNumberSpaceToString(packet_number_space); bytes_in_flight_per_packet_number_space_[packet_number_space] = 0; } else { bytes_in_flight_per_packet_number_space_[packet_number_space] -= info->bytes_sent; } if (bytes_in_flight_per_packet_number_space_[packet_number_space] == 0) { last_inflight_packets_sent_time_[packet_number_space] = QuicTime::Zero(); } info->in_flight = false; } } void QuicUnackedPacketMap::RemoveFromInFlight(QuicPacketNumber packet_number) { QUICHE_DCHECK_GE(packet_number, least_unacked_); QUICHE_DCHECK_LT(packet_number, least_unacked_ + unacked_packets_.size()); QuicTransmissionInfo* info = &unacked_packets_[packet_number - least_unacked_]; RemoveFromInFlight(info); } absl::InlinedVector<QuicPacketNumber, 2> QuicUnackedPacketMap::NeuterUnencryptedPackets() { absl::InlinedVector<QuicPacketNumber, 2> neutered_packets; QuicPacketNumber packet_number = GetLeastUnacked(); for (QuicUnackedPacketMap::iterator it = begin(); it != end(); ++it, ++packet_number) { if (!it->retransmittable_frames.empty() && it->encryption_level == ENCRYPTION_INITIAL) { QUIC_DVLOG(2) << "Neutering unencrypted packet " << packet_number; RemoveFromInFlight(packet_number); it->state = NEUTERED; neutered_packets.push_back(packet_number); NotifyFramesAcked(*it, QuicTime::Delta::Zero(), QuicTime::Zero()); QUICHE_DCHECK(!HasRetransmittableFrames(*it)); } } QUICHE_DCHECK(!supports_multiple_packet_number_spaces_ || last_inflight_packets_sent_time_[INITIAL_DATA] == QuicTime::Zero()); return neutered_packets; } absl::InlinedVector<QuicPacketNumber, 2> QuicUnackedPacketMap::NeuterHandshakePackets() { absl::InlinedVector<QuicPacketNumber, 2> neutered_packets; QuicPacketNumber packet_number = GetLeastUnacked(); for (QuicUnackedPacketMap::iterator it = begin(); it != end(); ++it, ++packet_number) { if (!it->retransmittable_frames.empty() && GetPacketNumberSpace(it->encryption_level) == HANDSHAKE_DATA) { QUIC_DVLOG(2) << "Neutering handshake packet " << packet_number; RemoveFromInFlight(packet_number); it->state = NEUTERED; neutered_packets.push_back(packet_number); NotifyFramesAcked(*it, QuicTime::Delta::Zero(), QuicTime::Zero()); } } QUICHE_DCHECK(!supports_multiple_packet_number_spaces() || last_inflight_packets_sent_time_[HANDSHAKE_DATA] == QuicTime::Zero()); return neutered_packets; } bool QuicUnackedPacketMap::HasInFlightPackets() const { return bytes_in_flight_ > 0; } const QuicTransmissionInfo& QuicUnackedPacketMap::GetTransmissionInfo( QuicPacketNumber packet_number) const { return unacked_packets_[packet_number - least_unacked_]; } QuicTransmissionInfo* QuicUnackedPacketMap::GetMutableTransmissionInfo( QuicPacketNumber packet_number) { return &unacked_packets_[packet_number - least_unacked_]; } QuicTime QuicUnackedPacketMap::GetLastInFlightPacketSentTime() const { return last_inflight_packet_sent_time_; } QuicTime QuicUnackedPacketMap::GetLastCryptoPacketSentTime() const { return last_crypto_packet_sent_time_; } size_t QuicUnackedPacketMap::GetNumUnackedPacketsDebugOnly() const { size_t 
unacked_packet_count = 0; QuicPacketNumber packet_number = least_unacked_; for (auto it = begin(); it != end(); ++it, ++packet_number) { if (!IsPacketUseless(packet_number, *it)) { ++unacked_packet_count; } } return unacked_packet_count; } bool QuicUnackedPacketMap::HasMultipleInFlightPackets() const { if (bytes_in_flight_ > kDefaultTCPMSS) { return true; } size_t num_in_flight = 0; for (auto it = rbegin(); it != rend(); ++it) { if (it->in_flight) { ++num_in_flight; } if (num_in_flight > 1) { return true; } } return false; } bool QuicUnackedPacketMap::HasPendingCryptoPackets() const { return session_notifier_->HasUnackedCryptoData(); } bool QuicUnackedPacketMap::HasUnackedRetransmittableFrames() const { for (auto it = rbegin(); it != rend(); ++it) { if (it->in_flight && HasRetransmittableFrames(*it)) { return true; } } return false; } QuicPacketNumber QuicUnackedPacketMap::GetLeastUnacked() const { return least_unacked_; } void QuicUnackedPacketMap::SetSessionNotifier( SessionNotifierInterface* session_notifier) { session_notifier_ = session_notifier; } bool QuicUnackedPacketMap::NotifyFramesAcked(const QuicTransmissionInfo& info, QuicTime::Delta ack_delay, QuicTime receive_timestamp) { if (session_notifier_ == nullptr) { return false; } bool new_data_acked = false; for (const QuicFrame& frame : info.retransmittable_frames) { if (session_notifier_->OnFrameAcked(frame, ack_delay, receive_timestamp)) { new_data_acked = true; } } return new_data_acked; } void QuicUnackedPacketMap::NotifyFramesLost(const QuicTransmissionInfo& info, TransmissionType ) { for (const QuicFrame& frame : info.retransmittable_frames) { session_notifier_->OnFrameLost(frame); } } bool QuicUnackedPacketMap::RetransmitFrames(const QuicFrames& frames, TransmissionType type) { return session_notifier_->RetransmitFrames(frames, type); } void QuicUnackedPacketMap::MaybeAggregateAckedStreamFrame( const QuicTransmissionInfo& info, QuicTime::Delta ack_delay, QuicTime receive_timestamp) { if (session_notifier_ == nullptr) { return; } for (const auto& frame : info.retransmittable_frames) { const bool can_aggregate = frame.type == STREAM_FRAME && frame.stream_frame.stream_id == aggregated_stream_frame_.stream_id && frame.stream_frame.offset == aggregated_stream_frame_.offset + aggregated_stream_frame_.data_length && !WillStreamFrameLengthSumWrapAround( aggregated_stream_frame_.data_length, frame.stream_frame.data_length); if (can_aggregate) { aggregated_stream_frame_.data_length += frame.stream_frame.data_length; aggregated_stream_frame_.fin = frame.stream_frame.fin; if (aggregated_stream_frame_.fin) { NotifyAggregatedStreamFrameAcked(ack_delay); } continue; } NotifyAggregatedStreamFrameAcked(ack_delay); if (frame.type != STREAM_FRAME || frame.stream_frame.fin) { session_notifier_->OnFrameAcked(frame, ack_delay, receive_timestamp); continue; } aggregated_stream_frame_.stream_id = frame.stream_frame.stream_id; aggregated_stream_frame_.offset = frame.stream_frame.offset; aggregated_stream_frame_.data_length = frame.stream_frame.data_length; aggregated_stream_frame_.fin = frame.stream_frame.fin; } } void QuicUnackedPacketMap::NotifyAggregatedStreamFrameAcked( QuicTime::Delta ack_delay) { if (aggregated_stream_frame_.stream_id == static_cast<QuicStreamId>(-1) || session_notifier_ == nullptr) { return; } session_notifier_->OnFrameAcked(QuicFrame(aggregated_stream_frame_), ack_delay, QuicTime::Zero()); aggregated_stream_frame_.stream_id = -1; } PacketNumberSpace QuicUnackedPacketMap::GetPacketNumberSpace( QuicPacketNumber packet_number) 
const { return GetPacketNumberSpace( GetTransmissionInfo(packet_number).encryption_level); } PacketNumberSpace QuicUnackedPacketMap::GetPacketNumberSpace( EncryptionLevel encryption_level) const { if (supports_multiple_packet_number_spaces_) { return QuicUtils::GetPacketNumberSpace(encryption_level); } if (perspective_ == Perspective::IS_CLIENT) { return encryption_level == ENCRYPTION_INITIAL ? HANDSHAKE_DATA : APPLICATION_DATA; } return encryption_level == ENCRYPTION_FORWARD_SECURE ? APPLICATION_DATA : HANDSHAKE_DATA; } QuicPacketNumber QuicUnackedPacketMap::GetLargestAckedOfPacketNumberSpace( PacketNumberSpace packet_number_space) const { if (packet_number_space >= NUM_PACKET_NUMBER_SPACES) { QUIC_BUG(quic_bug_10518_4) << "Invalid packet number space: " << packet_number_space; return QuicPacketNumber(); } return largest_acked_packets_[packet_number_space]; } QuicTime QuicUnackedPacketMap::GetLastInFlightPacketSentTime( PacketNumberSpace packet_number_space) const { if (packet_number_space >= NUM_PACKET_NUMBER_SPACES) { QUIC_BUG(quic_bug_10518_5) << "Invalid packet number space: " << packet_number_space; return QuicTime::Zero(); } return last_inflight_packets_sent_time_[packet_number_space]; } QuicPacketNumber QuicUnackedPacketMap::GetLargestSentRetransmittableOfPacketNumberSpace( PacketNumberSpace packet_number_space) const { if (packet_number_space >= NUM_PACKET_NUMBER_SPACES) { QUIC_BUG(quic_bug_10518_6) << "Invalid packet number space: " << packet_number_space; return QuicPacketNumber(); } return largest_sent_retransmittable_packets_[packet_number_space]; } const QuicTransmissionInfo* QuicUnackedPacketMap::GetFirstInFlightTransmissionInfo() const { QUICHE_DCHECK(HasInFlightPackets()); for (auto it = begin(); it != end(); ++it) { if (it->in_flight) { return &(*it); } } QUICHE_DCHECK(false); return nullptr; } const QuicTransmissionInfo* QuicUnackedPacketMap::GetFirstInFlightTransmissionInfoOfSpace( PacketNumberSpace packet_number_space) const { for (auto it = begin(); it != end(); ++it) { if (it->in_flight && GetPacketNumberSpace(it->encryption_level) == packet_number_space) { return &(*it); } } return nullptr; } void QuicUnackedPacketMap::EnableMultiplePacketNumberSpacesSupport() { if (supports_multiple_packet_number_spaces_) { QUIC_BUG(quic_bug_10518_7) << "Multiple packet number spaces has already been enabled"; return; } if (largest_sent_packet_.IsInitialized()) { QUIC_BUG(quic_bug_10518_8) << "Try to enable multiple packet number spaces support after any " "packet has been sent."; return; } supports_multiple_packet_number_spaces_ = true; } int32_t QuicUnackedPacketMap::GetLastPacketContent() const { if (empty()) { return -1; } int32_t content = 0; const QuicTransmissionInfo& last_packet = unacked_packets_.back(); for (const auto& frame : last_packet.retransmittable_frames) { content |= GetFrameTypeBitfield(frame.type); } if (last_packet.largest_acked.IsInitialized()) { content |= GetFrameTypeBitfield(ACK_FRAME); } return content; } }
#include "quiche/quic/core/quic_unacked_packet_map.h" #include <cstddef> #include <limits> #include <vector> #include "absl/base/macros.h" #include "quiche/quic/core/frames/quic_stream_frame.h" #include "quiche/quic/core/quic_packet_number.h" #include "quiche/quic/core/quic_transmission_info.h" #include "quiche/quic/core/quic_utils.h" #include "quiche/quic/platform/api/quic_test.h" #include "quiche/quic/test_tools/quic_test_utils.h" #include "quiche/quic/test_tools/quic_unacked_packet_map_peer.h" using testing::_; using testing::Return; using testing::StrictMock; namespace quic { namespace test { namespace { const uint32_t kDefaultLength = 1000; class QuicUnackedPacketMapTest : public QuicTestWithParam<Perspective> { protected: QuicUnackedPacketMapTest() : unacked_packets_(GetParam()), now_(QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(1000)) { unacked_packets_.SetSessionNotifier(&notifier_); EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(true)); EXPECT_CALL(notifier_, OnStreamFrameRetransmitted(_)) .Times(testing::AnyNumber()); } ~QuicUnackedPacketMapTest() override {} SerializedPacket CreateRetransmittablePacket(uint64_t packet_number) { return CreateRetransmittablePacketForStream( packet_number, QuicUtils::GetFirstBidirectionalStreamId( CurrentSupportedVersions()[0].transport_version, Perspective::IS_CLIENT)); } SerializedPacket CreateRetransmittablePacketForStream( uint64_t packet_number, QuicStreamId stream_id) { SerializedPacket packet(QuicPacketNumber(packet_number), PACKET_1BYTE_PACKET_NUMBER, nullptr, kDefaultLength, false, false); QuicStreamFrame frame; frame.stream_id = stream_id; packet.retransmittable_frames.push_back(QuicFrame(frame)); return packet; } SerializedPacket CreateNonRetransmittablePacket(uint64_t packet_number) { return SerializedPacket(QuicPacketNumber(packet_number), PACKET_1BYTE_PACKET_NUMBER, nullptr, kDefaultLength, false, false); } void VerifyInFlightPackets(uint64_t* packets, size_t num_packets) { unacked_packets_.RemoveObsoletePackets(); if (num_packets == 0) { EXPECT_FALSE(unacked_packets_.HasInFlightPackets()); EXPECT_FALSE(unacked_packets_.HasMultipleInFlightPackets()); return; } if (num_packets == 1) { EXPECT_TRUE(unacked_packets_.HasInFlightPackets()); EXPECT_FALSE(unacked_packets_.HasMultipleInFlightPackets()); ASSERT_TRUE(unacked_packets_.IsUnacked(QuicPacketNumber(packets[0]))); EXPECT_TRUE( unacked_packets_.GetTransmissionInfo(QuicPacketNumber(packets[0])) .in_flight); } for (size_t i = 0; i < num_packets; ++i) { ASSERT_TRUE(unacked_packets_.IsUnacked(QuicPacketNumber(packets[i]))); EXPECT_TRUE( unacked_packets_.GetTransmissionInfo(QuicPacketNumber(packets[i])) .in_flight); } size_t in_flight_count = 0; for (auto it = unacked_packets_.begin(); it != unacked_packets_.end(); ++it) { if (it->in_flight) { ++in_flight_count; } } EXPECT_EQ(num_packets, in_flight_count); } void VerifyUnackedPackets(uint64_t* packets, size_t num_packets) { unacked_packets_.RemoveObsoletePackets(); if (num_packets == 0) { EXPECT_TRUE(unacked_packets_.empty()); EXPECT_FALSE(unacked_packets_.HasUnackedRetransmittableFrames()); return; } EXPECT_FALSE(unacked_packets_.empty()); for (size_t i = 0; i < num_packets; ++i) { EXPECT_TRUE(unacked_packets_.IsUnacked(QuicPacketNumber(packets[i]))) << packets[i]; } EXPECT_EQ(num_packets, unacked_packets_.GetNumUnackedPacketsDebugOnly()); } void VerifyRetransmittablePackets(uint64_t* packets, size_t num_packets) { unacked_packets_.RemoveObsoletePackets(); size_t num_retransmittable_packets = 0; for (auto it = 
unacked_packets_.begin(); it != unacked_packets_.end(); ++it) { if (unacked_packets_.HasRetransmittableFrames(*it)) { ++num_retransmittable_packets; } } EXPECT_EQ(num_packets, num_retransmittable_packets); for (size_t i = 0; i < num_packets; ++i) { EXPECT_TRUE(unacked_packets_.HasRetransmittableFrames( QuicPacketNumber(packets[i]))) << " packets[" << i << "]:" << packets[i]; } } void UpdatePacketState(uint64_t packet_number, SentPacketState state) { unacked_packets_ .GetMutableTransmissionInfo(QuicPacketNumber(packet_number)) ->state = state; } void RetransmitAndSendPacket(uint64_t old_packet_number, uint64_t new_packet_number, TransmissionType transmission_type) { QUICHE_DCHECK(unacked_packets_.HasRetransmittableFrames( QuicPacketNumber(old_packet_number))); QuicTransmissionInfo* info = unacked_packets_.GetMutableTransmissionInfo( QuicPacketNumber(old_packet_number)); QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId( CurrentSupportedVersions()[0].transport_version, Perspective::IS_CLIENT); for (const auto& frame : info->retransmittable_frames) { if (frame.type == STREAM_FRAME) { stream_id = frame.stream_frame.stream_id; break; } } UpdatePacketState( old_packet_number, QuicUtils::RetransmissionTypeToPacketState(transmission_type)); info->first_sent_after_loss = QuicPacketNumber(new_packet_number); SerializedPacket packet( CreateRetransmittablePacketForStream(new_packet_number, stream_id)); unacked_packets_.AddSentPacket(&packet, transmission_type, now_, true, true, ECN_NOT_ECT); } QuicUnackedPacketMap unacked_packets_; QuicTime now_; StrictMock<MockSessionNotifier> notifier_; }; INSTANTIATE_TEST_SUITE_P(Tests, QuicUnackedPacketMapTest, ::testing::ValuesIn({Perspective::IS_CLIENT, Perspective::IS_SERVER}), ::testing::PrintToStringParamName()); TEST_P(QuicUnackedPacketMapTest, RttOnly) { SerializedPacket packet(CreateNonRetransmittablePacket(1)); unacked_packets_.AddSentPacket(&packet, NOT_RETRANSMISSION, now_, false, true, ECN_NOT_ECT); uint64_t unacked[] = {1}; VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyInFlightPackets(nullptr, 0); VerifyRetransmittablePackets(nullptr, 0); unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(1)); VerifyUnackedPackets(nullptr, 0); VerifyInFlightPackets(nullptr, 0); VerifyRetransmittablePackets(nullptr, 0); } TEST_P(QuicUnackedPacketMapTest, RetransmittableInflightAndRtt) { SerializedPacket packet(CreateRetransmittablePacket(1)); unacked_packets_.AddSentPacket(&packet, NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); uint64_t unacked[] = {1}; VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyRetransmittablePackets(unacked, ABSL_ARRAYSIZE(unacked)); unacked_packets_.RemoveRetransmittability(QuicPacketNumber(1)); VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyRetransmittablePackets(nullptr, 0); unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(1)); VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyRetransmittablePackets(nullptr, 0); unacked_packets_.RemoveFromInFlight(QuicPacketNumber(1)); VerifyUnackedPackets(nullptr, 0); VerifyInFlightPackets(nullptr, 0); VerifyRetransmittablePackets(nullptr, 0); } TEST_P(QuicUnackedPacketMapTest, StopRetransmission) { const QuicStreamId stream_id = 2; SerializedPacket packet(CreateRetransmittablePacketForStream(1, stream_id)); unacked_packets_.AddSentPacket(&packet, 
NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); uint64_t unacked[] = {1}; VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked)); uint64_t retransmittable[] = {1}; VerifyRetransmittablePackets(retransmittable, ABSL_ARRAYSIZE(retransmittable)); EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false)); VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyRetransmittablePackets(nullptr, 0); } TEST_P(QuicUnackedPacketMapTest, StopRetransmissionOnOtherStream) { const QuicStreamId stream_id = 2; SerializedPacket packet(CreateRetransmittablePacketForStream(1, stream_id)); unacked_packets_.AddSentPacket(&packet, NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); uint64_t unacked[] = {1}; VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked)); uint64_t retransmittable[] = {1}; VerifyRetransmittablePackets(retransmittable, ABSL_ARRAYSIZE(retransmittable)); VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyRetransmittablePackets(retransmittable, ABSL_ARRAYSIZE(retransmittable)); } TEST_P(QuicUnackedPacketMapTest, StopRetransmissionAfterRetransmission) { const QuicStreamId stream_id = 2; SerializedPacket packet1(CreateRetransmittablePacketForStream(1, stream_id)); unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); RetransmitAndSendPacket(1, 2, LOSS_RETRANSMISSION); uint64_t unacked[] = {1, 2}; VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked)); std::vector<uint64_t> retransmittable = {1, 2}; VerifyRetransmittablePackets(&retransmittable[0], retransmittable.size()); EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false)); VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyRetransmittablePackets(nullptr, 0); } TEST_P(QuicUnackedPacketMapTest, RetransmittedPacket) { SerializedPacket packet1(CreateRetransmittablePacket(1)); unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); RetransmitAndSendPacket(1, 2, LOSS_RETRANSMISSION); uint64_t unacked[] = {1, 2}; VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked)); std::vector<uint64_t> retransmittable = {1, 2}; VerifyRetransmittablePackets(&retransmittable[0], retransmittable.size()); EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false)); unacked_packets_.RemoveRetransmittability(QuicPacketNumber(1)); VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyRetransmittablePackets(nullptr, 0); unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(2)); VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyRetransmittablePackets(nullptr, 0); unacked_packets_.RemoveFromInFlight(QuicPacketNumber(2)); uint64_t unacked2[] = {1}; VerifyUnackedPackets(unacked2, ABSL_ARRAYSIZE(unacked2)); VerifyInFlightPackets(unacked2, ABSL_ARRAYSIZE(unacked2)); VerifyRetransmittablePackets(nullptr, 0); unacked_packets_.RemoveFromInFlight(QuicPacketNumber(1)); VerifyUnackedPackets(nullptr, 0); VerifyInFlightPackets(nullptr, 0); VerifyRetransmittablePackets(nullptr, 0); } 
TEST_P(QuicUnackedPacketMapTest, RetransmitThreeTimes) { SerializedPacket packet1(CreateRetransmittablePacket(1)); unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); SerializedPacket packet2(CreateRetransmittablePacket(2)); unacked_packets_.AddSentPacket(&packet2, NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); uint64_t unacked[] = {1, 2}; VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked)); uint64_t retransmittable[] = {1, 2}; VerifyRetransmittablePackets(retransmittable, ABSL_ARRAYSIZE(retransmittable)); unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(2)); unacked_packets_.RemoveFromInFlight(QuicPacketNumber(2)); unacked_packets_.RemoveRetransmittability(QuicPacketNumber(2)); unacked_packets_.RemoveFromInFlight(QuicPacketNumber(1)); RetransmitAndSendPacket(1, 3, LOSS_RETRANSMISSION); SerializedPacket packet4(CreateRetransmittablePacket(4)); unacked_packets_.AddSentPacket(&packet4, NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); uint64_t unacked2[] = {1, 3, 4}; VerifyUnackedPackets(unacked2, ABSL_ARRAYSIZE(unacked2)); uint64_t pending2[] = {3, 4}; VerifyInFlightPackets(pending2, ABSL_ARRAYSIZE(pending2)); std::vector<uint64_t> retransmittable2 = {1, 3, 4}; VerifyRetransmittablePackets(&retransmittable2[0], retransmittable2.size()); unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(4)); unacked_packets_.RemoveFromInFlight(QuicPacketNumber(4)); unacked_packets_.RemoveRetransmittability(QuicPacketNumber(4)); RetransmitAndSendPacket(3, 5, LOSS_RETRANSMISSION); SerializedPacket packet6(CreateRetransmittablePacket(6)); unacked_packets_.AddSentPacket(&packet6, NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); std::vector<uint64_t> unacked3 = {3, 5, 6}; std::vector<uint64_t> retransmittable3 = {3, 5, 6}; VerifyUnackedPackets(&unacked3[0], unacked3.size()); VerifyRetransmittablePackets(&retransmittable3[0], retransmittable3.size()); uint64_t pending3[] = {3, 5, 6}; VerifyInFlightPackets(pending3, ABSL_ARRAYSIZE(pending3)); unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(6)); unacked_packets_.RemoveFromInFlight(QuicPacketNumber(6)); unacked_packets_.RemoveRetransmittability(QuicPacketNumber(6)); RetransmitAndSendPacket(5, 7, LOSS_RETRANSMISSION); std::vector<uint64_t> unacked4 = {3, 5, 7}; std::vector<uint64_t> retransmittable4 = {3, 5, 7}; VerifyUnackedPackets(&unacked4[0], unacked4.size()); VerifyRetransmittablePackets(&retransmittable4[0], retransmittable4.size()); uint64_t pending4[] = {3, 5, 7}; VerifyInFlightPackets(pending4, ABSL_ARRAYSIZE(pending4)); unacked_packets_.RemoveFromInFlight(QuicPacketNumber(3)); unacked_packets_.RemoveFromInFlight(QuicPacketNumber(5)); uint64_t pending5[] = {7}; VerifyInFlightPackets(pending5, ABSL_ARRAYSIZE(pending5)); } TEST_P(QuicUnackedPacketMapTest, RetransmitFourTimes) { SerializedPacket packet1(CreateRetransmittablePacket(1)); unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); SerializedPacket packet2(CreateRetransmittablePacket(2)); unacked_packets_.AddSentPacket(&packet2, NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); uint64_t unacked[] = {1, 2}; VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked)); VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked)); uint64_t retransmittable[] = {1, 2}; VerifyRetransmittablePackets(retransmittable, ABSL_ARRAYSIZE(retransmittable)); unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(2)); 
unacked_packets_.RemoveFromInFlight(QuicPacketNumber(2)); unacked_packets_.RemoveRetransmittability(QuicPacketNumber(2)); unacked_packets_.RemoveFromInFlight(QuicPacketNumber(1)); RetransmitAndSendPacket(1, 3, LOSS_RETRANSMISSION); uint64_t unacked2[] = {1, 3}; VerifyUnackedPackets(unacked2, ABSL_ARRAYSIZE(unacked2)); uint64_t pending2[] = {3}; VerifyInFlightPackets(pending2, ABSL_ARRAYSIZE(pending2)); std::vector<uint64_t> retransmittable2 = {1, 3}; VerifyRetransmittablePackets(&retransmittable2[0], retransmittable2.size()); RetransmitAndSendPacket(3, 4, PTO_RETRANSMISSION); SerializedPacket packet5(CreateRetransmittablePacket(5)); unacked_packets_.AddSentPacket(&packet5, NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); uint64_t unacked3[] = {1, 3, 4, 5}; VerifyUnackedPackets(unacked3, ABSL_ARRAYSIZE(unacked3)); uint64_t pending3[] = {3, 4, 5}; VerifyInFlightPackets(pending3, ABSL_ARRAYSIZE(pending3)); std::vector<uint64_t> retransmittable3 = {1, 3, 4, 5}; VerifyRetransmittablePackets(&retransmittable3[0], retransmittable3.size()); unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(5)); unacked_packets_.RemoveFromInFlight(QuicPacketNumber(5)); unacked_packets_.RemoveRetransmittability(QuicPacketNumber(5)); unacked_packets_.RemoveFromInFlight(QuicPacketNumber(3)); unacked_packets_.RemoveFromInFlight(QuicPacketNumber(4)); RetransmitAndSendPacket(4, 6, LOSS_RETRANSMISSION); std::vector<uint64_t> unacked4 = {4, 6}; VerifyUnackedPackets(&unacked4[0], unacked4.size()); uint64_t pending4[] = {6}; VerifyInFlightPackets(pending4, ABSL_ARRAYSIZE(pending4)); std::vector<uint64_t> retransmittable4 = {4, 6}; VerifyRetransmittablePackets(&retransmittable4[0], retransmittable4.size()); } TEST_P(QuicUnackedPacketMapTest, SendWithGap) { SerializedPacket packet1(CreateRetransmittablePacket(1)); unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); SerializedPacket packet3(CreateRetransmittablePacket(3)); unacked_packets_.AddSentPacket(&packet3, NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); RetransmitAndSendPacket(3, 5, LOSS_RETRANSMISSION); EXPECT_EQ(QuicPacketNumber(1u), unacked_packets_.GetLeastUnacked()); EXPECT_TRUE(unacked_packets_.IsUnacked(QuicPacketNumber(1))); EXPECT_FALSE(unacked_packets_.IsUnacked(QuicPacketNumber(2))); EXPECT_TRUE(unacked_packets_.IsUnacked(QuicPacketNumber(3))); EXPECT_FALSE(unacked_packets_.IsUnacked(QuicPacketNumber(4))); EXPECT_TRUE(unacked_packets_.IsUnacked(QuicPacketNumber(5))); EXPECT_EQ(QuicPacketNumber(5u), unacked_packets_.largest_sent_packet()); } TEST_P(QuicUnackedPacketMapTest, AggregateContiguousAckedStreamFrames) { testing::InSequence s; EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(0); unacked_packets_.NotifyAggregatedStreamFrameAcked(QuicTime::Delta::Zero()); QuicTransmissionInfo info1; QuicStreamFrame stream_frame1(3, false, 0, 100); info1.retransmittable_frames.push_back(QuicFrame(stream_frame1)); QuicTransmissionInfo info2; QuicStreamFrame stream_frame2(3, false, 100, 100); info2.retransmittable_frames.push_back(QuicFrame(stream_frame2)); QuicTransmissionInfo info3; QuicStreamFrame stream_frame3(3, false, 200, 100); info3.retransmittable_frames.push_back(QuicFrame(stream_frame3)); QuicTransmissionInfo info4; QuicStreamFrame stream_frame4(3, true, 300, 0); info4.retransmittable_frames.push_back(QuicFrame(stream_frame4)); EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(0); unacked_packets_.MaybeAggregateAckedStreamFrame( info1, QuicTime::Delta::Zero(), QuicTime::Zero()); EXPECT_CALL(notifier_, 
OnFrameAcked(_, _, _)).Times(0); unacked_packets_.MaybeAggregateAckedStreamFrame( info2, QuicTime::Delta::Zero(), QuicTime::Zero()); EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(0); unacked_packets_.MaybeAggregateAckedStreamFrame( info3, QuicTime::Delta::Zero(), QuicTime::Zero()); EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(1); unacked_packets_.MaybeAggregateAckedStreamFrame( info4, QuicTime::Delta::Zero(), QuicTime::Zero()); } TEST_P(QuicUnackedPacketMapTest, CannotAggregateIfDataLengthOverflow) { QuicByteCount kMaxAggregatedDataLength = std::numeric_limits<decltype(QuicStreamFrame().data_length)>::max(); QuicStreamId stream_id = 2; for (const QuicPacketLength acked_stream_length : {512, 1300}) { ++stream_id; QuicStreamOffset offset = 0; QuicByteCount aggregated_data_length = 0; while (offset < 1e6) { QuicTransmissionInfo info; QuicStreamFrame stream_frame(stream_id, false, offset, acked_stream_length); info.retransmittable_frames.push_back(QuicFrame(stream_frame)); const QuicStreamFrame& aggregated_stream_frame = QuicUnackedPacketMapPeer::GetAggregatedStreamFrame(unacked_packets_); if (aggregated_stream_frame.data_length + acked_stream_length <= kMaxAggregatedDataLength) { EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(0); unacked_packets_.MaybeAggregateAckedStreamFrame( info, QuicTime::Delta::Zero(), QuicTime::Zero()); aggregated_data_length += acked_stream_length; testing::Mock::VerifyAndClearExpectations(&notifier_); } else { EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(1); unacked_packets_.MaybeAggregateAckedStreamFrame( info, QuicTime::Delta::Zero(), QuicTime::Zero()); aggregated_data_length = acked_stream_length; testing::Mock::VerifyAndClearExpectations(&notifier_); } EXPECT_EQ(aggregated_data_length, aggregated_stream_frame.data_length); offset += acked_stream_length; } QuicTransmissionInfo info; QuicStreamFrame stream_frame(stream_id, true, offset, acked_stream_length); info.retransmittable_frames.push_back(QuicFrame(stream_frame)); EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(1); unacked_packets_.MaybeAggregateAckedStreamFrame( info, QuicTime::Delta::Zero(), QuicTime::Zero()); testing::Mock::VerifyAndClearExpectations(&notifier_); } } TEST_P(QuicUnackedPacketMapTest, CannotAggregateAckedControlFrames) { testing::InSequence s; QuicWindowUpdateFrame window_update(1, 5, 100); QuicStreamFrame stream_frame1(3, false, 0, 100); QuicStreamFrame stream_frame2(3, false, 100, 100); QuicBlockedFrame blocked(2, 5, 0); QuicGoAwayFrame go_away(3, QUIC_PEER_GOING_AWAY, 5, "Going away."); QuicTransmissionInfo info1; info1.retransmittable_frames.push_back(QuicFrame(window_update)); info1.retransmittable_frames.push_back(QuicFrame(stream_frame1)); info1.retransmittable_frames.push_back(QuicFrame(stream_frame2)); QuicTransmissionInfo info2; info2.retransmittable_frames.push_back(QuicFrame(blocked)); info2.retransmittable_frames.push_back(QuicFrame(&go_away)); EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(1); unacked_packets_.MaybeAggregateAckedStreamFrame( info1, QuicTime::Delta::Zero(), QuicTime::Zero()); EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(3); unacked_packets_.MaybeAggregateAckedStreamFrame( info2, QuicTime::Delta::Zero(), QuicTime::Zero()); EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(0); unacked_packets_.NotifyAggregatedStreamFrameAcked(QuicTime::Delta::Zero()); } TEST_P(QuicUnackedPacketMapTest, LargestSentPacketMultiplePacketNumberSpaces) { unacked_packets_.EnableMultiplePacketNumberSpacesSupport(); EXPECT_FALSE( unacked_packets_ 
.GetLargestSentRetransmittableOfPacketNumberSpace(INITIAL_DATA) .IsInitialized()); SerializedPacket packet1(CreateRetransmittablePacket(1)); packet1.encryption_level = ENCRYPTION_INITIAL; unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); EXPECT_EQ(QuicPacketNumber(1u), unacked_packets_.largest_sent_packet()); EXPECT_EQ(QuicPacketNumber(1), unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace( INITIAL_DATA)); EXPECT_FALSE( unacked_packets_ .GetLargestSentRetransmittableOfPacketNumberSpace(HANDSHAKE_DATA) .IsInitialized()); SerializedPacket packet2(CreateRetransmittablePacket(2)); packet2.encryption_level = ENCRYPTION_HANDSHAKE; unacked_packets_.AddSentPacket(&packet2, NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); EXPECT_EQ(QuicPacketNumber(2u), unacked_packets_.largest_sent_packet()); EXPECT_EQ(QuicPacketNumber(1), unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace( INITIAL_DATA)); EXPECT_EQ(QuicPacketNumber(2), unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace( HANDSHAKE_DATA)); EXPECT_FALSE( unacked_packets_ .GetLargestSentRetransmittableOfPacketNumberSpace(APPLICATION_DATA) .IsInitialized()); SerializedPacket packet3(CreateRetransmittablePacket(3)); packet3.encryption_level = ENCRYPTION_ZERO_RTT; unacked_packets_.AddSentPacket(&packet3, NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); EXPECT_EQ(QuicPacketNumber(3u), unacked_packets_.largest_sent_packet()); EXPECT_EQ(QuicPacketNumber(1), unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace( INITIAL_DATA)); EXPECT_EQ(QuicPacketNumber(2), unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace( HANDSHAKE_DATA)); EXPECT_EQ(QuicPacketNumber(3), unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace( APPLICATION_DATA)); EXPECT_EQ(QuicPacketNumber(3), unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace( APPLICATION_DATA)); SerializedPacket packet4(CreateRetransmittablePacket(4)); packet4.encryption_level = ENCRYPTION_FORWARD_SECURE; unacked_packets_.AddSentPacket(&packet4, NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); EXPECT_EQ(QuicPacketNumber(4u), unacked_packets_.largest_sent_packet()); EXPECT_EQ(QuicPacketNumber(1), unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace( INITIAL_DATA)); EXPECT_EQ(QuicPacketNumber(2), unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace( HANDSHAKE_DATA)); EXPECT_EQ(QuicPacketNumber(4), unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace( APPLICATION_DATA)); EXPECT_EQ(QuicPacketNumber(4), unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace( APPLICATION_DATA)); EXPECT_TRUE(unacked_packets_.GetLastPacketContent() & (1 << STREAM_FRAME)); EXPECT_FALSE(unacked_packets_.GetLastPacketContent() & (1 << ACK_FRAME)); } TEST_P(QuicUnackedPacketMapTest, ReserveInitialCapacityTest) { QuicUnackedPacketMap unacked_packets(GetParam()); ASSERT_EQ(QuicUnackedPacketMapPeer::GetCapacity(unacked_packets), 0u); unacked_packets.ReserveInitialCapacity(16); QuicStreamId stream_id(1); SerializedPacket packet(CreateRetransmittablePacketForStream(1, stream_id)); unacked_packets.AddSentPacket(&packet, TransmissionType::NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); ASSERT_EQ(QuicUnackedPacketMapPeer::GetCapacity(unacked_packets), 16u); } TEST_P(QuicUnackedPacketMapTest, DebugString) { EXPECT_EQ(unacked_packets_.DebugString(), "{size: 0, least_unacked: 1, largest_sent_packet: uninitialized, " "largest_acked: uninitialized, 
bytes_in_flight: 0, " "packets_in_flight: 0}"); SerializedPacket packet1(CreateRetransmittablePacket(1)); unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); EXPECT_EQ( unacked_packets_.DebugString(), "{size: 1, least_unacked: 1, largest_sent_packet: 1, largest_acked: " "uninitialized, bytes_in_flight: 1000, packets_in_flight: 1}"); SerializedPacket packet2(CreateRetransmittablePacket(2)); unacked_packets_.AddSentPacket(&packet2, NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); unacked_packets_.RemoveFromInFlight(QuicPacketNumber(1)); unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(1)); unacked_packets_.RemoveObsoletePackets(); EXPECT_EQ( unacked_packets_.DebugString(), "{size: 1, least_unacked: 2, largest_sent_packet: 2, largest_acked: 1, " "bytes_in_flight: 1000, packets_in_flight: 1}"); } TEST_P(QuicUnackedPacketMapTest, EcnInfoStored) { SerializedPacket packet1(CreateRetransmittablePacket(1)); unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true, ECN_NOT_ECT); SerializedPacket packet2(CreateRetransmittablePacket(2)); unacked_packets_.AddSentPacket(&packet2, NOT_RETRANSMISSION, now_, true, true, ECN_ECT0); SerializedPacket packet3(CreateRetransmittablePacket(3)); unacked_packets_.AddSentPacket(&packet3, NOT_RETRANSMISSION, now_, true, true, ECN_ECT1); EXPECT_EQ( unacked_packets_.GetTransmissionInfo(QuicPacketNumber(1)).ecn_codepoint, ECN_NOT_ECT); EXPECT_EQ( unacked_packets_.GetTransmissionInfo(QuicPacketNumber(2)).ecn_codepoint, ECN_ECT0); EXPECT_EQ( unacked_packets_.GetTransmissionInfo(QuicPacketNumber(3)).ecn_codepoint, ECN_ECT1); } } } }
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_unacked_packet_map.cc
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_unacked_packet_map_test.cc
6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
0a1cf107-125e-40dd-85f7-591c64563636
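The CannotAggregateIfDataLengthOverflow test above hinges on one invariant: contiguous acked stream-frame ranges are merged until the running total would no longer fit in the frame's 16-bit data_length field, at which point the pending aggregate is flushed (firing OnFrameAcked) and restarted. A minimal standalone sketch of that aggregation policy, using plain structs instead of the QUICHE types (AckedRange, Aggregator, and main() are all hypothetical, not the library API):

#include <cstdint>
#include <iostream>
#include <limits>

struct AckedRange {   // hypothetical stand-in for QuicStreamFrame
  uint64_t stream_id;
  uint64_t offset;
  uint16_t length;    // mirrors the 16-bit width of data_length
};

class Aggregator {
 public:
  // Returns true if the pending aggregate was flushed, i.e. when a
  // notification (like OnFrameAcked in the tests) would fire.
  bool OnAcked(const AckedRange& r) {
    constexpr uint64_t kMax = std::numeric_limits<uint16_t>::max();
    const bool contiguous =
        r.stream_id == stream_id_ && r.offset == offset_ + length_;
    bool flushed = false;
    if (!contiguous || length_ + r.length > kMax) {
      flushed = Flush();                 // overflow or gap: emit and restart
      stream_id_ = r.stream_id;
      offset_ = r.offset;
      length_ = 0;
    }
    length_ += r.length;
    return flushed;
  }
  bool Flush() {
    if (length_ == 0) return false;
    std::cout << "acked stream " << stream_id_ << " [" << offset_ << ", "
              << offset_ + length_ << ")\n";
    length_ = 0;
    return true;
  }

 private:
  uint64_t stream_id_ = 0;
  uint64_t offset_ = 0;
  uint64_t length_ = 0;
};

int main() {
  Aggregator agg;
  // Contiguous 1300-byte acks aggregate until the 16-bit length would
  // overflow, as in the overflow test above.
  for (uint64_t off = 0; off < 200000; off += 1300) {
    agg.OnAcked({3, off, 1300});
  }
  agg.Flush();
}

Running it flushes a 65000-byte range every 50 contiguous 1300-byte acks (51 * 1300 would exceed 65535), which is exactly when the test expects OnFrameAcked to fire.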
cpp
tensorflow/tensorflow
python_op_gen_annotator
tensorflow/python/framework/python_op_gen_annotator.cc
tensorflow/python/framework/python_op_gen_annotator_test.cc
#include "tensorflow/python/framework/python_op_gen_annotator.h" #include <cstdint> #include <memory> #include <string> #include <unordered_map> #include <vector> #include "absl/strings/escaping.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" #include "tensorflow/python/framework/kythe_metadata.pb.h" #include "tensorflow/python/framework/op_reg_offset.pb.h" namespace tensorflow { namespace python_op_gen_internal { void GeneratedCodeAnnotator::AddAnnotation(const OpDef& op_def, absl::string_view function_name, uint32_t offset_start) { const uint32_t start_byte = base_pos_ + offset_start; const uint32_t end_byte = start_byte + function_name.size(); byte_offsets_map_[op_def.name()].generated_start = start_byte; byte_offsets_map_[op_def.name()].generated_end = end_byte; } void GeneratedCodeAnnotator::FillSourceOffsets( const OpRegOffsets& op_reg_offsets) { for (const OpRegOffset& offset : op_reg_offsets.offsets()) { if (byte_offsets_map_.find(offset.name()) != byte_offsets_map_.end()) { byte_offsets_map_[offset.name()].file_path = offset.filepath(); byte_offsets_map_[offset.name()].source_start = offset.start(); byte_offsets_map_[offset.name()].source_end = offset.end(); } } } string GeneratedCodeAnnotator::BuildKytheMetadata() { GeneratedCodeInfo generated_code_info; generated_code_info.set_type(GeneratedCodeInfo::KYTHE0); for (const auto& [name, offsets] : byte_offsets_map_) { if (offsets.file_path.empty()) { continue; } MappingRule* meta = generated_code_info.add_meta(); meta->set_type(MappingRule::ANCHOR_ANCHOR); meta->set_edge("/kythe/edge/imputes"); meta->set_source_begin(offsets.source_start); meta->set_source_end(offsets.source_end); meta->set_target_begin(offsets.generated_start); meta->set_target_end(offsets.generated_end); VName* vname = meta->mutable_source_vname(); vname->set_signature(absl::StrFormat( "@%d:%d@tensorflow_op#%s#%s#%s", offsets.source_start, offsets.source_end, name, kKytheCorpus, offsets.file_path)); vname->set_corpus(std::string(kKytheCorpus)); vname->set_path(offsets.file_path); vname->set_language("c++"); } return "# kythe.proto.metadata.GeneratedCodeInfo:" + absl::Base64Escape(generated_code_info.SerializeAsString()); } } }
#include "tensorflow/python/framework/python_op_gen_annotator.h" #include <utility> #include "absl/strings/escaping.h" #include "absl/strings/str_format.h" #include "absl/strings/str_split.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/python/framework/kythe_metadata.pb.h" namespace tensorflow { namespace python_op_gen_internal { namespace { using ::testing::StartsWith; GeneratedCodeInfo ParseMetadata(string metadata) { GeneratedCodeInfo generated_code_info; std::pair<string, string> p = absl::StrSplit(metadata, ':'); string serialized_generated_code_info; absl::Base64Unescape(p.second, &serialized_generated_code_info); generated_code_info.ParseFromString(serialized_generated_code_info); return generated_code_info; } TEST(PythonOpGenAnnotatorTest, AddAnnotationWithoutSourceOffsets) { GeneratedCodeAnnotator annotator; OpDef fakeOpDef; fakeOpDef.set_name("fake_op"); annotator.AddAnnotation(fakeOpDef, "fake_op", 0); string meta = annotator.BuildKytheMetadata(); ASSERT_THAT(meta, StartsWith("# kythe.proto.metadata.GeneratedCodeInfo:")); GeneratedCodeInfo actual = ParseMetadata(meta); GeneratedCodeInfo expected; ASSERT_TRUE(protobuf::TextFormat::ParseFromString("type: KYTHE0", &expected)); EXPECT_EQ(actual.SerializeAsString(), expected.SerializeAsString()); } TEST(PythonOpGenAnnotatorTest, AddAnnotationWithSourceOffsets) { GeneratedCodeAnnotator annotator; OpDef fakeOpDef; fakeOpDef.set_name("fake_op"); OpRegOffsets fakeOffsets; ASSERT_TRUE(protobuf::TextFormat::ParseFromString( R"pb( offsets { name: "fake_op", filepath: "file/path/to/fake_op.cc", start: 7, end: 11, } )pb", &fakeOffsets)); annotator.AddAnnotation(fakeOpDef, "fake_op", 100); annotator.FillSourceOffsets(fakeOffsets); string meta = annotator.BuildKytheMetadata(); ASSERT_THAT(meta, StartsWith("# kythe.proto.metadata.GeneratedCodeInfo:")); GeneratedCodeInfo actual = ParseMetadata(meta); EXPECT_EQ(actual.meta(0).type(), MappingRule::ANCHOR_ANCHOR); EXPECT_EQ(actual.meta(0).edge(), "/kythe/edge/imputes"); EXPECT_EQ( actual.meta(0).source_vname().signature(), absl::StrFormat("@7:11@tensorflow_op#fake_op#%s#file/path/to/fake_op.cc", kKytheCorpus)); EXPECT_EQ(actual.meta(0).source_vname().path(), "file/path/to/fake_op.cc"); EXPECT_EQ(actual.meta(0).source_begin(), 7); EXPECT_EQ(actual.meta(0).source_end(), 11); EXPECT_EQ(actual.meta(0).target_begin(), 100); EXPECT_EQ(actual.meta(0).target_end(), 107); } TEST(PythonOpGenAnnotatorTest, AddAnnotationWithSourceOffsetsAndNonZeroBase) { GeneratedCodeAnnotator annotator; OpDef fakeOpDef; fakeOpDef.set_name("fake_op"); OpRegOffsets fakeOffsets; ASSERT_TRUE(protobuf::TextFormat::ParseFromString( R"pb( offsets { name: "fake_op", filepath: "file/path/to/fake_op.cc", start: 7, end: 11, } )pb", &fakeOffsets)); annotator.SetBase(10); annotator.AddAnnotation(fakeOpDef, "fake_op", 100); annotator.FillSourceOffsets(fakeOffsets); string meta = annotator.BuildKytheMetadata(); ASSERT_THAT(meta, StartsWith("# kythe.proto.metadata.GeneratedCodeInfo:")); GeneratedCodeInfo actual = ParseMetadata(meta); EXPECT_EQ(actual.meta(0).type(), MappingRule::ANCHOR_ANCHOR); EXPECT_EQ(actual.meta(0).edge(), "/kythe/edge/imputes"); EXPECT_EQ( actual.meta(0).source_vname().signature(), absl::StrFormat("@7:11@tensorflow_op#fake_op#%s#file/path/to/fake_op.cc", kKytheCorpus)); EXPECT_EQ(actual.meta(0).source_vname().path(), "file/path/to/fake_op.cc"); EXPECT_EQ(actual.meta(0).source_begin(), 7); EXPECT_EQ(actual.meta(0).source_end(), 11); 
EXPECT_EQ(actual.meta(0).target_begin(), 110); EXPECT_EQ(actual.meta(0).target_end(), 117); } TEST(PythonOpGenAnnotatorTest, AddMultipleAnnotation) { GeneratedCodeAnnotator annotator; OpDef fakeOpDef; OpRegOffsets fakeOffsets; ASSERT_TRUE(protobuf::TextFormat::ParseFromString( R"pb( offsets { name: "fake_op_1", filepath: "file/path/to/fake_op.cc", start: 7, end: 11, } offsets { name: "fake_op_2", filepath: "file/path/to/fake_op.cc", start: 101, end: 103, } )pb", &fakeOffsets)); fakeOpDef.set_name("fake_op_1"); annotator.AddAnnotation(fakeOpDef, "fake_op_1", 10); fakeOpDef.set_name("fake_op_2"); annotator.AddAnnotation(fakeOpDef, "fake_op_2", 100); annotator.FillSourceOffsets(fakeOffsets); string meta = annotator.BuildKytheMetadata(); ASSERT_THAT(meta, StartsWith("# kythe.proto.metadata.GeneratedCodeInfo:")); GeneratedCodeInfo actual = ParseMetadata(meta); EXPECT_EQ(actual.meta_size(), 2); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/python/framework/python_op_gen_annotator.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/python/framework/python_op_gen_annotator_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
70f4d2fa-0102-416a-8f90-3068250b5d23
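The annotator above works in two phases: AddAnnotation records where each op's wrapper landed in the generated file (base position plus a relative offset, spanning the function name), and FillSourceOffsets later joins those records with the C++ registration offsets by op name, skipping ops that never receive a file path. A minimal sketch of that two-phase join, with plain structs in place of the Kythe/OpRegOffsets protos (Annotator, ByteOffsets, and main() are hypothetical illustrations, not the TensorFlow API):

#include <cstdint>
#include <iostream>
#include <map>
#include <optional>
#include <string>

struct ByteOffsets {
  uint32_t generated_start = 0, generated_end = 0;   // in the generated file
  std::optional<uint32_t> source_start, source_end;  // in the C++ source
  std::string file_path;
};

class Annotator {
 public:
  void SetBase(uint32_t base) { base_ = base; }
  void AddAnnotation(const std::string& op, const std::string& fn,
                     uint32_t offset) {
    offsets_[op].generated_start = base_ + offset;
    offsets_[op].generated_end = base_ + offset + fn.size();
  }
  void FillSourceOffsets(const std::string& op, const std::string& path,
                         uint32_t start, uint32_t end) {
    auto it = offsets_.find(op);
    if (it == offsets_.end()) return;  // no generated code for this op
    it->second.file_path = path;
    it->second.source_start = start;
    it->second.source_end = end;
  }
  void Dump() const {
    for (const auto& [op, o] : offsets_) {
      if (!o.source_start) continue;  // drop ops without registration info
      std::cout << op << ": " << o.file_path << " [" << *o.source_start << ","
                << *o.source_end << ") -> generated [" << o.generated_start
                << "," << o.generated_end << ")\n";
    }
  }

 private:
  uint32_t base_ = 0;
  std::map<std::string, ByteOffsets> offsets_;
};

int main() {
  Annotator a;
  a.SetBase(10);
  a.AddAnnotation("fake_op", "fake_op", 100);  // as in the non-zero-base test
  a.FillSourceOffsets("fake_op", "file/path/to/fake_op.cc", 7, 11);
  a.Dump();
}

With base 10, offset 100, and a 7-character function name, "fake_op" maps to generated bytes [110, 117), matching the AddAnnotationWithSourceOffsetsAndNonZeroBase test above.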
cpp
google/tensorstore
nditerable_transformed_array
tensorstore/internal/nditerable_transformed_array.cc
tensorstore/internal/nditerable_transformed_array_test.cc
#include "tensorstore/internal/nditerable_transformed_array.h" #include <cassert> #include <cstddef> #include <memory> #include <utility> #include <vector> #include "absl/status/status.h" #include "tensorstore/array.h" #include "tensorstore/data_type.h" #include "tensorstore/index.h" #include "tensorstore/index_space/index_transform.h" #include "tensorstore/index_space/internal/iterate_impl.h" #include "tensorstore/index_space/internal/transform_rep.h" #include "tensorstore/index_space/transformed_array.h" #include "tensorstore/internal/arena.h" #include "tensorstore/internal/elementwise_function.h" #include "tensorstore/internal/integer_overflow.h" #include "tensorstore/internal/nditerable.h" #include "tensorstore/internal/nditerable_array.h" #include "tensorstore/internal/nditerable_array_util.h" #include "tensorstore/internal/nditerable_util.h" #include "tensorstore/internal/unique_with_intrusive_allocator.h" #include "tensorstore/strided_layout.h" #include "tensorstore/util/byte_strided_pointer.h" #include "tensorstore/util/element_pointer.h" #include "tensorstore/util/result.h" #include "tensorstore/util/span.h" #include "tensorstore/util/status.h" namespace tensorstore { namespace internal { namespace input_dim_iter_flags = internal_index_space::input_dimension_iteration_flags; namespace { class IterableImpl : public NDIterable::Base<IterableImpl> { public: IterableImpl(IndexTransform<> transform, allocator_type allocator) : transform_(std::move(transform)), input_dimension_flags_(transform_.input_rank(), input_dim_iter_flags::can_skip, allocator) {} allocator_type get_allocator() const override { return input_dimension_flags_.get_allocator(); } int GetDimensionOrder(DimensionIndex dim_i, DimensionIndex dim_j) const override { auto flags_i = input_dimension_flags_[dim_i]; if ((flags_i & input_dim_iter_flags::array_indexed) != (input_dimension_flags_[dim_j] & input_dim_iter_flags::array_indexed)) { return (flags_i & input_dim_iter_flags::array_indexed) ? 
-2 : 2; } if (flags_i & input_dim_iter_flags::array_indexed) { for (DimensionIndex i = 0; i < state_.num_array_indexed_output_dimensions; ++i) { const int order = GetDimensionOrderFromByteStrides( state_.index_array_byte_strides[i][dim_i], state_.index_array_byte_strides[i][dim_j]); if (order != 0) return order; } } return GetDimensionOrderFromByteStrides(state_.input_byte_strides[dim_i], state_.input_byte_strides[dim_j]); } void UpdateDirectionPrefs(NDIterable::DirectionPref* prefs) const override { const DimensionIndex input_rank = transform_.input_rank(); for (DimensionIndex i = 0; i < state_.num_array_indexed_output_dimensions; ++i) { UpdateDirectionPrefsFromByteStrides( tensorstore::span(state_.index_array_byte_strides[i], input_rank), prefs); } UpdateDirectionPrefsFromByteStrides( tensorstore::span(&state_.input_byte_strides[0], input_rank), prefs); } bool CanCombineDimensions(DimensionIndex dim_i, int dir_i, DimensionIndex dim_j, int dir_j, Index size_j) const override { auto flags_i = input_dimension_flags_[dim_i]; if ((flags_i & input_dim_iter_flags::array_indexed) != (input_dimension_flags_[dim_j] & input_dim_iter_flags::array_indexed)) { return false; } if (flags_i & input_dim_iter_flags::array_indexed) { for (DimensionIndex i = 0; i < state_.num_array_indexed_output_dimensions; ++i) { if (!CanCombineStridedArrayDimensions( state_.index_array_byte_strides[i][dim_i], dir_i, state_.index_array_byte_strides[i][dim_j], dir_j, size_j)) { return false; } } } return CanCombineStridedArrayDimensions( state_.input_byte_strides[dim_i], dir_i, state_.input_byte_strides[dim_j], dir_j, size_j); } DataType dtype() const override { return dtype_; } IterationBufferConstraint GetIterationBufferConstraint( IterationLayoutView layout) const override { const DimensionIndex penultimate_dim = layout.iteration_dimensions[layout.iteration_dimensions.size() - 2]; const DimensionIndex last_dim = layout.iteration_dimensions[layout.iteration_dimensions.size() - 1]; if ((last_dim == -1 || (input_dimension_flags_[last_dim] & input_dim_iter_flags::array_indexed) == 0) && (penultimate_dim == -1 || (input_dimension_flags_[penultimate_dim] & input_dim_iter_flags::array_indexed) == 0)) { return {(last_dim == -1 || state_.input_byte_strides[last_dim] * layout.directions[last_dim] == this->dtype_->size) ? IterationBufferKind::kContiguous : IterationBufferKind::kStrided, false}; } else { return {IterationBufferKind::kIndexed, false}; } } std::ptrdiff_t GetWorkingMemoryBytesPerElement( IterationLayoutView layout, IterationBufferKind buffer_kind) const override { return buffer_kind == IterationBufferKind::kIndexed ? sizeof(Index) : 0; } NDIterator::Ptr GetIterator( NDIterable::IterationBufferKindLayoutView layout) const override { return MakeUniqueWithVirtualIntrusiveAllocator<IteratorImpl>( get_allocator(), this, layout); } class IteratorImpl : public NDIterator::Base<IteratorImpl> { public: IteratorImpl(const IterableImpl* iterable, NDIterable::IterationBufferKindLayoutView layout, allocator_type allocator) : num_index_arrays_( iterable->state_.num_array_indexed_output_dimensions), num_index_array_iteration_dims_(0), iterable_(iterable), buffer_( num_index_arrays_ + layout.iteration_rank() * (num_index_arrays_ + 1) + ((layout.buffer_kind == IterationBufferKind::kIndexed) ? 
layout.block_shape[0] * layout.block_shape[1] : 0), allocator) { static_assert(sizeof(Index) >= sizeof(void*)); for (DimensionIndex j = 0; j < num_index_arrays_; ++j) { ByteStridedPointer<const Index> index_array_pointer = iterable->state_.index_array_pointers[j].get(); for (DimensionIndex dim = 0; dim < layout.full_rank(); ++dim) { if (layout.directions[dim] != -1) continue; const Index size_minus_1 = layout.shape[dim] - 1; const Index index_array_byte_stride = iterable->state_.index_array_byte_strides[j][dim]; index_array_pointer += wrap_on_overflow::Multiply(index_array_byte_stride, size_minus_1); } buffer_[j] = reinterpret_cast<Index>(index_array_pointer.get()); } Index base_offset = 0; for (DimensionIndex dim = 0; dim < layout.full_rank(); ++dim) { if (layout.directions[dim] != -1) continue; const Index size_minus_1 = layout.shape[dim] - 1; const Index input_byte_stride = iterable->state_.input_byte_strides[dim]; base_offset = wrap_on_overflow::Add( base_offset, wrap_on_overflow::Multiply(input_byte_stride, size_minus_1)); } for (DimensionIndex i = 0; i < layout.iteration_rank(); ++i) { const DimensionIndex dim = layout.iteration_dimensions[i]; if (dim == -1) { for (DimensionIndex j = 0; j < num_index_arrays_ + 1; ++j) { buffer_[num_index_arrays_ + layout.iteration_rank() * j + i] = 0; } } else { const Index dir = layout.directions[dim]; const Index input_byte_stride = iterable->state_.input_byte_strides[dim]; buffer_[num_index_arrays_ + i] = wrap_on_overflow::Multiply(input_byte_stride, dir); if (iterable->input_dimension_flags_[dim] & input_dim_iter_flags::array_indexed) { num_index_array_iteration_dims_ = i + 1; for (DimensionIndex j = 0; j < num_index_arrays_; ++j) { const Index index_array_byte_stride = iterable->state_.index_array_byte_strides[j][dim]; buffer_[num_index_arrays_ + layout.iteration_rank() * (j + 1) + i] = wrap_on_overflow::Multiply(index_array_byte_stride, dir); } } } } if (layout.buffer_kind == IterationBufferKind::kIndexed) { Index* offsets_array = buffer_.data() + num_index_arrays_ + layout.iteration_rank() * (num_index_arrays_ + 1); pointer_ = IterationBufferPointer{iterable->state_.base_pointer + base_offset, layout.block_shape[1], offsets_array}; if (num_index_array_iteration_dims_ + 1 < layout.iteration_rank()) { FillOffsetsArrayFromStride( buffer_[num_index_arrays_ + layout.iteration_rank() - 2], buffer_[num_index_arrays_ + layout.iteration_rank() - 1], layout.block_shape[0], layout.block_shape[1], offsets_array); } } else { assert(num_index_array_iteration_dims_ + 1 < layout.iteration_rank()); pointer_ = IterationBufferPointer{ iterable->state_.base_pointer + base_offset, buffer_[num_index_arrays_ + layout.iteration_rank() - 2], buffer_[num_index_arrays_ + layout.iteration_rank() - 1]}; } } allocator_type get_allocator() const override { return buffer_.get_allocator(); } bool GetBlock(tensorstore::span<const Index> indices, IterationBufferShape block_shape, IterationBufferPointer* pointer, absl::Status* status) override { IterationBufferPointer block_pointer = pointer_; block_pointer.pointer += IndexInnerProduct( indices.size(), indices.data(), buffer_.data() + num_index_arrays_); if (num_index_array_iteration_dims_ + 1 < indices.size()) { for (DimensionIndex j = 0; j < num_index_arrays_; ++j) { const Index index = ByteStridedPointer<const Index>( reinterpret_cast<const Index*>(buffer_[j]))[IndexInnerProduct( num_index_array_iteration_dims_, indices.data(), buffer_.data() + num_index_arrays_ + indices.size() * (j + 1))]; block_pointer.pointer += 
wrap_on_overflow::Multiply( iterable_->state_.index_array_output_byte_strides[j], index); } } else { block_pointer.byte_offsets_outer_stride = block_shape[1]; Index* offsets_array = const_cast<Index*>(block_pointer.byte_offsets); FillOffsetsArrayFromStride( buffer_[num_index_arrays_ + indices.size() - 2], buffer_[num_index_arrays_ + indices.size() - 1], block_shape[0], block_shape[1], offsets_array); for (DimensionIndex j = 0; j < num_index_arrays_; ++j) { const Index* index_array_byte_strides = buffer_.data() + num_index_arrays_ + indices.size() * (j + 1); ByteStridedPointer<const Index> index_array_pointer = ByteStridedPointer<const Index>( reinterpret_cast<const Index*>(buffer_[j])) + IndexInnerProduct(indices.size() - 2, indices.data(), index_array_byte_strides); const Index output_byte_stride = iterable_->state_.index_array_output_byte_strides[j]; const Index penultimate_index_array_byte_stride = index_array_byte_strides[indices.size() - 2]; const Index last_index_array_byte_stride = index_array_byte_strides[indices.size() - 1]; if (last_index_array_byte_stride == 0 && penultimate_index_array_byte_stride == 0) { block_pointer.pointer += wrap_on_overflow::Multiply( output_byte_stride, *index_array_pointer); } else { Index block_start0 = indices[indices.size() - 2]; Index block_start1 = indices[indices.size() - 1]; for (Index outer = 0; outer < block_shape[0]; ++outer) { for (Index inner = 0; inner < block_shape[1]; ++inner) { Index cur_contribution = wrap_on_overflow::Multiply( output_byte_stride, index_array_pointer[wrap_on_overflow::Add( wrap_on_overflow::Multiply( outer + block_start0, penultimate_index_array_byte_stride), wrap_on_overflow::Multiply( inner + block_start1, last_index_array_byte_stride))]); auto& offset = offsets_array[outer * block_shape[1] + inner]; offset = wrap_on_overflow::Add(offset, cur_contribution); } } } } } *pointer = block_pointer; return true; } private: DimensionIndex num_index_arrays_; DimensionIndex num_index_array_iteration_dims_; const IterableImpl* iterable_; IterationBufferPointer pointer_; std::vector<Index, ArenaAllocator<Index>> buffer_; }; std::shared_ptr<const void> data_owner_; IndexTransform<> transform_; internal_index_space::SingleArrayIterationState state_; DataType dtype_; std::vector<input_dim_iter_flags::Bitmask, ArenaAllocator<input_dim_iter_flags::Bitmask>> input_dimension_flags_; }; Result<NDIterable::Ptr> MaybeConvertToArrayNDIterable( std::unique_ptr<IterableImpl, VirtualDestroyDeleter> impl, Arena* arena) { if (impl->state_.num_array_indexed_output_dimensions == 0) { return GetArrayNDIterable( SharedOffsetArrayView<const void>( SharedElementPointer<const void>( std::shared_ptr<const void>(std::move(impl->data_owner_), impl->state_.base_pointer), impl->dtype_), StridedLayoutView<>(impl->transform_.input_rank(), impl->transform_.input_shape().data(), &impl->state_.input_byte_strides[0])), arena); } return impl; } } Result<NDIterable::Ptr> GetTransformedArrayNDIterable( SharedOffsetArrayView<const void> array, IndexTransformView<> transform, Arena* arena) { if (!transform.valid()) { return GetArrayNDIterable(array, arena); } auto impl = MakeUniqueWithVirtualIntrusiveAllocator<IterableImpl>( ArenaAllocator<>(arena), transform); TENSORSTORE_RETURN_IF_ERROR(InitializeSingleArrayIterationState( array, internal_index_space::TransformAccess::rep(transform), transform.input_origin().data(), transform.input_shape().data(), &impl->state_, impl->input_dimension_flags_.data())); impl->dtype_ = array.dtype(); impl->data_owner_ = 
std::move(array.element_pointer().pointer()); return MaybeConvertToArrayNDIterable(std::move(impl), arena); } Result<NDIterable::Ptr> GetTransformedArrayNDIterable( TransformedArray<Shared<const void>> array, Arena* arena) { auto impl = MakeUniqueWithVirtualIntrusiveAllocator<IterableImpl>( ArenaAllocator<>(arena), std::move(array.transform())); TENSORSTORE_RETURN_IF_ERROR(InitializeSingleArrayIterationState( ElementPointer<const void>(array.element_pointer()), internal_index_space::TransformAccess::rep(impl->transform_), impl->transform_.input_origin().data(), impl->transform_.input_shape().data(), &impl->state_, impl->input_dimension_flags_.data())); impl->dtype_ = array.dtype(); impl->data_owner_ = std::move(array.element_pointer().pointer()); return MaybeConvertToArrayNDIterable(std::move(impl), arena); } } }
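When the iterator above hands out a kIndexed buffer, GetBlock materializes a per-element byte-offset table: first a base table built from the two innermost iteration strides, then the index-array contributions added on top. A sketch of just the base-table step, assuming a simple outer*stride0 + inner*stride1 layout (FillOffsets and main() are illustrative stand-ins for FillOffsetsArrayFromStride, not the tensorstore API):

#include <cstddef>
#include <iostream>
#include <vector>

using Index = std::ptrdiff_t;

// offsets[outer * block_inner + inner] =
//     outer * outer_stride + inner * inner_stride
void FillOffsets(Index outer_stride, Index inner_stride, Index block_outer,
                 Index block_inner, Index* offsets) {
  for (Index outer = 0; outer < block_outer; ++outer) {
    for (Index inner = 0; inner < block_inner; ++inner) {
      offsets[outer * block_inner + inner] =
          outer * outer_stride + inner * inner_stride;
    }
  }
}

int main() {
  // A 2x3 block over an int array: row stride 12 bytes, column stride 4.
  std::vector<Index> offsets(2 * 3);
  FillOffsets(12, 4, 2, 3, offsets.data());
  for (Index off : offsets) std::cout << off << ' ';  // prints: 0 4 8 12 16 20
  std::cout << '\n';
}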
#include "tensorstore/internal/nditerable_transformed_array.h" #include <stddef.h> #include <array> #include <memory> #include <tuple> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "tensorstore/array.h" #include "tensorstore/array_testutil.h" #include "tensorstore/contiguous_layout.h" #include "tensorstore/data_type.h" #include "tensorstore/index.h" #include "tensorstore/index_space/dim_expression.h" #include "tensorstore/index_space/index_transform.h" #include "tensorstore/index_space/index_transform_builder.h" #include "tensorstore/index_space/transformed_array.h" #include "tensorstore/internal/arena.h" #include "tensorstore/internal/elementwise_function.h" #include "tensorstore/internal/nditerable.h" #include "tensorstore/internal/nditerable_buffer_management.h" #include "tensorstore/strided_layout.h" #include "tensorstore/util/element_pointer.h" #include "tensorstore/util/iterate.h" #include "tensorstore/util/result.h" #include "tensorstore/util/span.h" #include "tensorstore/util/status_testutil.h" namespace { using ::tensorstore::AllocateArray; using ::tensorstore::Index; using ::tensorstore::IndexTransformBuilder; using ::tensorstore::kImplicit; using ::tensorstore::MakeArray; using ::tensorstore::MatchesStatus; using ::tensorstore::Result; using ::tensorstore::Shared; using ::tensorstore::SharedArray; using ::tensorstore::skip_repeated_elements; using ::tensorstore::StridedLayout; using ::tensorstore::TransformedArray; using ::tensorstore::internal::Arena; using ::tensorstore::internal::GetTransformedArrayNDIterable; using ::tensorstore::internal::IterationBufferKind; using ::tensorstore::internal::IterationBufferShape; using ::tensorstore::internal::MultiNDIterator; using ::tensorstore::internal::NDIterable; using ::testing::ElementsAre; using ::testing::ElementsAreArray; using ::testing::FieldsAre; using ::testing::Pair; using IterationTrace = std::vector<void*>; template <typename... Element> std::pair<std::array<IterationTrace, sizeof...(Element)>, absl::Status> GetIterationTrace( MultiNDIterator<sizeof...(Element), true>* multi_iterator) { std::pair<std::array<IterationTrace, sizeof...(Element)>, absl::Status> result; for (auto block_shape = multi_iterator->ResetAtBeginning(); block_shape[0] && block_shape[1]; block_shape = multi_iterator->StepForward(block_shape)) { if (!multi_iterator->GetBlock(block_shape, &result.second)) { break; } ptrdiff_t i = 0; const auto unused = {( [&] { const auto get_trace_func = [](void* ptr, IterationTrace* trace) { trace->push_back(ptr); }; tensorstore::internal::ElementwiseFunction<1, IterationTrace*> func = tensorstore::internal::SimpleElementwiseFunction< decltype(get_trace_func)(Element), IterationTrace*>(); func[multi_iterator->buffer_kind](nullptr, block_shape, multi_iterator->block_pointers()[i], &result.first[i]); ++i; }(), 0)...}; (void)unused; } return result; } template <size_t N> using BlockTrace = std::vector<std::tuple<std::vector<Index>, IterationBufferShape, std::array<IterationTrace, N>>>; template <typename... 
Element> std::pair<BlockTrace<sizeof...(Element)>, absl::Status> GetBlockTrace( MultiNDIterator<sizeof...(Element), true>* multi_iterator) { std::pair<BlockTrace<sizeof...(Element)>, absl::Status> result; for (auto block_shape = multi_iterator->ResetAtBeginning(); block_shape[0] && block_shape[1]; block_shape = multi_iterator->StepForward(block_shape)) { if (!multi_iterator->GetBlock(block_shape, &result.second)) { break; } auto& [position, shape, traces] = result.first.emplace_back(); position.assign(multi_iterator->position().begin(), multi_iterator->position().end()); shape = block_shape; ptrdiff_t i = 0; const auto unused = {( [&, traces_ptr = &traces[i]] { const auto get_trace_func = [](void* ptr, IterationTrace* trace) { trace->push_back(ptr); }; tensorstore::internal::ElementwiseFunction<1, IterationTrace*> func = tensorstore::internal::SimpleElementwiseFunction< decltype(get_trace_func)(Element), IterationTrace*>(); func[multi_iterator->buffer_kind](nullptr, block_shape, multi_iterator->block_pointers()[i], traces_ptr); ++i; }(), 0)...}; (void)unused; } return result; } class MaybeDirectTest : public ::testing::TestWithParam<bool> { protected: Arena arena; Result<NDIterable::Ptr> GetMaybeDirectTransformedArrayNDIterable( tensorstore::SharedOffsetArrayView<const void> array, tensorstore::IndexTransformView<> transform) { if (GetParam()) { TENSORSTORE_ASSIGN_OR_RETURN(auto transformed_array, MakeTransformedArray(array, transform)); return GetTransformedArrayNDIterable(std::move(transformed_array), &arena); } else { return GetTransformedArrayNDIterable(std::move(array), transform, &arena); } } }; INSTANTIATE_TEST_SUITE_P(Indirect, MaybeDirectTest, ::testing::Values(true)); INSTANTIATE_TEST_SUITE_P(Direct, MaybeDirectTest, ::testing::Values(false)); TEST(NDIterableTransformedArrayTest, Strided) { Arena arena; auto a = AllocateArray<int>({2, 3}); auto ta = (a | tensorstore::Dims(1).SizedInterval(0, 2, 2)).value(); auto iterable = GetTransformedArrayNDIterable(ta, &arena).value(); MultiNDIterator<1, true> multi_iterator( ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena); EXPECT_EQ(IterationBufferKind::kStrided, multi_iterator.buffer_kind); EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(0, 1)); EXPECT_THAT( GetIterationTrace<int>(&multi_iterator), Pair(ElementsAre(ElementsAre(&a(0, 0), &a(0, 2), &a(1, 0), &a(1, 2))), absl::OkStatus())); } TEST(NDIterableTransformedArrayTest, SingleIndexedDimension) { Arena arena; auto a = AllocateArray<int>({4}); auto ta = (a | tensorstore::Dims(0).OuterIndexArraySlice( MakeArray<Index>({1, 2, 3, 0}))) .value(); auto iterable = GetTransformedArrayNDIterable(ta, &arena).value(); EXPECT_EQ(tensorstore::dtype_v<int>, iterable->dtype()); MultiNDIterator<1, true> multi_iterator( ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena); EXPECT_EQ(IterationBufferKind::kIndexed, multi_iterator.buffer_kind); EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(-1, 0)); EXPECT_THAT(GetIterationTrace<int>(&multi_iterator), Pair(ElementsAre(ElementsAre(&a(1), &a(2), &a(3), &a(0))), absl::OkStatus())); } TEST(NDIterableTransformedArrayTest, OneStridedOneIndexedDimensionIndexedBuffer) { Arena arena; auto a = AllocateArray<int>({2, 3}); auto ta = (a | tensorstore::Dims(1).OuterIndexArraySlice( MakeArray<Index>({0, 2, 1, 1}))) .value(); auto iterable = GetTransformedArrayNDIterable(ta, &arena).value(); MultiNDIterator<1, true> multi_iterator( ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena); 
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0)); EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2)); EXPECT_EQ(IterationBufferKind::kIndexed, multi_iterator.buffer_kind); EXPECT_THAT( GetIterationTrace<int>(&multi_iterator), Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2), &a(0, 1), &a(1, 1), &a(0, 1), &a(1, 1))), absl::OkStatus())); } TEST(NDIterableTransformedArrayTest, TwoStridedOneIndexedDimensionContiguousBuffer) { Arena arena; auto a = AllocateArray<int>({2, 3, 2}); auto ta = (a | tensorstore::Dims(1).OuterIndexArraySlice( MakeArray<Index>({0, 2, 1, 1}))) .value(); auto iterable = GetTransformedArrayNDIterable(ta, &arena).value(); MultiNDIterator<1, true> multi_iterator( ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena); EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0, 2)); EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2, 2)); EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind); EXPECT_THAT( GetIterationTrace<int>(&multi_iterator), Pair(ElementsAre(ElementsAreArray( { &a(0, 0, 0), &a(0, 0, 1), &a(1, 0, 0), &a(1, 0, 1), &a(0, 2, 0), &a(0, 2, 1), &a(1, 2, 0), &a(1, 2, 1), &a(0, 1, 0), &a(0, 1, 1), &a(1, 1, 0), &a(1, 1, 1), &a(0, 1, 0), &a(0, 1, 1), &a(1, 1, 0), &a(1, 1, 1) })), absl::OkStatus())); } TEST(NDIterableTransformedArrayTest, TwoStridedOneIndexedDimensionStridedBuffer) { Arena arena; auto a = AllocateArray<int>({2, 3, 4}); auto ta = (a | tensorstore::Dims(2).Stride(2) | tensorstore::Dims(1).OuterIndexArraySlice( MakeArray<Index>({0, 2, 1, 1}))) .value(); auto iterable = GetTransformedArrayNDIterable(ta, &arena).value(); MultiNDIterator<1, true> multi_iterator( ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena); EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0, 2)); EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2, 2)); EXPECT_EQ(IterationBufferKind::kStrided, multi_iterator.buffer_kind); EXPECT_THAT( GetIterationTrace<int>(&multi_iterator), Pair(ElementsAre(ElementsAreArray( { &a(0, 0, 0), &a(0, 0, 2), &a(1, 0, 0), &a(1, 0, 2), &a(0, 2, 0), &a(0, 2, 2), &a(1, 2, 0), &a(1, 2, 2), &a(0, 1, 0), &a(0, 1, 2), &a(1, 1, 0), &a(1, 1, 2), &a(0, 1, 0), &a(0, 1, 2), &a(1, 1, 0), &a(1, 1, 2) })), absl::OkStatus())); } TEST(NDIterableTransformedArrayTest, TwoStridedOneIndexedDimensionIndexedBuffer) { Arena arena; auto a = AllocateArray<int>({2, 3, 2}); auto ta = (a | tensorstore::Dims(1).OuterIndexArraySlice( MakeArray<Index>({0, 2, 1, 1}))) .value(); auto tb = (a | tensorstore::Dims(0).OuterIndexArraySlice(MakeArray<Index>({0, 1})) | tensorstore::Dims(1).OuterIndexArraySlice( MakeArray<Index>({0, 2, 1, 1}))) .value(); auto iterable1 = GetTransformedArrayNDIterable(ta, &arena).value(); auto iterable2 = GetTransformedArrayNDIterable(tb, &arena).value(); MultiNDIterator<2, true> multi_iterator( ta.shape(), skip_repeated_elements, {{iterable1.get(), iterable2.get()}}, &arena); EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0, 2)); EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2, 2)); EXPECT_EQ(IterationBufferKind::kIndexed, multi_iterator.buffer_kind); auto element_matcher = ElementsAreArray( { &a(0, 0, 0), &a(0, 0, 1), &a(1, 0, 0), &a(1, 0, 1), &a(0, 2, 0), &a(0, 2, 1), &a(1, 2, 0), &a(1, 2, 1), &a(0, 1, 0), &a(0, 1, 1), &a(1, 1, 0), &a(1, 1, 1), &a(0, 1, 0), &a(0, 1, 1), &a(1, 1, 0), &a(1, 1, 1) }); EXPECT_THAT( (GetIterationTrace<int, int>(&multi_iterator)), Pair(ElementsAre(element_matcher, 
element_matcher), absl::OkStatus())); } TEST(NDIterableTransformedArrayTest, IndexedAndReversedStrided) { Arena arena; auto a = AllocateArray<int>({2, 3}); auto ta = (a | tensorstore::Dims(1).OuterIndexArraySlice( MakeArray<Index>({0, 2, 1, 1})) | tensorstore::Dims(0).SizedInterval(kImplicit, kImplicit, -1)) .value(); auto iterable = GetTransformedArrayNDIterable(ta, &arena).value(); MultiNDIterator<1, true> multi_iterator( ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena); EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0)); EXPECT_THAT(multi_iterator.directions, ElementsAre(-1, 1)); EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2)); EXPECT_THAT( GetIterationTrace<int>(&multi_iterator), Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2), &a(0, 1), &a(1, 1), &a(0, 1), &a(1, 1))), absl::OkStatus())); } TEST(NDIterableTransformedArrayTest, IndexedCombine) { Arena arena; auto a = AllocateArray<int>({2, 3}); auto ta = (a | tensorstore::Dims(1).OuterIndexArraySlice( MakeArray<Index>({{0, 2}, {2, 0}}))) .value(); auto iterable = GetTransformedArrayNDIterable(ta, &arena).value(); MultiNDIterator<1, true> multi_iterator( ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena); EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(2, 0)); EXPECT_THAT( GetIterationTrace<int>(&multi_iterator), Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2), &a(0, 2), &a(1, 2), &a(0, 0), &a(1, 0))), absl::OkStatus())); } TEST(NDIterableTransformedArrayTest, IndexedCombinePartiallyReversed) { Arena arena; auto a = AllocateArray<int>({2, 3}); auto ta = (a | tensorstore::Dims(1) .OuterIndexArraySlice(MakeArray<Index>({{0, 2}, {2, 0}})) .SizedInterval(kImplicit, kImplicit, {1, -1})) .value(); auto iterable = GetTransformedArrayNDIterable(ta, &arena).value(); MultiNDIterator<1, true> multi_iterator( ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena); EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(2, 0)); EXPECT_THAT(multi_iterator.directions, ElementsAre(1, 1, -1)); EXPECT_THAT( GetIterationTrace<int>(&multi_iterator), Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2), &a(0, 2), &a(1, 2), &a(0, 0), &a(1, 0))), absl::OkStatus())); } TEST(NDIterableTransformedArrayTest, IndexedCombineBothReversed) { Arena arena; auto a = AllocateArray<int>({2, 3}); auto ta = (a | tensorstore::Dims(1) .OuterIndexArraySlice(MakeArray<Index>({{0, 2}, {2, 0}})) .SizedInterval(kImplicit, kImplicit, -1)) .value(); auto iterable = GetTransformedArrayNDIterable(ta, &arena).value(); MultiNDIterator<1, true> multi_iterator( ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena); EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(2, 0)); EXPECT_THAT(multi_iterator.directions, ElementsAre(1, -1, -1)); EXPECT_THAT( GetIterationTrace<int>(&multi_iterator), Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2), &a(0, 2), &a(1, 2), &a(0, 0), &a(1, 0))), absl::OkStatus())); } TEST(NDIterableTransformedArrayTest, IndexedVsStrided) { Arena arena; auto a = AllocateArray<int>({2, 2}); auto b = AllocateArray<int>({2, 3}); auto tb = (b | tensorstore::Dims(1).OuterIndexArraySlice(MakeArray<Index>({0, 2}))) .value(); auto iterable_a = GetTransformedArrayNDIterable(a, &arena).value(); auto iterable_b = GetTransformedArrayNDIterable(tb, &arena).value(); MultiNDIterator<2, true> multi_iterator( tb.shape(), skip_repeated_elements, {{iterable_a.get(), iterable_b.get()}}, &arena); 
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0)); EXPECT_THAT( (GetIterationTrace<int, int>(&multi_iterator)), Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 1), &a(1, 1)), ElementsAre(&b(0, 0), &b(1, 0), &b(0, 2), &b(1, 2))), absl::OkStatus())); } TEST(NDIterableTransformedArrayTest, IndexedWith2StridedDims) { Arena arena; auto a = AllocateArray<int>({2, 2, 3}); auto ta = (a | tensorstore::Dims(1).MoveToFront() | tensorstore::Dims(2).OuterIndexArraySlice(MakeArray<Index>({0, 2, 1}))) .value(); auto iterable = GetTransformedArrayNDIterable(ta, &arena).value(); MultiNDIterator<1, true> multi_iterator( ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena); EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(2, 0)); EXPECT_THAT(GetIterationTrace<int>(&multi_iterator), Pair(ElementsAre(ElementsAre( &a(0, 0, 0), &a(0, 1, 0), &a(1, 0, 0), &a(1, 1, 0), &a(0, 0, 2), &a(0, 1, 2), &a(1, 0, 2), &a(1, 1, 2), &a(0, 0, 1), &a(0, 1, 1), &a(1, 0, 1), &a(1, 1, 1))), absl::OkStatus())); } TEST(NDIterableTransformedArrayTest, TwoIndexedDims) { Arena arena; auto a = AllocateArray<int>({2, 3}); auto ta = (a | tensorstore::Dims(0).OuterIndexArraySlice(MakeArray<Index>({0, 1, 1})) | tensorstore::Dims(1).OuterIndexArraySlice(MakeArray<Index>({0, 2}))) .value(); auto iterable = GetTransformedArrayNDIterable(ta, &arena).value(); MultiNDIterator<1, true> multi_iterator( ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena); EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(0, 1)); EXPECT_THAT(GetIterationTrace<int>(&multi_iterator), Pair(ElementsAre(ElementsAre(&a(0, 0), &a(0, 2), &a(1, 0), &a(1, 2), &a(1, 0), &a(1, 2))), absl::OkStatus())); } TEST(NDIterableTransformedArrayTest, FourIndexedDims) { Arena arena; auto a = AllocateArray<int>({2, 3}); auto ta = (a | tensorstore::Dims(0).OuterIndexArraySlice( MakeArray<Index>({{0, 1}, {1, 1}})) | tensorstore::Dims(-1).OuterIndexArraySlice( MakeArray<Index>({{0, 2}, {1, 0}}))) .value(); auto b = AllocateArray<int>({2, 2, 2, 2}); auto iterable_a = GetTransformedArrayNDIterable(ta, &arena).value(); auto iterable_b = GetTransformedArrayNDIterable(b, &arena).value(); MultiNDIterator<2, true> multi_iterator( ta.shape(), skip_repeated_elements, {{iterable_a.get(), iterable_b.get()}}, &arena); EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 3)); EXPECT_THAT( (GetIterationTrace<int, int>(&multi_iterator)), Pair( ElementsAre( ElementsAre(&a(0, 0), &a(0, 2), &a(0, 1), &a(0, 0), &a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0), &a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0), &a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0)), ElementsAre( b.data() + 0, b.data() + 1, b.data() + 2, b.data() + 3, b.data() + 4, b.data() + 5, b.data() + 6, b.data() + 7, b.data() + 8, b.data() + 9, b.data() + 10, b.data() + 11, b.data() + 12, b.data() + 13, b.data() + 14, b.data() + 15)), absl::OkStatus())); } TEST(NDIterableTransformedArrayTest, LastTwoDimsStrided) { Arena arena; auto a = AllocateArray<int>({2, 3}); auto ta = (a | tensorstore::Dims(0).OuterIndexArraySlice( MakeArray<Index>({{0, 1}, {1, 1}})) | tensorstore::Dims(-1).OuterIndexArraySlice( MakeArray<Index>({{0, 2}, {1, 0}}))) .value(); auto b = AllocateArray<int>({2, 2, 2, 2}); auto iterable_a = GetTransformedArrayNDIterable(ta, &arena).value(); auto iterable_b = GetTransformedArrayNDIterable(b, &arena).value(); MultiNDIterator<2, true> multi_iterator( ta.shape(), skip_repeated_elements, {{iterable_a.get(), iterable_b.get()}}, &arena); EXPECT_THAT(multi_iterator.iteration_dimensions, 
ElementsAre(1, 3)); EXPECT_THAT( (GetIterationTrace<int, int>(&multi_iterator)), Pair( ElementsAre( ElementsAre(&a(0, 0), &a(0, 2), &a(0, 1), &a(0, 0), &a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0), &a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0), &a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0)), ElementsAre( b.data() + 0, b.data() + 1, b.data() + 2, b.data() + 3, b.data() + 4, b.data() + 5, b.data() + 6, b.data() + 7, b.data() + 8, b.data() + 9, b.data() + 10, b.data() + 11, b.data() + 12, b.data() + 13, b.data() + 14, b.data() + 15)), absl::OkStatus())); } TEST(NDIterableTransformedArrayTest, TwoTransformedArrays) { Arena arena; auto a = AllocateArray<int>({2, 3}); auto b = AllocateArray<int>({2, 3}); auto ta = (a | tensorstore::Dims(0).OuterIndexArraySlice(MakeArray<Index>({0, 1}))) .value(); auto tb = (b | tensorstore::Dims(1).OuterIndexArraySlice( MakeArray<Index>({0, 1, 2}))) .value(); auto iterable_a = GetTransformedArrayNDIterable(ta, &arena).value(); auto iterable_b = GetTransformedArrayNDIterable(tb, &arena).value(); MultiNDIterator<2, true> multi_iterator( ta.shape(), skip_repeated_elements, {{iterable_a.get(), iterable_b.get()}}, &arena); EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(0, 1)); EXPECT_THAT((GetIterationTrace<int, int>(&multi_iterator)), Pair(ElementsAre(ElementsAre(&a(0, 0), &a(0, 1), &a(0, 2), &a(1, 0), &a(1, 1), &a(1, 2)), ElementsAre(&b(0, 0), &b(0, 1), &b(0, 2), &b(1, 0), &b(1, 1), &b(1, 2))), absl::OkStatus())); } TEST(NDIterableTransformedArrayTest, ZeroRankIndexArray) { Arena arena; SharedArray<const Index> index_array{std::make_shared<Index>(3), StridedLayout<>({5}, {0})}; int data[100]; TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto transform, IndexTransformBuilder(1, 1) .input_shape({5}) .output_index_array(0, sizeof(int) * 2, sizeof(int) * 4, index_array) .Finalize()); auto iterable_a = GetTransformedArrayNDIterable( {tensorstore::UnownedToShared( tensorstore::ElementPointer<int>(&data[0])), transform}, &arena) .value(); MultiNDIterator<1, true> multi_iterator( transform.input_shape(), skip_repeated_elements, {{iterable_a.get()}}, &arena); EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(-1, -1)); EXPECT_THAT( (GetIterationTrace<int>(&multi_iterator)), Pair(ElementsAre(ElementsAre(&data[4 * 3 + 2])), absl::OkStatus())); } TEST(NDIterableTransformedArrayTest, OutOfBoundsConstant) { Arena arena; auto a = AllocateArray<int>({5}); auto transform = IndexTransformBuilder<1, 1>() .input_shape({5}) .output_constant(0, 8) .Finalize() .value(); EXPECT_THAT( GetTransformedArrayNDIterable(a, transform, &arena), MatchesStatus(absl::StatusCode::kOutOfRange, "Checking bounds of constant output index map for " "dimension 0: Index 8 is outside valid range \\[0, 5\\)")); } TEST(NDIterableTransformedArrayTest, NullTransform) { Arena arena; auto a = AllocateArray<int>({5}); auto iterable_a = GetTransformedArrayNDIterable(a, {}, &arena).value(); EXPECT_EQ(tensorstore::dtype_v<int>, iterable_a->dtype()); MultiNDIterator<1, true> multi_iterator( a.shape(), skip_repeated_elements, {{iterable_a.get()}}, &arena); EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(-1, 0)); EXPECT_THAT((GetIterationTrace<int>(&multi_iterator)), Pair(ElementsAre(ElementsAre(&a(0), &a(1), &a(2), &a(3), &a(4))), absl::OkStatus())); } TEST(NDIterableTransformedArrayTest, IdentityTransform) { Arena arena; auto a = AllocateArray<int>({5}); auto iterable_a = GetTransformedArrayNDIterable( a, tensorstore::IdentityTransform(tensorstore::span<const Index>({5})), &arena) .value(); 
EXPECT_EQ(tensorstore::dtype_v<int>, iterable_a->dtype()); MultiNDIterator<1, true> multi_iterator( a.shape(), skip_repeated_elements, {{iterable_a.get()}}, &arena); EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(-1, 0)); EXPECT_THAT((GetIterationTrace<int>(&multi_iterator)), Pair(ElementsAre(ElementsAre(&a(0), &a(1), &a(2), &a(3), &a(4))), absl::OkStatus())); } TEST(NDIterableTransformedArrayTest, OutOfBoundsSingleInputDimension) { Arena arena; auto a = AllocateArray<int>({5}); auto transform = IndexTransformBuilder<1, 1>() .input_shape({5}) .output_single_input_dimension(0, 2, 1, 0) .Finalize() .value(); EXPECT_THAT(GetTransformedArrayNDIterable(a, transform, &arena), MatchesStatus(absl::StatusCode::kOutOfRange, "Output dimension 0 range of \\[2, 7\\) is not " "contained within array domain of \\[0, 5\\)")); } TEST_P(MaybeDirectTest, OutOfBoundsIndexArray) { auto a = AllocateArray<int>({5}); auto transform = IndexTransformBuilder<1, 1>() .input_shape({5}) .output_index_array(0, 2, 1, MakeArray<Index>({0, 0, 0, 0, 42})) .Finalize() .value(); EXPECT_THAT(GetMaybeDirectTransformedArrayNDIterable(a, transform), MatchesStatus(absl::StatusCode::kOutOfRange, ".*Index 42 is outside valid range \\[-2, 3\\)")); } TEST_P(MaybeDirectTest, OutOfBoundsSingletonIndexArray) { SharedArray<const Index> index_array{std::make_shared<Index>(42), StridedLayout<>({5}, {0})}; auto a = AllocateArray<int>({5}); auto transform = IndexTransformBuilder<1, 1>() .input_shape({5}) .output_index_array(0, 2, 1, index_array) .Finalize() .value(); EXPECT_THAT(GetMaybeDirectTransformedArrayNDIterable(a, transform), MatchesStatus(absl::StatusCode::kOutOfRange, ".*Index 42 is outside valid range \\[-2, 3\\)")); } TEST(NDIterableTransformedArrayTest, BlockTraceThreeStridedDimensions) { Arena arena; auto a = AllocateArray<int>({2, 5, 3}); auto ta = (a | tensorstore::Dims(1).SizedInterval(0, 2, 2)).value(); auto iterable = GetTransformedArrayNDIterable(ta, &arena).value(); MultiNDIterator<1, true> multi_iterator( ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena); EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind); EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(0, 1, 2)); EXPECT_THAT( GetBlockTrace<int>(&multi_iterator), Pair(ElementsAre(FieldsAre(ElementsAre(0, 0, 0), ElementsAre(2, 3), ElementsAre(ElementsAreArray({ &a(0, 0, 0), &a(0, 0, 1), &a(0, 0, 2), &a(0, 2, 0), &a(0, 2, 1), &a(0, 2, 2), }))), FieldsAre(ElementsAre(1, 0, 0), ElementsAre(2, 3), ElementsAre(ElementsAreArray({ &a(1, 0, 0), &a(1, 0, 1), &a(1, 0, 2), &a(1, 2, 0), &a(1, 2, 1), &a(1, 2, 2), })))), absl::OkStatus())); } TEST(NDIterableTransformedArrayTest, InnermostBlockSizeLessThanInnermostIterationSize) { Arena arena; auto a = AllocateArray<int>({2, 32768}, tensorstore::c_order, tensorstore::value_init); auto ta = (a | tensorstore::Dims(0).IndexArraySlice(MakeArray<Index>({0, 1}))) .value(); auto iterable = GetTransformedArrayNDIterable(ta, &arena).value(); struct IncrementValue { void operator()(int* x) const { *x += 1; } }; constexpr tensorstore::internal::ElementwiseFunction<1> increment_value_func = tensorstore::internal::SimpleElementwiseFunction<IncrementValue(int)>(); TENSORSTORE_ASSERT_OK( (tensorstore::internal::IterateOverNDIterables<1, true>( ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena, {&increment_value_func, nullptr}))); EXPECT_THAT(a, tensorstore::MatchesArray( tensorstore::BroadcastArray( tensorstore::MakeScalarArray<int>(1), a.shape()) .value())); } }
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_transformed_array.cc
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_transformed_array_test.cc
4f887a6430414cd6088e1743555015b10f116d50
03d1f556-a605-4615-a23e-2c5adb2cbce3
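Most of the tests above assert an iteration trace: the exact sequence of element pointers produced when one dimension is driven by an index array and the others by strides. A standalone sketch of that addressing scheme for the OneStridedOneIndexedDimensionIndexedBuffer case, iterating the index-array dimension outermost as the test's iteration_dimensions of (1, 0) indicates (plain arrays and hypothetical names, not the tensorstore API):

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  // 2x3 row-major array in flat storage, like AllocateArray<int>({2, 3}):
  // a(i, j) lives at data[i * 3 + j].
  int data[6] = {0, 1, 2, 3, 4, 5};
  const std::ptrdiff_t row_stride = 3, col_stride = 1;

  // Index array applied to dimension 1, as in
  // Dims(1).OuterIndexArraySlice(MakeArray<Index>({0, 2, 1, 1})).
  const std::vector<std::ptrdiff_t> index_array = {0, 2, 1, 1};

  // Index-array dimension outermost, strided dimension innermost.
  std::vector<const int*> trace;
  for (std::ptrdiff_t k : index_array) {
    for (std::ptrdiff_t i = 0; i < 2; ++i) {
      trace.push_back(data + i * row_stride + k * col_stride);
    }
  }
  for (const int* p : trace) std::cout << *p << ' ';  // prints: 0 3 2 5 1 4 1 4
  std::cout << '\n';
}

The printed values correspond to &a(0,0), &a(1,0), &a(0,2), &a(1,2), &a(0,1), &a(1,1), &a(0,1), &a(1,1), the same pointer order the test asserts.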
cpp
google/quiche
quic_idle_network_detector
quiche/quic/core/quic_idle_network_detector.cc
quiche/quic/core/quic_idle_network_detector_test.cc
#include "quiche/quic/core/quic_idle_network_detector.h" #include <algorithm> #include "quiche/quic/core/quic_constants.h" #include "quiche/quic/core/quic_time.h" #include "quiche/quic/platform/api/quic_flag_utils.h" #include "quiche/quic/platform/api/quic_flags.h" #include "quiche/common/platform/api/quiche_logging.h" namespace quic { namespace { } QuicIdleNetworkDetector::QuicIdleNetworkDetector(Delegate* delegate, QuicTime now, QuicAlarm* alarm) : delegate_(delegate), start_time_(now), handshake_timeout_(QuicTime::Delta::Infinite()), time_of_last_received_packet_(now), time_of_first_packet_sent_after_receiving_(QuicTime::Zero()), idle_network_timeout_(QuicTime::Delta::Infinite()), alarm_(*alarm) {} void QuicIdleNetworkDetector::OnAlarm() { if (handshake_timeout_.IsInfinite()) { delegate_->OnIdleNetworkDetected(); return; } if (idle_network_timeout_.IsInfinite()) { delegate_->OnHandshakeTimeout(); return; } if (last_network_activity_time() + idle_network_timeout_ > start_time_ + handshake_timeout_) { delegate_->OnHandshakeTimeout(); return; } delegate_->OnIdleNetworkDetected(); } void QuicIdleNetworkDetector::SetTimeouts( QuicTime::Delta handshake_timeout, QuicTime::Delta idle_network_timeout) { handshake_timeout_ = handshake_timeout; idle_network_timeout_ = idle_network_timeout; SetAlarm(); } void QuicIdleNetworkDetector::StopDetection() { alarm_.PermanentCancel(); handshake_timeout_ = QuicTime::Delta::Infinite(); idle_network_timeout_ = QuicTime::Delta::Infinite(); handshake_timeout_ = QuicTime::Delta::Infinite(); stopped_ = true; } void QuicIdleNetworkDetector::OnPacketSent(QuicTime now, QuicTime::Delta pto_delay) { if (time_of_first_packet_sent_after_receiving_ > time_of_last_received_packet_) { return; } time_of_first_packet_sent_after_receiving_ = std::max(time_of_first_packet_sent_after_receiving_, now); if (shorter_idle_timeout_on_sent_packet_) { MaybeSetAlarmOnSentPacket(pto_delay); return; } SetAlarm(); } void QuicIdleNetworkDetector::OnPacketReceived(QuicTime now) { time_of_last_received_packet_ = std::max(time_of_last_received_packet_, now); SetAlarm(); } void QuicIdleNetworkDetector::SetAlarm() { if (stopped_) { QUIC_BUG(quic_idle_detector_set_alarm_after_stopped) << "SetAlarm called after stopped"; return; } QuicTime new_deadline = QuicTime::Zero(); if (!handshake_timeout_.IsInfinite()) { new_deadline = start_time_ + handshake_timeout_; } if (!idle_network_timeout_.IsInfinite()) { const QuicTime idle_network_deadline = GetIdleNetworkDeadline(); if (new_deadline.IsInitialized()) { new_deadline = std::min(new_deadline, idle_network_deadline); } else { new_deadline = idle_network_deadline; } } alarm_.Update(new_deadline, kAlarmGranularity); } void QuicIdleNetworkDetector::MaybeSetAlarmOnSentPacket( QuicTime::Delta pto_delay) { QUICHE_DCHECK(shorter_idle_timeout_on_sent_packet_); if (!handshake_timeout_.IsInfinite() || !alarm_.IsSet()) { SetAlarm(); return; } const QuicTime deadline = alarm_.deadline(); const QuicTime min_deadline = last_network_activity_time() + pto_delay; if (deadline > min_deadline) { return; } alarm_.Update(min_deadline, kAlarmGranularity); } QuicTime QuicIdleNetworkDetector::GetIdleNetworkDeadline() const { if (idle_network_timeout_.IsInfinite()) { return QuicTime::Zero(); } return last_network_activity_time() + idle_network_timeout_; } }
#include "quiche/quic/core/quic_idle_network_detector.h" #include "quiche/quic/core/quic_connection_alarms.h" #include "quiche/quic/core/quic_one_block_arena.h" #include "quiche/quic/core/quic_time.h" #include "quiche/quic/platform/api/quic_expect_bug.h" #include "quiche/quic/platform/api/quic_flags.h" #include "quiche/quic/platform/api/quic_test.h" #include "quiche/quic/test_tools/mock_quic_connection_alarms.h" #include "quiche/quic/test_tools/quic_test_utils.h" namespace quic { namespace test { class QuicIdleNetworkDetectorTestPeer { public: static QuicAlarm& GetAlarm(QuicIdleNetworkDetector* detector) { return detector->alarm_; } }; namespace { class MockDelegate : public QuicIdleNetworkDetector::Delegate { public: MOCK_METHOD(void, OnHandshakeTimeout, (), (override)); MOCK_METHOD(void, OnIdleNetworkDetected, (), (override)); }; class QuicIdleNetworkDetectorTest : public QuicTest { public: QuicIdleNetworkDetectorTest() : alarms_(&connection_alarms_delegate_, alarm_factory_, arena_), detector_(&delegate_, clock_.Now() + QuicTimeDelta::FromSeconds(1), &alarms_.idle_network_detector_alarm()) { clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1)); alarm_ = static_cast<MockAlarmFactory::TestAlarm*>( &alarms_.idle_network_detector_alarm()); ON_CALL(connection_alarms_delegate_, OnIdleDetectorAlarm()) .WillByDefault([&] { detector_.OnAlarm(); }); } protected: testing::StrictMock<MockDelegate> delegate_; MockConnectionAlarmsDelegate connection_alarms_delegate_; QuicConnectionArena arena_; MockAlarmFactory alarm_factory_; QuicConnectionAlarms alarms_; MockClock clock_; QuicIdleNetworkDetector detector_; MockAlarmFactory::TestAlarm* alarm_; }; TEST_F(QuicIdleNetworkDetectorTest, IdleNetworkDetectedBeforeHandshakeCompletes) { EXPECT_FALSE(alarm_->IsSet()); detector_.SetTimeouts( QuicTime::Delta::FromSeconds(30), QuicTime::Delta::FromSeconds(20)); EXPECT_TRUE(alarm_->IsSet()); EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(20), alarm_->deadline()); clock_.AdvanceTime(QuicTime::Delta::FromSeconds(20)); EXPECT_CALL(delegate_, OnIdleNetworkDetected()); alarm_->Fire(); } TEST_F(QuicIdleNetworkDetectorTest, HandshakeTimeout) { EXPECT_FALSE(alarm_->IsSet()); detector_.SetTimeouts( QuicTime::Delta::FromSeconds(30), QuicTime::Delta::FromSeconds(20)); EXPECT_TRUE(alarm_->IsSet()); clock_.AdvanceTime(QuicTime::Delta::FromSeconds(15)); detector_.OnPacketReceived(clock_.Now()); EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(15), alarm_->deadline()); clock_.AdvanceTime(QuicTime::Delta::FromSeconds(15)); EXPECT_CALL(delegate_, OnHandshakeTimeout()); alarm_->Fire(); } TEST_F(QuicIdleNetworkDetectorTest, IdleNetworkDetectedAfterHandshakeCompletes) { EXPECT_FALSE(alarm_->IsSet()); detector_.SetTimeouts( QuicTime::Delta::FromSeconds(30), QuicTime::Delta::FromSeconds(20)); EXPECT_TRUE(alarm_->IsSet()); EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(20), alarm_->deadline()); clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(200)); detector_.OnPacketReceived(clock_.Now()); detector_.SetTimeouts( QuicTime::Delta::Infinite(), QuicTime::Delta::FromSeconds(600)); EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(600), alarm_->deadline()); clock_.AdvanceTime(QuicTime::Delta::FromSeconds(600)); EXPECT_CALL(delegate_, OnIdleNetworkDetected()); alarm_->Fire(); } TEST_F(QuicIdleNetworkDetectorTest, DoNotExtendIdleDeadlineOnConsecutiveSentPackets) { EXPECT_FALSE(alarm_->IsSet()); detector_.SetTimeouts( QuicTime::Delta::FromSeconds(30), QuicTime::Delta::FromSeconds(20)); EXPECT_TRUE(alarm_->IsSet()); 
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(200)); detector_.OnPacketReceived(clock_.Now()); detector_.SetTimeouts( QuicTime::Delta::Infinite(), QuicTime::Delta::FromSeconds(600)); EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(600), alarm_->deadline()); clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(200)); detector_.OnPacketSent(clock_.Now(), QuicTime::Delta::Zero()); const QuicTime packet_sent_time = clock_.Now(); EXPECT_EQ(packet_sent_time + QuicTime::Delta::FromSeconds(600), alarm_->deadline()); clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(200)); detector_.OnPacketSent(clock_.Now(), QuicTime::Delta::Zero()); EXPECT_EQ(packet_sent_time + QuicTime::Delta::FromSeconds(600), alarm_->deadline()); clock_.AdvanceTime(QuicTime::Delta::FromSeconds(600) - QuicTime::Delta::FromMilliseconds(200)); EXPECT_CALL(delegate_, OnIdleNetworkDetected()); alarm_->Fire(); } TEST_F(QuicIdleNetworkDetectorTest, ShorterIdleTimeoutOnSentPacket) { detector_.enable_shorter_idle_timeout_on_sent_packet(); QuicTime::Delta idle_network_timeout = QuicTime::Delta::Zero(); idle_network_timeout = QuicTime::Delta::FromSeconds(30); detector_.SetTimeouts( QuicTime::Delta::Infinite(), idle_network_timeout); EXPECT_TRUE(alarm_->IsSet()); const QuicTime deadline = alarm_->deadline(); EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(30), deadline); clock_.AdvanceTime(QuicTime::Delta::FromSeconds(15)); detector_.OnPacketSent(clock_.Now(), QuicTime::Delta::FromSeconds(2)); EXPECT_TRUE(alarm_->IsSet()); EXPECT_EQ(deadline, alarm_->deadline()); clock_.AdvanceTime(QuicTime::Delta::FromSeconds(14)); detector_.OnPacketSent(clock_.Now(), QuicTime::Delta::FromSeconds(2)); EXPECT_TRUE(alarm_->IsSet()); EXPECT_EQ(deadline, alarm_->deadline()); clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1)); detector_.OnPacketReceived(clock_.Now()); EXPECT_TRUE(alarm_->IsSet()); EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(30), alarm_->deadline()); clock_.AdvanceTime(QuicTime::Delta::FromSeconds(29)); detector_.OnPacketSent(clock_.Now(), QuicTime::Delta::FromSeconds(2)); EXPECT_TRUE(alarm_->IsSet()); EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(2), alarm_->deadline()); } TEST_F(QuicIdleNetworkDetectorTest, NoAlarmAfterStopped) { detector_.StopDetection(); EXPECT_QUIC_BUG( detector_.SetTimeouts( QuicTime::Delta::FromSeconds(30), QuicTime::Delta::FromSeconds(20)), "SetAlarm called after stopped"); EXPECT_FALSE(alarm_->IsSet()); } } } }
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_idle_network_detector.cc
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_idle_network_detector_test.cc
6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
32f38623-1d99-4128-9f44-0ec7c874d2fa
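The detector above arms a single alarm for whichever of two deadlines comes first: the handshake deadline (start_time_ + handshake_timeout_) and the idle deadline (last_network_activity_time() + idle_network_timeout_); OnAlarm() then attributes the firing to whichever deadline was earlier. A minimal standalone sketch of that choice, with illustrative names only (this is not the QUICHE API):

// Standalone model of the deadline selection in SetAlarm()/OnAlarm().
#include <algorithm>
#include <cstdint>
#include <iostream>

int64_t NextDeadlineUs(int64_t start_us, int64_t handshake_timeout_us,
                       int64_t last_activity_us, int64_t idle_timeout_us) {
  const int64_t handshake_deadline_us = start_us + handshake_timeout_us;
  const int64_t idle_deadline_us = last_activity_us + idle_timeout_us;
  return std::min(handshake_deadline_us, idle_deadline_us);
}

int main() {
  // Mirrors the HandshakeTimeout test: 30s handshake timeout, 20s idle
  // timeout, last packet received at t=15s. The idle deadline (35s) now
  // falls past the handshake deadline (30s), so the handshake deadline
  // wins and OnHandshakeTimeout() is the callback that fires.
  std::cout << NextDeadlineUs(0, 30'000'000, 15'000'000, 20'000'000)
            << " us\n";  // prints 30000000 us
}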
cpp
abseil/abseil-cpp
log_entry
absl/log/log_entry.cc
absl/log/log_entry_test.cc
#include "absl/log/log_entry.h" #include "absl/base/config.h" namespace absl { ABSL_NAMESPACE_BEGIN #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL constexpr int LogEntry::kNoVerbosityLevel; constexpr int LogEntry::kNoVerboseLevel; #endif #ifdef __APPLE__ namespace log_internal { extern const char kAvoidEmptyLogEntryLibraryWarning; const char kAvoidEmptyLogEntryLibraryWarning = 0; } #endif ABSL_NAMESPACE_END }
#include "absl/log/log_entry.h" #include <stddef.h> #include <stdint.h> #include <cstring> #include <limits> #include <string> #include <type_traits> #include <utility> #include <vector> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/log_severity.h" #include "absl/log/internal/append_truncated.h" #include "absl/log/internal/log_format.h" #include "absl/log/internal/test_helpers.h" #include "absl/strings/numbers.h" #include "absl/strings/str_split.h" #include "absl/strings/string_view.h" #include "absl/time/civil_time.h" #include "absl/time/time.h" #include "absl/types/span.h" namespace { using ::absl::log_internal::LogEntryTestPeer; using ::testing::Eq; using ::testing::IsTrue; using ::testing::StartsWith; using ::testing::StrEq; auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment( new absl::log_internal::LogTestEnvironment); } namespace absl { ABSL_NAMESPACE_BEGIN namespace log_internal { class LogEntryTestPeer { public: LogEntryTestPeer(absl::string_view base_filename, int line, bool prefix, absl::LogSeverity severity, absl::string_view timestamp, absl::LogEntry::tid_t tid, PrefixFormat format, absl::string_view text_message) : format_{format}, buf_(15000, '\0') { entry_.base_filename_ = base_filename; entry_.line_ = line; entry_.prefix_ = prefix; entry_.severity_ = severity; std::string time_err; EXPECT_THAT( absl::ParseTime("%Y-%m-%d%ET%H:%M:%E*S", timestamp, absl::LocalTimeZone(), &entry_.timestamp_, &time_err), IsTrue()) << "Failed to parse time " << timestamp << ": " << time_err; entry_.tid_ = tid; std::pair<absl::string_view, std::string> timestamp_bits = absl::StrSplit(timestamp, absl::ByChar('.')); EXPECT_THAT(absl::ParseCivilTime(timestamp_bits.first, &ci_.cs), IsTrue()) << "Failed to parse time " << timestamp_bits.first; timestamp_bits.second.resize(9, '0'); int64_t nanos = 0; EXPECT_THAT(absl::SimpleAtoi(timestamp_bits.second, &nanos), IsTrue()) << "Failed to parse time " << timestamp_bits.first; ci_.subsecond = absl::Nanoseconds(nanos); absl::Span<char> view = absl::MakeSpan(buf_); view.remove_suffix(2); entry_.prefix_len_ = entry_.prefix_ ? 
log_internal::FormatLogPrefix( entry_.log_severity(), entry_.timestamp(), entry_.tid(), entry_.source_basename(), entry_.source_line(), format_, view) : 0; EXPECT_THAT(entry_.prefix_len_, Eq(static_cast<size_t>(view.data() - buf_.data()))); log_internal::AppendTruncated(text_message, view); view = absl::Span<char>(view.data(), view.size() + 2); view[0] = '\n'; view[1] = '\0'; view.remove_prefix(2); buf_.resize(static_cast<size_t>(view.data() - buf_.data())); entry_.text_message_with_prefix_and_newline_and_nul_ = absl::MakeSpan(buf_); } LogEntryTestPeer(const LogEntryTestPeer&) = delete; LogEntryTestPeer& operator=(const LogEntryTestPeer&) = delete; std::string FormatLogMessage() const { return log_internal::FormatLogMessage( entry_.log_severity(), ci_.cs, ci_.subsecond, entry_.tid(), entry_.source_basename(), entry_.source_line(), format_, entry_.text_message()); } std::string FormatPrefixIntoSizedBuffer(size_t sz) { std::string str(sz, '\0'); absl::Span<char> buf(&str[0], str.size()); const size_t prefix_size = log_internal::FormatLogPrefix( entry_.log_severity(), entry_.timestamp(), entry_.tid(), entry_.source_basename(), entry_.source_line(), format_, buf); EXPECT_THAT(prefix_size, Eq(static_cast<size_t>(buf.data() - str.data()))); str.resize(prefix_size); return str; } const absl::LogEntry& entry() const { return entry_; } private: absl::LogEntry entry_; PrefixFormat format_; absl::TimeZone::CivilInfo ci_; std::vector<char> buf_; }; } ABSL_NAMESPACE_END } namespace { constexpr bool kUsePrefix = true, kNoPrefix = false; TEST(LogEntryTest, Baseline) { LogEntryTestPeer entry("foo.cc", 1234, kUsePrefix, absl::LogSeverity::kInfo, "2020-01-02T03:04:05.6789", 451, absl::log_internal::PrefixFormat::kNotRaw, "hello world"); EXPECT_THAT(entry.FormatLogMessage(), Eq("I0102 03:04:05.678900 451 foo.cc:1234] hello world")); EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), Eq("I0102 03:04:05.678900 451 foo.cc:1234] ")); for (size_t sz = strlen("I0102 03:04:05.678900 451 foo.cc:1234] ") + 20; sz != std::numeric_limits<size_t>::max(); sz--) EXPECT_THAT("I0102 03:04:05.678900 451 foo.cc:1234] ", StartsWith(entry.FormatPrefixIntoSizedBuffer(sz))); EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline(), Eq("I0102 03:04:05.678900 451 foo.cc:1234] hello world\n")); EXPECT_THAT( entry.entry().text_message_with_prefix_and_newline_c_str(), StrEq("I0102 03:04:05.678900 451 foo.cc:1234] hello world\n")); EXPECT_THAT(entry.entry().text_message_with_prefix(), Eq("I0102 03:04:05.678900 451 foo.cc:1234] hello world")); EXPECT_THAT(entry.entry().text_message(), Eq("hello world")); } TEST(LogEntryTest, NoPrefix) { LogEntryTestPeer entry("foo.cc", 1234, kNoPrefix, absl::LogSeverity::kInfo, "2020-01-02T03:04:05.6789", 451, absl::log_internal::PrefixFormat::kNotRaw, "hello world"); EXPECT_THAT(entry.FormatLogMessage(), Eq("I0102 03:04:05.678900 451 foo.cc:1234] hello world")); EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), Eq("I0102 03:04:05.678900 451 foo.cc:1234] ")); for (size_t sz = strlen("I0102 03:04:05.678900 451 foo.cc:1234] ") + 20; sz != std::numeric_limits<size_t>::max(); sz--) EXPECT_THAT("I0102 03:04:05.678900 451 foo.cc:1234] ", StartsWith(entry.FormatPrefixIntoSizedBuffer(sz))); EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline(), Eq("hello world\n")); EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline_c_str(), StrEq("hello world\n")); EXPECT_THAT(entry.entry().text_message_with_prefix(), Eq("hello world")); EXPECT_THAT(entry.entry().text_message(), Eq("hello 
world")); } TEST(LogEntryTest, EmptyFields) { LogEntryTestPeer entry("", 0, kUsePrefix, absl::LogSeverity::kInfo, "2020-01-02T03:04:05", 0, absl::log_internal::PrefixFormat::kNotRaw, ""); const std::string format_message = entry.FormatLogMessage(); EXPECT_THAT(format_message, Eq("I0102 03:04:05.000000 0 :0] ")); EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), Eq(format_message)); for (size_t sz = format_message.size() + 20; sz != std::numeric_limits<size_t>::max(); sz--) EXPECT_THAT(format_message, StartsWith(entry.FormatPrefixIntoSizedBuffer(sz))); EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline(), Eq("I0102 03:04:05.000000 0 :0] \n")); EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline_c_str(), StrEq("I0102 03:04:05.000000 0 :0] \n")); EXPECT_THAT(entry.entry().text_message_with_prefix(), Eq("I0102 03:04:05.000000 0 :0] ")); EXPECT_THAT(entry.entry().text_message(), Eq("")); } TEST(LogEntryTest, NegativeFields) { if (std::is_signed<absl::LogEntry::tid_t>::value) { LogEntryTestPeer entry( "foo.cc", -1234, kUsePrefix, absl::LogSeverity::kInfo, "2020-01-02T03:04:05.6789", static_cast<absl::LogEntry::tid_t>(-451), absl::log_internal::PrefixFormat::kNotRaw, "hello world"); EXPECT_THAT(entry.FormatLogMessage(), Eq("I0102 03:04:05.678900 -451 foo.cc:-1234] hello world")); EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), Eq("I0102 03:04:05.678900 -451 foo.cc:-1234] ")); for (size_t sz = strlen("I0102 03:04:05.678900 -451 foo.cc:-1234] ") + 20; sz != std::numeric_limits<size_t>::max(); sz--) EXPECT_THAT("I0102 03:04:05.678900 -451 foo.cc:-1234] ", StartsWith(entry.FormatPrefixIntoSizedBuffer(sz))); EXPECT_THAT( entry.entry().text_message_with_prefix_and_newline(), Eq("I0102 03:04:05.678900 -451 foo.cc:-1234] hello world\n")); EXPECT_THAT( entry.entry().text_message_with_prefix_and_newline_c_str(), StrEq("I0102 03:04:05.678900 -451 foo.cc:-1234] hello world\n")); EXPECT_THAT(entry.entry().text_message_with_prefix(), Eq("I0102 03:04:05.678900 -451 foo.cc:-1234] hello world")); EXPECT_THAT(entry.entry().text_message(), Eq("hello world")); } else { LogEntryTestPeer entry("foo.cc", -1234, kUsePrefix, absl::LogSeverity::kInfo, "2020-01-02T03:04:05.6789", 451, absl::log_internal::PrefixFormat::kNotRaw, "hello world"); EXPECT_THAT(entry.FormatLogMessage(), Eq("I0102 03:04:05.678900 451 foo.cc:-1234] hello world")); EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), Eq("I0102 03:04:05.678900 451 foo.cc:-1234] ")); for (size_t sz = strlen("I0102 03:04:05.678900 451 foo.cc:-1234] ") + 20; sz != std::numeric_limits<size_t>::max(); sz--) EXPECT_THAT("I0102 03:04:05.678900 451 foo.cc:-1234] ", StartsWith(entry.FormatPrefixIntoSizedBuffer(sz))); EXPECT_THAT( entry.entry().text_message_with_prefix_and_newline(), Eq("I0102 03:04:05.678900 451 foo.cc:-1234] hello world\n")); EXPECT_THAT( entry.entry().text_message_with_prefix_and_newline_c_str(), StrEq("I0102 03:04:05.678900 451 foo.cc:-1234] hello world\n")); EXPECT_THAT(entry.entry().text_message_with_prefix(), Eq("I0102 03:04:05.678900 451 foo.cc:-1234] hello world")); EXPECT_THAT(entry.entry().text_message(), Eq("hello world")); } } TEST(LogEntryTest, LongFields) { LogEntryTestPeer entry( "I am the very model of a modern Major-General / " "I've information vegetable, animal, and mineral.", 2147483647, kUsePrefix, absl::LogSeverity::kInfo, "2020-01-02T03:04:05.678967896789", 2147483647, absl::log_internal::PrefixFormat::kNotRaw, "I know the kings of England, and I quote the fights historical / " "From Marathon to Waterloo, 
in order categorical."); EXPECT_THAT(entry.FormatLogMessage(), Eq("I0102 03:04:05.678967 2147483647 I am the very model of a " "modern Major-General / I've information vegetable, animal, " "and mineral.:2147483647] I know the kings of England, and I " "quote the fights historical / From Marathon to Waterloo, in " "order categorical.")); EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), Eq("I0102 03:04:05.678967 2147483647 I am the very model of a " "modern Major-General / I've information vegetable, animal, " "and mineral.:2147483647] ")); for (size_t sz = strlen("I0102 03:04:05.678967 2147483647 I am the very model of a " "modern Major-General / I've information vegetable, animal, " "and mineral.:2147483647] ") + 20; sz != std::numeric_limits<size_t>::max(); sz--) EXPECT_THAT( "I0102 03:04:05.678967 2147483647 I am the very model of a " "modern Major-General / I've information vegetable, animal, " "and mineral.:2147483647] ", StartsWith(entry.FormatPrefixIntoSizedBuffer(sz))); EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline(), Eq("I0102 03:04:05.678967 2147483647 I am the very model of a " "modern Major-General / I've information vegetable, animal, " "and mineral.:2147483647] I know the kings of England, and I " "quote the fights historical / From Marathon to Waterloo, in " "order categorical.\n")); EXPECT_THAT( entry.entry().text_message_with_prefix_and_newline_c_str(), StrEq("I0102 03:04:05.678967 2147483647 I am the very model of a " "modern Major-General / I've information vegetable, animal, " "and mineral.:2147483647] I know the kings of England, and I " "quote the fights historical / From Marathon to Waterloo, in " "order categorical.\n")); EXPECT_THAT(entry.entry().text_message_with_prefix(), Eq("I0102 03:04:05.678967 2147483647 I am the very model of a " "modern Major-General / I've information vegetable, animal, " "and mineral.:2147483647] I know the kings of England, and I " "quote the fights historical / From Marathon to Waterloo, in " "order categorical.")); EXPECT_THAT( entry.entry().text_message(), Eq("I know the kings of England, and I quote the fights historical / " "From Marathon to Waterloo, in order categorical.")); } TEST(LogEntryTest, LongNegativeFields) { if (std::is_signed<absl::LogEntry::tid_t>::value) { LogEntryTestPeer entry( "I am the very model of a modern Major-General / " "I've information vegetable, animal, and mineral.", -2147483647, kUsePrefix, absl::LogSeverity::kInfo, "2020-01-02T03:04:05.678967896789", static_cast<absl::LogEntry::tid_t>(-2147483647), absl::log_internal::PrefixFormat::kNotRaw, "I know the kings of England, and I quote the fights historical / " "From Marathon to Waterloo, in order categorical."); EXPECT_THAT( entry.FormatLogMessage(), Eq("I0102 03:04:05.678967 -2147483647 I am the very model of a " "modern Major-General / I've information vegetable, animal, " "and mineral.:-2147483647] I know the kings of England, and I " "quote the fights historical / From Marathon to Waterloo, in " "order categorical.")); EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), Eq("I0102 03:04:05.678967 -2147483647 I am the very model of a " "modern Major-General / I've information vegetable, animal, " "and mineral.:-2147483647] ")); for (size_t sz = strlen( "I0102 03:04:05.678967 -2147483647 I am the very model of a " "modern Major-General / I've information vegetable, animal, " "and mineral.:-2147483647] ") + 20; sz != std::numeric_limits<size_t>::max(); sz--) EXPECT_THAT( "I0102 03:04:05.678967 -2147483647 I am the very model of a " 
"modern Major-General / I've information vegetable, animal, " "and mineral.:-2147483647] ", StartsWith(entry.FormatPrefixIntoSizedBuffer(sz))); EXPECT_THAT( entry.entry().text_message_with_prefix_and_newline(), Eq("I0102 03:04:05.678967 -2147483647 I am the very model of a " "modern Major-General / I've information vegetable, animal, " "and mineral.:-2147483647] I know the kings of England, and I " "quote the fights historical / From Marathon to Waterloo, in " "order categorical.\n")); EXPECT_THAT( entry.entry().text_message_with_prefix_and_newline_c_str(), StrEq("I0102 03:04:05.678967 -2147483647 I am the very model of a " "modern Major-General / I've information vegetable, animal, " "and mineral.:-2147483647] I know the kings of England, and I " "quote the fights historical / From Marathon to Waterloo, in " "order categorical.\n")); EXPECT_THAT( entry.entry().text_message_with_prefix(), Eq("I0102 03:04:05.678967 -2147483647 I am the very model of a " "modern Major-General / I've information vegetable, animal, " "and mineral.:-2147483647] I know the kings of England, and I " "quote the fights historical / From Marathon to Waterloo, in " "order categorical.")); EXPECT_THAT( entry.entry().text_message(), Eq("I know the kings of England, and I quote the fights historical / " "From Marathon to Waterloo, in order categorical.")); } else { LogEntryTestPeer entry( "I am the very model of a modern Major-General / " "I've information vegetable, animal, and mineral.", -2147483647, kUsePrefix, absl::LogSeverity::kInfo, "2020-01-02T03:04:05.678967896789", 2147483647, absl::log_internal::PrefixFormat::kNotRaw, "I know the kings of England, and I quote the fights historical / " "From Marathon to Waterloo, in order categorical."); EXPECT_THAT( entry.FormatLogMessage(), Eq("I0102 03:04:05.678967 2147483647 I am the very model of a " "modern Major-General / I've information vegetable, animal, " "and mineral.:-2147483647] I know the kings of England, and I " "quote the fights historical / From Marathon to Waterloo, in " "order categorical.")); EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), Eq("I0102 03:04:05.678967 2147483647 I am the very model of a " "modern Major-General / I've information vegetable, animal, " "and mineral.:-2147483647] ")); for (size_t sz = strlen( "I0102 03:04:05.678967 2147483647 I am the very model of a " "modern Major-General / I've information vegetable, animal, " "and mineral.:-2147483647] ") + 20; sz != std::numeric_limits<size_t>::max(); sz--) EXPECT_THAT( "I0102 03:04:05.678967 2147483647 I am the very model of a " "modern Major-General / I've information vegetable, animal, " "and mineral.:-2147483647] ", StartsWith(entry.FormatPrefixIntoSizedBuffer(sz))); EXPECT_THAT( entry.entry().text_message_with_prefix_and_newline(), Eq("I0102 03:04:05.678967 2147483647 I am the very model of a " "modern Major-General / I've information vegetable, animal, " "and mineral.:-2147483647] I know the kings of England, and I " "quote the fights historical / From Marathon to Waterloo, in " "order categorical.\n")); EXPECT_THAT( entry.entry().text_message_with_prefix_and_newline_c_str(), StrEq("I0102 03:04:05.678967 2147483647 I am the very model of a " "modern Major-General / I've information vegetable, animal, " "and mineral.:-2147483647] I know the kings of England, and I " "quote the fights historical / From Marathon to Waterloo, in " "order categorical.\n")); EXPECT_THAT( entry.entry().text_message_with_prefix(), Eq("I0102 03:04:05.678967 2147483647 I am the very model of a " "modern 
Major-General / I've information vegetable, animal, " "and mineral.:-2147483647] I know the kings of England, and I " "quote the fights historical / From Marathon to Waterloo, in " "order categorical.")); EXPECT_THAT( entry.entry().text_message(), Eq("I know the kings of England, and I quote the fights historical / " "From Marathon to Waterloo, in order categorical.")); } } TEST(LogEntryTest, Raw) { LogEntryTestPeer entry("foo.cc", 1234, kUsePrefix, absl::LogSeverity::kInfo, "2020-01-02T03:04:05.6789", 451, absl::log_internal::PrefixFormat::kRaw, "hello world"); EXPECT_THAT( entry.FormatLogMessage(), Eq("I0102 03:04:05.678900 451 foo.cc:1234] RAW: hello world")); EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), Eq("I0102 03:04:05.678900 451 foo.cc:1234] RAW: ")); for (size_t sz = strlen("I0102 03:04:05.678900 451 foo.cc:1234] RAW: ") + 20; sz != std::numeric_limits<size_t>::max(); sz--) EXPECT_THAT("I0102 03:04:05.678900 451 foo.cc:1234] RAW: ", StartsWith(entry.FormatPrefixIntoSizedBuffer(sz))); EXPECT_THAT( entry.entry().text_message_with_prefix_and_newline(), Eq("I0102 03:04:05.678900 451 foo.cc:1234] RAW: hello world\n")); EXPECT_THAT( entry.entry().text_message_with_prefix_and_newline_c_str(), StrEq("I0102 03:04:05.678900 451 foo.cc:1234] RAW: hello world\n")); EXPECT_THAT( entry.entry().text_message_with_prefix(), Eq("I0102 03:04:05.678900 451 foo.cc:1234] RAW: hello world")); EXPECT_THAT(entry.entry().text_message(), Eq("hello world")); } }
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/log_entry.cc
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/log_entry_test.cc
03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4
d37b53b4-31a7-4b63-b4fa-2549c355aa9f
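log_entry_test.cc above pins the prefix layout down character by character, e.g. "I0102 03:04:05.678900 451 foo.cc:1234] ": severity letter, MMDD, wall time with microseconds, thread id, source location, then "] ". The helper below is a hypothetical sketch of how such a prefix could be assembled; the real formatter is absl's log_internal::FormatLogPrefix, which also handles buffer truncation and field padding not shown here:

// Sketch only: reproduces the field order the tests assert on.
#include <cstdio>
#include <string>

std::string PrefixSketch(char severity, int month, int day, int hour,
                         int minute, int second, int microsecond, long tid,
                         const std::string& file, int line) {
  char buf[512];
  std::snprintf(buf, sizeof(buf),
                "%c%02d%02d %02d:%02d:%02d.%06d %ld %s:%d] ", severity,
                month, day, hour, minute, second, microsecond, tid,
                file.c_str(), line);
  return buf;
}

// PrefixSketch('I', 1, 2, 3, 4, 5, 678900, 451, "foo.cc", 1234)
//   -> "I0102 03:04:05.678900 451 foo.cc:1234] "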
cpp
google/cel-cpp
type_introspector
common/type_introspector.cc
extensions/protobuf/type_introspector_test.cc
#include "common/type_introspector.h" #include <algorithm> #include <cstdint> #include <initializer_list> #include "absl/container/flat_hash_map.h" #include "absl/container/inlined_vector.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" #include "common/memory.h" #include "common/type.h" #include "common/types/thread_compatible_type_introspector.h" namespace cel { namespace { common_internal::BasicStructTypeField MakeBasicStructTypeField( absl::string_view name, Type type, int32_t number) { return common_internal::BasicStructTypeField(name, number, type); } struct FieldNameComparer { using is_transparent = void; bool operator()(const common_internal::BasicStructTypeField& lhs, const common_internal::BasicStructTypeField& rhs) const { return (*this)(lhs.name(), rhs.name()); } bool operator()(const common_internal::BasicStructTypeField& lhs, absl::string_view rhs) const { return (*this)(lhs.name(), rhs); } bool operator()(absl::string_view lhs, const common_internal::BasicStructTypeField& rhs) const { return (*this)(lhs, rhs.name()); } bool operator()(absl::string_view lhs, absl::string_view rhs) const { return lhs < rhs; } }; struct FieldNumberComparer { using is_transparent = void; bool operator()(const common_internal::BasicStructTypeField& lhs, const common_internal::BasicStructTypeField& rhs) const { return (*this)(lhs.number(), rhs.number()); } bool operator()(const common_internal::BasicStructTypeField& lhs, int64_t rhs) const { return (*this)(lhs.number(), rhs); } bool operator()(int64_t lhs, const common_internal::BasicStructTypeField& rhs) const { return (*this)(lhs, rhs.number()); } bool operator()(int64_t lhs, int64_t rhs) const { return lhs < rhs; } }; struct WellKnownType { WellKnownType( const Type& type, std::initializer_list<common_internal::BasicStructTypeField> fields) : type(type), fields_by_name(fields), fields_by_number(fields) { std::sort(fields_by_name.begin(), fields_by_name.end(), FieldNameComparer{}); std::sort(fields_by_number.begin(), fields_by_number.end(), FieldNumberComparer{}); } explicit WellKnownType(const Type& type) : WellKnownType(type, {}) {} Type type; absl::InlinedVector<common_internal::BasicStructTypeField, 2> fields_by_name; absl::InlinedVector<common_internal::BasicStructTypeField, 2> fields_by_number; absl::optional<StructTypeField> FieldByName(absl::string_view name) const { auto it = std::lower_bound(fields_by_name.begin(), fields_by_name.end(), name, FieldNameComparer{}); if (it == fields_by_name.end() || it->name() != name) { return absl::nullopt; } return *it; } absl::optional<StructTypeField> FieldByNumber(int64_t number) const { auto it = std::lower_bound(fields_by_number.begin(), fields_by_number.end(), number, FieldNumberComparer{}); if (it == fields_by_number.end() || it->number() != number) { return absl::nullopt; } return *it; } }; using WellKnownTypesMap = absl::flat_hash_map<absl::string_view, WellKnownType>; const WellKnownTypesMap& GetWellKnownTypesMap() { static const WellKnownTypesMap* types = []() -> WellKnownTypesMap* { WellKnownTypesMap* types = new WellKnownTypesMap(); types->insert_or_assign( "google.protobuf.BoolValue", WellKnownType{BoolWrapperType{}, {MakeBasicStructTypeField("value", BoolType{}, 1)}}); types->insert_or_assign( "google.protobuf.Int32Value", WellKnownType{IntWrapperType{}, {MakeBasicStructTypeField("value", IntType{}, 1)}}); types->insert_or_assign( "google.protobuf.Int64Value", WellKnownType{IntWrapperType{}, {MakeBasicStructTypeField("value", 
IntType{}, 1)}}); types->insert_or_assign( "google.protobuf.UInt32Value", WellKnownType{UintWrapperType{}, {MakeBasicStructTypeField("value", UintType{}, 1)}}); types->insert_or_assign( "google.protobuf.UInt64Value", WellKnownType{UintWrapperType{}, {MakeBasicStructTypeField("value", UintType{}, 1)}}); types->insert_or_assign( "google.protobuf.FloatValue", WellKnownType{DoubleWrapperType{}, {MakeBasicStructTypeField("value", DoubleType{}, 1)}}); types->insert_or_assign( "google.protobuf.DoubleValue", WellKnownType{DoubleWrapperType{}, {MakeBasicStructTypeField("value", DoubleType{}, 1)}}); types->insert_or_assign( "google.protobuf.StringValue", WellKnownType{StringWrapperType{}, {MakeBasicStructTypeField("value", StringType{}, 1)}}); types->insert_or_assign( "google.protobuf.BytesValue", WellKnownType{BytesWrapperType{}, {MakeBasicStructTypeField("value", BytesType{}, 1)}}); types->insert_or_assign( "google.protobuf.Duration", WellKnownType{DurationType{}, {MakeBasicStructTypeField("seconds", IntType{}, 1), MakeBasicStructTypeField("nanos", IntType{}, 2)}}); types->insert_or_assign( "google.protobuf.Timestamp", WellKnownType{TimestampType{}, {MakeBasicStructTypeField("seconds", IntType{}, 1), MakeBasicStructTypeField("nanos", IntType{}, 2)}}); types->insert_or_assign( "google.protobuf.Value", WellKnownType{ DynType{}, {MakeBasicStructTypeField("null_value", NullType{}, 1), MakeBasicStructTypeField("number_value", DoubleType{}, 2), MakeBasicStructTypeField("string_value", StringType{}, 3), MakeBasicStructTypeField("bool_value", BoolType{}, 4), MakeBasicStructTypeField("struct_value", JsonMapType(), 5), MakeBasicStructTypeField("list_value", ListType{}, 6)}}); types->insert_or_assign( "google.protobuf.ListValue", WellKnownType{ListType{}, {MakeBasicStructTypeField("values", ListType{}, 1)}}); types->insert_or_assign( "google.protobuf.Struct", WellKnownType{JsonMapType(), {MakeBasicStructTypeField("fields", JsonMapType(), 1)}}); types->insert_or_assign( "google.protobuf.Any", WellKnownType{AnyType{}, {MakeBasicStructTypeField("type_url", StringType{}, 1), MakeBasicStructTypeField("value", BytesType{}, 2)}}); types->insert_or_assign("null_type", WellKnownType{NullType{}}); types->insert_or_assign("google.protobuf.NullValue", WellKnownType{NullType{}}); types->insert_or_assign("bool", WellKnownType{BoolType{}}); types->insert_or_assign("int", WellKnownType{IntType{}}); types->insert_or_assign("uint", WellKnownType{UintType{}}); types->insert_or_assign("double", WellKnownType{DoubleType{}}); types->insert_or_assign("bytes", WellKnownType{BytesType{}}); types->insert_or_assign("string", WellKnownType{StringType{}}); types->insert_or_assign("list", WellKnownType{ListType{}}); types->insert_or_assign("map", WellKnownType{MapType{}}); types->insert_or_assign("type", WellKnownType{TypeType{}}); return types; }(); return *types; } } absl::StatusOr<absl::optional<Type>> TypeIntrospector::FindType( TypeFactory& type_factory, absl::string_view name) const { const auto& well_known_types = GetWellKnownTypesMap(); if (auto it = well_known_types.find(name); it != well_known_types.end()) { return it->second.type; } return FindTypeImpl(type_factory, name); } absl::StatusOr<absl::optional<TypeIntrospector::EnumConstant>> TypeIntrospector::FindEnumConstant(TypeFactory& type_factory, absl::string_view type, absl::string_view value) const { if (type == "google.protobuf.NullValue" && value == "NULL_VALUE") { return EnumConstant{NullType{}, "google.protobuf.NullValue", "NULL_VALUE", 0}; } return 
FindEnumConstantImpl(type_factory, type, value); } absl::StatusOr<absl::optional<StructTypeField>> TypeIntrospector::FindStructTypeFieldByName(TypeFactory& type_factory, absl::string_view type, absl::string_view name) const { const auto& well_known_types = GetWellKnownTypesMap(); if (auto it = well_known_types.find(type); it != well_known_types.end()) { return it->second.FieldByName(name); } return FindStructTypeFieldByNameImpl(type_factory, type, name); } absl::StatusOr<absl::optional<Type>> TypeIntrospector::FindTypeImpl( TypeFactory&, absl::string_view) const { return absl::nullopt; } absl::StatusOr<absl::optional<TypeIntrospector::EnumConstant>> TypeIntrospector::FindEnumConstantImpl(TypeFactory&, absl::string_view, absl::string_view) const { return absl::nullopt; } absl::StatusOr<absl::optional<StructTypeField>> TypeIntrospector::FindStructTypeFieldByNameImpl(TypeFactory&, absl::string_view, absl::string_view) const { return absl::nullopt; } Shared<TypeIntrospector> NewThreadCompatibleTypeIntrospector( MemoryManagerRef memory_manager) { return memory_manager .MakeShared<common_internal::ThreadCompatibleTypeIntrospector>(); } }
#include "extensions/protobuf/type_introspector.h" #include "absl/types/optional.h" #include "common/type.h" #include "common/type_kind.h" #include "common/type_testing.h" #include "internal/testing.h" #include "proto/test/v1/proto2/test_all_types.pb.h" #include "google/protobuf/descriptor.h" namespace cel::extensions { namespace { using ::absl_testing::IsOkAndHolds; using ::google::api::expr::test::v1::proto2::TestAllTypes; using ::testing::Eq; using ::testing::Optional; class ProtoTypeIntrospectorTest : public common_internal::ThreadCompatibleTypeTest<> { private: Shared<TypeIntrospector> NewTypeIntrospector( MemoryManagerRef memory_manager) override { return memory_manager.MakeShared<ProtoTypeIntrospector>(); } }; TEST_P(ProtoTypeIntrospectorTest, FindType) { EXPECT_THAT( type_manager().FindType(TestAllTypes::descriptor()->full_name()), IsOkAndHolds(Optional(Eq(MessageType(TestAllTypes::GetDescriptor()))))); EXPECT_THAT(type_manager().FindType("type.that.does.not.Exist"), IsOkAndHolds(Eq(absl::nullopt))); } TEST_P(ProtoTypeIntrospectorTest, FindStructTypeFieldByName) { ASSERT_OK_AND_ASSIGN( auto field, type_manager().FindStructTypeFieldByName( TestAllTypes::descriptor()->full_name(), "single_int32")); ASSERT_TRUE(field.has_value()); EXPECT_THAT(field->name(), Eq("single_int32")); EXPECT_THAT(field->number(), Eq(1)); EXPECT_THAT( type_manager().FindStructTypeFieldByName( TestAllTypes::descriptor()->full_name(), "field_that_does_not_exist"), IsOkAndHolds(Eq(absl::nullopt))); EXPECT_THAT(type_manager().FindStructTypeFieldByName( "type.that.does.not.Exist", "does_not_matter"), IsOkAndHolds(Eq(absl::nullopt))); } TEST_P(ProtoTypeIntrospectorTest, FindEnumConstant) { ProtoTypeIntrospector introspector; const auto* enum_desc = TestAllTypes::NestedEnum_descriptor(); ASSERT_OK_AND_ASSIGN( auto enum_constant, introspector.FindEnumConstant( type_manager(), "google.api.expr.test.v1.proto2.TestAllTypes.NestedEnum", "BAZ")); ASSERT_TRUE(enum_constant.has_value()); EXPECT_EQ(enum_constant->type.kind(), TypeKind::kEnum); EXPECT_EQ(enum_constant->type_full_name, enum_desc->full_name()); EXPECT_EQ(enum_constant->value_name, "BAZ"); EXPECT_EQ(enum_constant->number, 2); } TEST_P(ProtoTypeIntrospectorTest, FindEnumConstantNull) { ProtoTypeIntrospector introspector; ASSERT_OK_AND_ASSIGN( auto enum_constant, introspector.FindEnumConstant(type_manager(), "google.protobuf.NullValue", "NULL_VALUE")); ASSERT_TRUE(enum_constant.has_value()); EXPECT_EQ(enum_constant->type.kind(), TypeKind::kNull); EXPECT_EQ(enum_constant->type_full_name, "google.protobuf.NullValue"); EXPECT_EQ(enum_constant->value_name, "NULL_VALUE"); EXPECT_EQ(enum_constant->number, 0); } TEST_P(ProtoTypeIntrospectorTest, FindEnumConstantUnknownEnum) { ProtoTypeIntrospector introspector; ASSERT_OK_AND_ASSIGN( auto enum_constant, introspector.FindEnumConstant(type_manager(), "NotARealEnum", "BAZ")); EXPECT_FALSE(enum_constant.has_value()); } TEST_P(ProtoTypeIntrospectorTest, FindEnumConstantUnknownValue) { ProtoTypeIntrospector introspector; ASSERT_OK_AND_ASSIGN( auto enum_constant, introspector.FindEnumConstant( type_manager(), "google.api.expr.test.v1.proto2.TestAllTypes.NestedEnum", "QUX")); ASSERT_FALSE(enum_constant.has_value()); } INSTANTIATE_TEST_SUITE_P( ProtoTypeIntrospectorTest, ProtoTypeIntrospectorTest, ::testing::Values(MemoryManagement::kPooling, MemoryManagement::kReferenceCounting), ProtoTypeIntrospectorTest::ToString); } }
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/type_introspector.cc
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/protobuf/type_introspector_test.cc
4552db5798fb0853b131b783d8875794334fae7f
7ab47ab0-9fed-413f-9b03-23b0997a2c1f
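The well-known-type field tables above avoid per-lookup hashing for fields: each type's fields sit in small inlined vectors sorted once by name and once by number, then probed with std::lower_bound through transparent comparators. A minimal standalone sketch of the by-name variant, with illustrative types standing in for common_internal::BasicStructTypeField:

#include <algorithm>
#include <optional>
#include <string>
#include <string_view>
#include <vector>

struct Field {
  std::string name;
  int number;
};

// `sorted_by_name` must already be sorted by Field::name, as the
// WellKnownType constructor above guarantees with std::sort.
std::optional<Field> FieldByName(const std::vector<Field>& sorted_by_name,
                                 std::string_view name) {
  auto it = std::lower_bound(
      sorted_by_name.begin(), sorted_by_name.end(), name,
      [](const Field& field, std::string_view n) { return field.name < n; });
  if (it == sorted_by_name.end() || it->name != name) {
    return std::nullopt;  // no field with this exact name
  }
  return *it;
}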
cpp
tensorflow/tensorflow
nest_gemm_fusion
third_party/xla/xla/service/gpu/transforms/nest_gemm_fusion.cc
third_party/xla/xla/service/gpu/transforms/nest_gemm_fusion_test.cc
#include "xla/service/gpu/transforms/nest_gemm_fusion.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <string> #include <utility> #include <variant> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" #include "llvm/ADT/SmallVector.h" #include "mlir/IR/MLIRContext.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/matmul_utils.h" #include "xla/service/gpu/model/symbolic_tile_analysis.h" #include "xla/service/gpu/model/symbolic_tiled_hlo_instruction.h" #include "xla/service/gpu/model/tiled_hlo_computation.h" #include "xla/service/hlo_dce.h" #include "xla/service/instruction_fusion.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { absl::Status FuseInstructionsForConsumer( const std::vector<HloInstruction*>& instructions, HloInstruction& consumer) { HloComputation::Builder builder(instructions.back()->name()); absl::flat_hash_map<const HloInstruction*, HloInstruction*> old_to_new_mapping; std::vector<HloInstruction*> parameters; auto add_parameter = [&](HloInstruction* instruction) -> void { int param_index = parameters.size(); old_to_new_mapping[instruction] = builder.AddInstruction(HloInstruction::CreateParameter( param_index, instruction->shape(), absl::StrCat("parameter_", param_index))); parameters.push_back(instruction); }; for (HloInstruction* instruction : instructions) { if (old_to_new_mapping.contains(instruction)) { continue; } if (instruction->opcode() == HloOpcode::kParameter) { add_parameter(instruction); continue; } std::vector<HloInstruction*> new_operands; for (HloInstruction* operand : instruction->mutable_operands()) { if (!old_to_new_mapping.contains(operand)) { add_parameter(operand); } new_operands.push_back(old_to_new_mapping[operand]); } old_to_new_mapping[instruction] = builder.AddInstruction( instruction->CloneWithNewOperands(instruction->shape(), new_operands)); } HloInstruction* old_root = instructions.back(); old_to_new_mapping[old_root]->MarkAsRoot(); HloComputation* computation = old_root->GetModule()->AddComputationAndUnifyNamesAndIds( builder.Build(), false); HloInstruction* fusion = old_root->parent()->AddInstruction(HloInstruction::CreateFusion( old_root->shape(), HloInstruction::FusionKind::kCustom, parameters, computation)); fusion->GetModule()->SetAndUniquifyInstrName(fusion, "block_fusion"); TF_ASSIGN_OR_RETURN(auto gpu_config, fusion->backend_config<GpuBackendConfig>()); FusionBackendConfig& backend_config = *gpu_config.mutable_fusion_backend_config(); backend_config.set_kind(std::string(kTritonFusionKind)); TF_RETURN_IF_ERROR(fusion->set_backend_config(gpu_config)); for (int64_t operand_index : consumer.OperandIndices(old_root)) { TF_RETURN_IF_ERROR(consumer.ReplaceOperandWith(operand_index, fusion)); } return absl::OkStatus(); } absl::Status AnnotateDotOperandNestedFusionImpl( HloFusionInstruction& nested_fusion, const HloDotInstruction& dot, const TritonGemmConfig& config, absl::Span<const int64_t> contracting_dimensions, 
absl::Span<const int64_t> batch_dimensions, int64_t contracting_dim_size, int64_t non_contracting_dim_size) { if (contracting_dimensions.size() != 1) { return absl::InternalError( absl::StrCat("Expected a single lhs contracting dimension but got ", contracting_dimensions.size())); } TF_ASSIGN_OR_RETURN( std::vector<int64_t> non_contracting_dimensions, GetNonContractingDims(dot.operand(0)->shape(), batch_dimensions, contracting_dimensions)); if (non_contracting_dimensions.size() != 1) { return absl::InternalError( absl::StrCat("Expected a single non-contracting dimension but got ", non_contracting_dimensions.size())); } std::vector<int64_t> output_tile_sizes(dot.operand(0)->shape().rank(), 1); output_tile_sizes[contracting_dimensions[0]] = contracting_dim_size; output_tile_sizes[non_contracting_dimensions[0]] = non_contracting_dim_size; BlockLevelParameters block_level_parameters; block_level_parameters.output_tile_sizes = std::move(output_tile_sizes); TF_ASSIGN_OR_RETURN(auto backend_config, nested_fusion.backend_config<GpuBackendConfig>()); *backend_config.mutable_fusion_backend_config() ->mutable_block_level_fusion_config() = block_level_parameters.ToBlockLevelFusionConfig(); TF_RETURN_IF_ERROR(nested_fusion.set_backend_config(backend_config)); return absl::OkStatus(); } absl::Status AnnotateDotLhsNestedFusion(HloFusionInstruction& nested_fusion, const HloDotInstruction& dot, const TritonGemmConfig& config) { const DotDimensionNumbers& dimension_numbers = dot.dot_dimension_numbers(); return AnnotateDotOperandNestedFusionImpl( nested_fusion, dot, config, dimension_numbers.lhs_contracting_dimensions(), dimension_numbers.lhs_batch_dimensions(), config.block_k, config.block_m); } absl::Status AnnotateDotRhsNestedFusion(HloFusionInstruction& nested_fusion, const HloDotInstruction& dot, const TritonGemmConfig& config) { const DotDimensionNumbers& dimension_numbers = dot.dot_dimension_numbers(); return AnnotateDotOperandNestedFusionImpl( nested_fusion, dot, config, dimension_numbers.rhs_contracting_dimensions(), dimension_numbers.rhs_batch_dimensions(), config.block_k, config.block_n); } absl::StatusOr<llvm::SmallVector<int64_t>> FindOutputTileSizesForEpilogue( const SymbolicTiledHloInstruction& tiled_dot, const SymbolicTileAnalysis& analysis, const TritonGemmConfig& config) { int64_t dot_rank = tiled_dot.symbolic_tile().tile_map().GetDimensionCount(); llvm::SmallVector<int64_t> expected_dot_tile_sizes(dot_rank, 1); expected_dot_tile_sizes[dot_rank - 2] = config.block_m; expected_dot_tile_sizes[dot_rank - 1] = config.block_n; llvm::SmallVector<int64_t> output_tile_sizes = expected_dot_tile_sizes; std::sort(output_tile_sizes.begin(), output_tile_sizes.end()); do { TF_ASSIGN_OR_RETURN( bool parameters_satisfy_constraints, analysis.ParametersSatisfyConstraints(output_tile_sizes)); if (!parameters_satisfy_constraints) { continue; } auto mapped_dot_tile_sizes = tiled_dot.TileSizes(output_tile_sizes); if (mapped_dot_tile_sizes == expected_dot_tile_sizes) { return output_tile_sizes; } } while (std::next_permutation(output_tile_sizes.begin(), output_tile_sizes.end())); return absl::InternalError(absl::StrCat( "Couldn't find output tile sizes that satisfy ", tiled_dot.ToString())); } absl::StatusOr<TritonGemmConfig> GetTritonGemmConfig( const HloFusionInstruction& fusion) { TF_ASSIGN_OR_RETURN(auto gpu_config, fusion.backend_config<GpuBackendConfig>()); const FusionBackendConfig& backend_config = gpu_config.fusion_backend_config(); if (!backend_config.has_triton_gemm_config()) { return 
absl::InternalError( "The fusion's backend config doesn't have a triton_gemm_config."); } return TritonGemmConfig::FromProto(backend_config.triton_gemm_config()); } absl::Status MakeNestedFusionFromGemmFusion( HloFusionInstruction* fusion, const TritonGemmConfig& config, const SymbolicTileAnalysis& analysis, const SymbolicTiledHloInstruction& tiled_dot, HloDotInstruction* dot) { DCHECK(GetTritonGemmConfig(*fusion).value() == config); DCHECK_EQ(tiled_dot.hlo(), dot); HloComputation* computation = fusion->called_computation(); TF_RETURN_IF_ERROR(FuseInstructionsForConsumer( computation->MakeInstructionPostOrderFrom(*dot->mutable_operand(0)), *dot)); TF_RETURN_IF_ERROR(AnnotateDotLhsNestedFusion( *::xla::Cast<HloFusionInstruction>(dot->mutable_operand(0)), *dot, config)); TF_RETURN_IF_ERROR(FuseInstructionsForConsumer( computation->MakeInstructionPostOrderFrom(*dot->mutable_operand(1)), *dot)); TF_RETURN_IF_ERROR(AnnotateDotRhsNestedFusion( *::xla::Cast<HloFusionInstruction>(dot->mutable_operand(1)), *dot, config)); TF_ASSIGN_OR_RETURN([[maybe_unused]] bool changed, HloDCE::RunOnComputation( computation, false)); TF_ASSIGN_OR_RETURN( llvm::SmallVector<int64_t> output_tile_sizes, FindOutputTileSizesForEpilogue(tiled_dot, analysis, config)); TF_ASSIGN_OR_RETURN(auto gpu_config, fusion->backend_config<GpuBackendConfig>()); FusionBackendConfig& backend_config = *gpu_config.mutable_fusion_backend_config(); backend_config.set_kind(std::string(kTritonFusionKind)); BlockLevelParameters block_level_parameters; block_level_parameters.output_tile_sizes.assign(output_tile_sizes.begin(), output_tile_sizes.end()); *backend_config.mutable_block_level_fusion_config() = block_level_parameters.ToBlockLevelFusionConfig(); TF_RETURN_IF_ERROR(fusion->set_backend_config(gpu_config)); return absl::OkStatus(); } size_t GetDotCount(HloComputation* computation) { return absl::c_count_if(computation->instructions(), [](HloInstruction* hlo) { return hlo->opcode() == HloOpcode::kDot; }); } class NestGemmFusionVisitor : public DfsHloRewriteVisitor { public: explicit NestGemmFusionVisitor(mlir::MLIRContext* ctx) : ctx_(ctx) {} absl::Status HandleFusion(HloInstruction* instruction) override { HloFusionInstruction* fusion = Cast<HloFusionInstruction>(instruction); absl::StatusOr<TritonGemmConfig> config = GetTritonGemmConfig(*fusion); if (!config.ok()) { return absl::OkStatus(); } HloComputation* computation = fusion->called_computation(); HloInstruction* dot = hlo_query::GetFirstInstructionWithOpcode(*computation, HloOpcode::kDot); if (dot == nullptr) { return absl::OkStatus(); } DCHECK_EQ(GetDotCount(computation), 1) << "Fusion has more than one dot."; SymbolicTileAnalysisOrError analysis_or = SymbolicTileAnalysis::AnalyzeComputation( *fusion->called_computations()[0], ctx_); if (std::holds_alternative<FusionDecision>(analysis_or)) { return absl::InternalError( absl::StrCat("Failed to analyze the computation (", std::get<FusionDecision>(analysis_or).Explain(), "): ", fusion->called_computation()->ToString())); } auto& analysis = std::get<SymbolicTileAnalysis>(analysis_or); auto tiled_dot_it = absl::c_find_if( analysis.GetSymbolicTiledHloComputation(), [&](const auto& tiled_hlo) { return tiled_hlo->hlo() == dot; }); if (tiled_dot_it == analysis.GetSymbolicTiledHloComputation().end()) { return absl::InternalError(absl::StrCat( "Couldn't find a symbolic tiled instruction for ", dot->ToString())); } TF_RETURN_IF_ERROR(MakeNestedFusionFromGemmFusion( fusion, config.value(), analysis, **tiled_dot_it, 
Cast<HloDotInstruction>(dot))); this->MarkAsChanged(); return absl::OkStatus(); } private: mlir::MLIRContext* ctx_; }; } absl::StatusOr<bool> NestGemmFusion::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; mlir::MLIRContext ctx; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { NestGemmFusionVisitor visitor(&ctx); TF_RETURN_IF_ERROR(computation->Accept(&visitor)); changed |= visitor.changed(); } return changed; } }
#include "xla/service/gpu/transforms/nest_gemm_fusion.h" #include <ostream> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "xla/hlo/ir/hlo_instruction.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/statusor.h" using ::testing::ElementsAre; namespace xla { static void PrintTo(const HloInstruction& hlo, std::ostream* os) { *os << hlo.ToString(); } namespace gpu { namespace { MATCHER_P(OutputTileSizesIs, matcher, "") { auto backend_config = arg.template backend_config<GpuBackendConfig>(); if (!backend_config.ok()) { *result_listener << "failed to get backend config: " << backend_config.status(); return false; } FusionBackendConfig fusion_backend_config = backend_config->fusion_backend_config(); if (!fusion_backend_config.has_block_level_fusion_config()) { *result_listener << "has no block level fusion config"; return false; } auto output_tile_sizes = fusion_backend_config.block_level_fusion_config().output_tile_sizes(); return ExplainMatchResult(matcher, output_tile_sizes, result_listener); } class NestGemmFusionTest : public HloTestBase {}; TEST_F(NestGemmFusionTest, BasicTest) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule module dot { lhs = bf16[8192,512] parameter(0) rhs = bf16[512,512] parameter(1) ROOT %dot = bf16[8192,512] dot(lhs, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY entry { p0 = bf16[8192,512] parameter(0) p1 = bf16[512,512] parameter(1) ROOT fusion = bf16[8192,512] fusion(p0, p1), kind=kCustom, calls=dot, backend_config={ "fusion_backend_config": { "kind":"__triton_gemm", "triton_gemm_config": { "block_m":"64", "block_n":"256", "block_k":"32", "split_k":"1", "num_stages":"1", "num_warps":"1", "num_ctas":"1" } } } } )")); TF_ASSERT_OK_AND_ASSIGN(bool changed, NestGemmFusion().Run(module.get())) EXPECT_TRUE(changed); TF_ASSERT_OK(verifier().Run(module.get()).status()); const HloInstruction* fusion = nullptr; ASSERT_THAT(module->entry_computation()->root_instruction(), GmockMatch(match::Fusion(&fusion))); EXPECT_THAT(*fusion, OutputTileSizesIs(ElementsAre(64, 256))); const HloInstruction* lhs = nullptr; const HloInstruction* rhs = nullptr; EXPECT_THAT(fusion->fused_expression_root(), GmockMatch(match::Dot(match::Fusion(&lhs), match::Fusion(&rhs)))); EXPECT_THAT(*lhs, OutputTileSizesIs(ElementsAre(64, 32))); EXPECT_THAT(*rhs, OutputTileSizesIs(ElementsAre(32, 256))); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/nest_gemm_fusion.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/nest_gemm_fusion_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
79b73ddc-d78b-4fc8-891a-abd1b5e2ac20
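FindOutputTileSizesForEpilogue above searches permutations of the expected dot tile sizes (all 1s except block_m and block_n in the last two positions) for an output tiling that both satisfies the symbolic-tile-analysis constraints and maps back onto the dot's expected tile. A standalone model of that search loop, where callbacks stand in for the analysis and the tile-size mapping (names are illustrative, not XLA's):

#include <algorithm>
#include <cstdint>
#include <functional>
#include <optional>
#include <vector>

using Tiles = std::vector<int64_t>;

std::optional<Tiles> FindOutputTiles(
    const Tiles& expected,  // e.g. {1, 64, 256} for a rank-3 dot
    const std::function<bool(const Tiles&)>& satisfies_constraints,
    const std::function<Tiles(const Tiles&)>& map_to_dot_tiles) {
  Tiles candidate = expected;
  // std::next_permutation enumerates permutations in lexicographic order,
  // so start from the sorted arrangement to visit every one exactly once.
  std::sort(candidate.begin(), candidate.end());
  do {
    if (satisfies_constraints(candidate) &&
        map_to_dot_tiles(candidate) == expected) {
      return candidate;
    }
  } while (std::next_permutation(candidate.begin(), candidate.end()));
  return std::nullopt;  // mirrors the InternalError path above
}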
cpp
google/libaddressinput
region_data_builder
cpp/src/region_data_builder.cc
cpp/test/region_data_builder_test.cc
#include <libaddressinput/region_data_builder.h> #include <libaddressinput/address_data.h> #include <libaddressinput/preload_supplier.h> #include <libaddressinput/region_data.h> #include <cassert> #include <cstddef> #include <string> #include <vector> #include "language.h" #include "lookup_key.h" #include "region_data_constants.h" #include "rule.h" #include "util/size.h" namespace i18n { namespace addressinput { namespace { const size_t kLookupKeysMaxDepth = size(LookupKey::kHierarchy) - 1; void BuildRegionTreeRecursively( const std::map<std::string, const Rule*>& rules, std::map<std::string, const Rule*>::const_iterator hint, const LookupKey& parent_key, RegionData* parent_region, const std::vector<std::string>& keys, bool prefer_latin_name, size_t region_max_depth) { assert(parent_region != nullptr); LookupKey lookup_key; for (const auto& key : keys) { lookup_key.FromLookupKey(parent_key, key); const std::string lookup_key_string = lookup_key.ToKeyString(kLookupKeysMaxDepth); ++hint; if (hint == rules.end() || hint->first != lookup_key_string) { hint = rules.find(lookup_key_string); if (hint == rules.end()) { return; } } const Rule* rule = hint->second; assert(rule != nullptr); const std::string& local_name = rule->GetName().empty() ? key : rule->GetName(); const std::string& name = prefer_latin_name && !rule->GetLatinName().empty() ? rule->GetLatinName() : local_name; RegionData* region = parent_region->AddSubRegion(key, name); if (!rule->GetSubKeys().empty() && region_max_depth > parent_key.GetDepth()) { BuildRegionTreeRecursively(rules, hint, lookup_key, region, rule->GetSubKeys(), prefer_latin_name, region_max_depth); } } } RegionData* BuildRegion(const std::map<std::string, const Rule*>& rules, const std::string& region_code, const Language& language) { AddressData address; address.region_code = region_code; LookupKey lookup_key; lookup_key.FromAddress(address); auto hint = rules.find(lookup_key.ToKeyString(kLookupKeysMaxDepth)); assert(hint != rules.end()); const Rule* rule = hint->second; assert(rule != nullptr); auto* region = new RegionData(region_code); size_t region_max_depth = RegionDataConstants::GetMaxLookupKeyDepth(region_code); if (region_max_depth > 0) { BuildRegionTreeRecursively(rules, hint, lookup_key, region, rule->GetSubKeys(), language.has_latin_script, region_max_depth); } return region; } } RegionDataBuilder::RegionDataBuilder(PreloadSupplier* supplier) : supplier_(supplier), cache_() { assert(supplier_ != nullptr); } RegionDataBuilder::~RegionDataBuilder() { for (const auto& outer : cache_) { assert(outer.second != nullptr); for (const auto& inner : *outer.second) { delete inner.second; } delete outer.second; } } const RegionData& RegionDataBuilder::Build( const std::string& region_code, const std::string& ui_language_tag, std::string* best_region_tree_language_tag) { assert(supplier_->IsLoaded(region_code)); assert(best_region_tree_language_tag != nullptr); auto region_it = cache_.find(region_code); if (region_it == cache_.end()) { region_it = cache_.emplace(region_code, new LanguageRegionMap).first; } Rule rule; rule.ParseSerializedRule(RegionDataConstants::GetRegionData(region_code)); static const Language kUndefinedLanguage("und"); const Language best_language = rule.GetLanguages().empty() ? 
kUndefinedLanguage : ChooseBestAddressLanguage(rule, Language(ui_language_tag)); *best_region_tree_language_tag = best_language.tag; auto language_it = region_it->second->find(best_language.tag); if (language_it == region_it->second->end()) { const auto& rules = supplier_->GetRulesForRegion(region_code); language_it = region_it->second ->emplace(best_language.tag, BuildRegion(rules, region_code, best_language)) .first; } return *language_it->second; } } }
#include <libaddressinput/region_data_builder.h> #include <libaddressinput/callback.h> #include <libaddressinput/null_storage.h> #include <libaddressinput/preload_supplier.h> #include <libaddressinput/region_data.h> #include <memory> #include <string> #include <gtest/gtest.h> #include "testdata_source.h" namespace { using i18n::addressinput::BuildCallback; using i18n::addressinput::NullStorage; using i18n::addressinput::PreloadSupplier; using i18n::addressinput::RegionData; using i18n::addressinput::RegionDataBuilder; using i18n::addressinput::TestdataSource; class RegionDataBuilderTest : public testing::Test { public: RegionDataBuilderTest(const RegionDataBuilderTest&) = delete; RegionDataBuilderTest& operator=(const RegionDataBuilderTest&) = delete; protected: RegionDataBuilderTest() : supplier_(new TestdataSource(true), new NullStorage), builder_(&supplier_), loaded_callback_(BuildCallback(this, &RegionDataBuilderTest::OnLoaded)), best_language_() {} PreloadSupplier supplier_; RegionDataBuilder builder_; const std::unique_ptr<const PreloadSupplier::Callback> loaded_callback_; std::string best_language_; private: void OnLoaded(bool success, const std::string& region_code, int num_rules) { ASSERT_TRUE(success); ASSERT_FALSE(region_code.empty()); ASSERT_LT(0, num_rules); ASSERT_TRUE(supplier_.IsLoaded(region_code)); } }; TEST_F(RegionDataBuilderTest, BuildUsRegionTree) { supplier_.LoadRules("US", *loaded_callback_); const RegionData& tree = builder_.Build("US", "en-US", &best_language_); EXPECT_FALSE(tree.sub_regions().empty()); } TEST_F(RegionDataBuilderTest, BuildCnRegionTree) { supplier_.LoadRules("CN", *loaded_callback_); const RegionData& tree = builder_.Build("CN", "zh-Hans", &best_language_); ASSERT_FALSE(tree.sub_regions().empty()); EXPECT_FALSE(tree.sub_regions().front()->sub_regions().empty()); } TEST_F(RegionDataBuilderTest, BuildChRegionTree) { supplier_.LoadRules("CH", *loaded_callback_); const RegionData& tree = builder_.Build("CH", "de-CH", &best_language_); EXPECT_TRUE(tree.sub_regions().empty()); } TEST_F(RegionDataBuilderTest, BuildZwRegionTree) { supplier_.LoadRules("ZW", *loaded_callback_); const RegionData& tree = builder_.Build("ZW", "en-ZW", &best_language_); EXPECT_TRUE(tree.sub_regions().empty()); } TEST_F(RegionDataBuilderTest, UsTreeHasStateAbbreviationsAndNames) { supplier_.LoadRules("US", *loaded_callback_); const RegionData& tree = builder_.Build("US", "en-US", &best_language_); EXPECT_EQ("en", best_language_); ASSERT_FALSE(tree.sub_regions().empty()); EXPECT_EQ("AL", tree.sub_regions().front()->key()); EXPECT_EQ("Alabama", tree.sub_regions().front()->name()); } TEST_F(RegionDataBuilderTest, KrWithKoLatnLanguageHasKoreanKeysAndLatinScriptNames) { supplier_.LoadRules("KR", *loaded_callback_); const RegionData& tree = builder_.Build("KR", "ko-Latn", &best_language_); EXPECT_EQ("ko-Latn", best_language_); ASSERT_FALSE(tree.sub_regions().empty()); EXPECT_EQ("강원도", tree.sub_regions().front()->key()); EXPECT_EQ("Gangwon", tree.sub_regions().front()->name()); } TEST_F(RegionDataBuilderTest, KrWithKoKrLanguageHasKoreanKeysAndNames) { supplier_.LoadRules("KR", *loaded_callback_); const RegionData& tree = builder_.Build("KR", "ko-KR", &best_language_); EXPECT_EQ("ko", best_language_); ASSERT_FALSE(tree.sub_regions().empty()); EXPECT_EQ("강원도", tree.sub_regions().front()->key()); EXPECT_EQ("강원", tree.sub_regions().front()->name()); } }
https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/region_data_builder.cc
https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/region_data_builder_test.cc
2610f7b1043d6784ada41392fc9392d1ea09ea07
31c1fe9d-b92e-40d4-9b3a-0cd0e17a79e6
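BuildRegionTreeRecursively above threads a std::map iterator through the recursion as a hint: because a rule's sub-keys tend to be stored adjacently in the rules map, it first tries the entry right after the hint and only falls back to a full find() on a mismatch. A minimal sketch of that access pattern, with an illustrative value type in place of const Rule*:

#include <map>
#include <string>

// `hint` must point at a valid entry on the way in, as it does in the
// recursion above; it is advanced or repositioned as a side effect.
const int* LookupWithHint(const std::map<std::string, int>& rules,
                          std::map<std::string, int>::const_iterator& hint,
                          const std::string& key) {
  ++hint;  // optimistic: the wanted entry is usually the next one
  if (hint == rules.end() || hint->first != key) {
    hint = rules.find(key);  // pessimistic O(log n) fallback
    if (hint == rules.end()) {
      return nullptr;  // unknown key: the caller stops descending
    }
  }
  return &hint->second;
}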
cpp
tensorflow/tensorflow
dataset
tensorflow/core/framework/dataset.cc
tensorflow/core/framework/dataset_test.cc
#include "tensorflow/core/framework/dataset.h" #include <unordered_map> #include <vector> #include "tensorflow/core/activity_watcher/activity.h" #include "tensorflow/core/framework/dataset.pb.h" #include "tensorflow/core/framework/device_base.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/resource_mgr.h" #include "tensorflow/core/framework/variant_encode_decode.h" #include "tensorflow/core/framework/variant_op_registry.h" #include "tensorflow/core/framework/versions.pb.h" #include "tensorflow/core/graph/graph_def_builder.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/refcount.h" #include "tensorflow/core/platform/resource.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/strcat.h" #include "tensorflow/core/profiler/lib/traceme.h" #include "tensorflow/core/public/version.h" #if defined(PLATFORM_WINDOWS) #undef GetMessage #endif namespace tensorflow { namespace data { namespace { static mutex* get_dataset_op_registry_lock() { static mutex dataset_op_registry_lock(LINKER_INITIALIZED); return &dataset_op_registry_lock; } static std::unordered_set<string>* get_dataset_op_registry() { static std::unordered_set<string>* names = new std::unordered_set<string>; return names; } std::string UniqueNodeName(const std::string& base) { static std::atomic<int64_t> counter(0); return strings::StrCat(base, "/", counter.fetch_add(1)); } class DatasetVariantWrapper { public: DatasetVariantWrapper() : dataset_(nullptr) {} explicit DatasetVariantWrapper(DatasetBase* dataset) : dataset_(dataset) {} DatasetVariantWrapper(const DatasetVariantWrapper& other) : dataset_(other.dataset_) { if (dataset_) dataset_->Ref(); } DatasetVariantWrapper& operator=(DatasetVariantWrapper&& other) { if (&other == this) return *this; std::swap(dataset_, other.dataset_); return *this; } DatasetVariantWrapper& operator=(const DatasetVariantWrapper& other) = delete; ~DatasetVariantWrapper() { if (dataset_) dataset_->Unref(); } DatasetBase* get() const { return dataset_; } string TypeName() const { return "tensorflow::DatasetVariantWrapper"; } string DebugString() const { if (dataset_) { return dataset_->DebugString(); } else { return "<Uninitialized DatasetVariantWrapper>"; } } void Encode(VariantTensorData* data) const { LOG(ERROR) << "The Encode() method is not implemented for " "DatasetVariantWrapper objects."; } bool Decode(const VariantTensorData& data) { LOG(ERROR) << "The Decode() method is not implemented for " "DatasetVariantWrapper objects."; return false; } private: DatasetBase* dataset_; }; const char kWrappedDatasetVariantTypeName[] = "tensorflow::data::WrappedDatasetVariant"; class WrappedDatasetVariantWrapper { public: WrappedDatasetVariantWrapper() {} explicit WrappedDatasetVariantWrapper(const Tensor& ds_tensor) : ds_tensor_(ds_tensor) {} Tensor get() const { return ds_tensor_; } string TypeName() const { return "tensorflow::WrappedDatasetVariantWrapper"; } string DebugString() const { return "tensorflow::WrappedDatasetVariantWrapper::DebugString"; } void Encode(VariantTensorData* data) const { *(data->add_tensors()) = ds_tensor_; } bool Decode(const VariantTensorData& data) { ds_tensor_ = data.tensors(0); return true; } private: Tensor ds_tensor_; }; class WrapDatasetVariantOp : public OpKernel { public: explicit 
WrapDatasetVariantOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} void Compute(OpKernelContext* ctx) override { const Tensor& tensor = ctx->input(0); OP_REQUIRES(ctx, tensor.dtype() == DT_VARIANT && TensorShapeUtils::IsScalar(tensor.shape()), errors::InvalidArgument( "Dataset tensor must be a scalar of dtype DT_VARIANT.")); DatasetBase* unused; OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(tensor, &unused)); Tensor* output = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &output)); output->scalar<Variant>()() = WrappedDatasetVariantWrapper(tensor); } }; REGISTER_KERNEL_BUILDER(Name("WrapDatasetVariant").Device(DEVICE_CPU), WrapDatasetVariantOp); REGISTER_KERNEL_BUILDER(Name("WrapDatasetVariant") .HostMemory("input_handle") .HostMemory("output_handle") .Device(DEVICE_GPU), WrapDatasetVariantOp); class UnwrapDatasetVariantOp : public OpKernel { public: explicit UnwrapDatasetVariantOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} void Compute(OpKernelContext* ctx) override { const Tensor& tensor = ctx->input(0); OP_REQUIRES(ctx, tensor.dtype() == DT_VARIANT && TensorShapeUtils::IsScalar(tensor.shape()), errors::InvalidArgument( "Dataset tensor must be a scalar of dtype DT_VARIANT.")); Variant variant = tensor.scalar<Variant>()(); const WrappedDatasetVariantWrapper* wrapper = variant.get<WrappedDatasetVariantWrapper>(); OP_REQUIRES(ctx, wrapper != nullptr, errors::InvalidArgument( "Tensor must be a WrappedDataset variant object.")); Tensor ds_tensor = wrapper->get(); OP_REQUIRES_OK(ctx, ctx->set_output("output_handle", ds_tensor)); } }; REGISTER_KERNEL_BUILDER(Name("UnwrapDatasetVariant").Device(DEVICE_CPU), UnwrapDatasetVariantOp); REGISTER_KERNEL_BUILDER(Name("UnwrapDatasetVariant") .HostMemory("input_handle") .HostMemory("output_handle") .Device(DEVICE_GPU), UnwrapDatasetVariantOp); static Status WrappedDatasetVariantDeviceCopy( const WrappedDatasetVariantWrapper& from, WrappedDatasetVariantWrapper* to, const UnaryVariantOpRegistry::AsyncTensorDeviceCopyFn& copy) { *to = WrappedDatasetVariantWrapper(from); return absl::OkStatus(); } #define REGISTER_OPTIONAL_COPY(DIRECTION) \ INTERNAL_REGISTER_UNARY_VARIANT_DEVICE_COPY_FUNCTION( \ WrappedDatasetVariantWrapper, DIRECTION, \ WrappedDatasetVariantDeviceCopy) REGISTER_OPTIONAL_COPY(VariantDeviceCopyDirection::HOST_TO_DEVICE); REGISTER_OPTIONAL_COPY(VariantDeviceCopyDirection::DEVICE_TO_HOST); REGISTER_OPTIONAL_COPY(VariantDeviceCopyDirection::DEVICE_TO_DEVICE); REGISTER_UNARY_VARIANT_DECODE_FUNCTION(WrappedDatasetVariantWrapper, kWrappedDatasetVariantTypeName); } Status GraphDefBuilderWrapper::AddDataset(const DatasetBase* dataset, const std::vector<Node*>& inputs, Node** output) { return AddDataset(dataset, inputs, {}, output); } Status GraphDefBuilderWrapper::AddDataset( const DatasetBase* dataset, const std::vector<Node*>& inputs, const std::vector<std::pair<StringPiece, AttrValue>>& attrs, Node** output) { std::vector<std::pair<size_t, Node*>> enumerated_inputs(inputs.size()); for (size_t i = 0; i < inputs.size(); i++) { enumerated_inputs[i] = std::make_pair(i, inputs[i]); } return AddDataset(dataset, enumerated_inputs, {}, attrs, output); } Status GraphDefBuilderWrapper::AddDataset( const DatasetBase* dataset, const std::vector<std::pair<size_t, Node*>>& inputs, const std::vector<std::pair<size_t, absl::Span<Node* const>>>& list_inputs, const std::vector<std::pair<StringPiece, AttrValue>>& attrs, Node** output) { return AddDataset(dataset, inputs, list_inputs, attrs, false, output); } Status 
GraphDefBuilderWrapper::AddDataset( const DatasetBase* dataset, const std::vector<std::pair<size_t, Node*>>& inputs, const std::vector<std::pair<size_t, absl::Span<Node* const>>>& list_inputs, const std::vector<std::pair<StringPiece, AttrValue>>& attrs, bool use_dataset_name, Node** output) { auto& type_string = dataset->type_string(); auto opts = absl::make_unique<GraphDefBuilder::Options>(b_->opts()); bool has_output_types_attr = HasAttr(type_string, "output_types"); bool has_output_shapes_attr = HasAttr(type_string, "output_shapes"); if (has_output_shapes_attr) { opts = absl::make_unique<GraphDefBuilder::Options>( opts->WithAttr("output_shapes", dataset->output_shapes())); } if (has_output_types_attr) { opts = absl::make_unique<GraphDefBuilder::Options>( opts->WithAttr("output_types", dataset->output_dtypes())); } bool has_metadata_attr = HasAttr(type_string, "metadata"); if (has_metadata_attr) { std::string serialized_metadata; dataset->metadata().SerializeToString(&serialized_metadata); opts = absl::make_unique<GraphDefBuilder::Options>( opts->WithAttr("metadata", serialized_metadata)); } for (const auto& attr : attrs) { opts = absl::make_unique<GraphDefBuilder::Options>( opts->WithAttr(attr.first, attr.second)); } if (opts->HaveError()) { return errors::Internal("AddDataset: Failed to build Options with error ", opts->StatusToString()); } NodeBuilder node_builder( use_dataset_name ? dataset->node_name() : opts->GetNameForOp(type_string), type_string, opts->op_registry()); { size_t total_size = inputs.size() + list_inputs.size(); auto inputs_iter = inputs.begin(); auto list_inputs_iter = list_inputs.begin(); for (int i = 0; i < total_size; i++) { if (inputs_iter != inputs.end() && inputs_iter->first == i) { node_builder.Input(NodeBuilder::NodeOut(inputs_iter->second)); inputs_iter++; } else if (list_inputs_iter != list_inputs.end() && list_inputs_iter->first == i) { std::vector<NodeBuilder::NodeOut> nodeout_inputs; nodeout_inputs.reserve(list_inputs_iter->second.size()); for (Node* n : list_inputs_iter->second) { nodeout_inputs.emplace_back(n); } node_builder.Input(nodeout_inputs); list_inputs_iter++; } else { return errors::InvalidArgument("No input found for index ", i); } } } *output = opts->FinalizeBuilder(&node_builder); if (*output == nullptr) { return errors::Internal("AddDataset: Failed to build ", type_string, " op with error ", opts->StatusToString()); } return absl::OkStatus(); } Status GraphDefBuilderWrapper::AddFunction( SerializationContext* ctx, const string& function_name, const FunctionLibraryDefinition& lib_def) { if (b_->HasFunction(function_name)) { VLOG(1) << "Function with name " << function_name << " already exists in" << " the graph. 
It will not be added again."; return absl::OkStatus(); } const FunctionDef* f_def = lib_def.Find(function_name); if (f_def == nullptr) { return errors::InvalidArgument("Unable to find FunctionDef for ", function_name, " in the registry."); } FunctionDefLibrary def; *def.add_function() = *f_def; const string gradient_func = lib_def.FindGradient(function_name); if (!gradient_func.empty()) { GradientDef* g_def = def.add_gradient(); g_def->set_function_name(function_name); g_def->set_gradient_func(gradient_func); } TF_RETURN_IF_ERROR(b_->AddFunctionLibrary(def)); for (const NodeDef& node_def : f_def->node_def()) { const OpRegistrationData* op_reg_data = nullptr; TF_RETURN_IF_ERROR(lib_def.LookUp(node_def.op(), &op_reg_data)); if (op_reg_data->is_function_op) { TF_RETURN_IF_ERROR(AddFunction(ctx, op_reg_data->op_def.name(), lib_def)); } for (const auto& pair : node_def.attr()) { TF_RETURN_IF_ERROR(AddAttrFunctions(ctx, pair.second, lib_def)); } } for (auto iter = f_def->attr().begin(); iter != f_def->attr().end(); iter++) { TF_RETURN_IF_ERROR(AddAttrFunctions(ctx, iter->second, lib_def)); } return absl::OkStatus(); } void GraphDefBuilderWrapper::AddPlaceholderInternal(const Tensor& val, Node** output) { *output = ops::SourceOp( "Placeholder", b_->opts().WithAttr("dtype", val.dtype()).WithAttr("shape", val.shape())); } void GraphDefBuilderWrapper::AddTensorInternal(const Tensor& val, Node** output) { *output = ops::SourceOp( "Const", b_->opts().WithAttr("dtype", val.dtype()).WithAttr("value", val)); } bool GraphDefBuilderWrapper::HasAttr(const string& name, const string& attr_name) const { const OpDef* op_def = nullptr; Status s = b_->opts().op_registry()->LookUpOpDef(name, &op_def); if (!s.ok() || op_def == nullptr) { return false; } return HasAttr(op_def, attr_name); } int32_t GetRunnerThreadpoolSizeFromOpKernelContext(OpKernelContext* ctx) { thread::ThreadPool* thread_pool = ctx->device()->tensorflow_device_thread_pool(); if (thread_pool) { return thread_pool->NumThreads(); } else { static const int32_t kDefaultRunnerThreadpoolSize = port::MaxParallelism(); return kDefaultRunnerThreadpoolSize; } } int64_t MemoryCheckpoint::IdRegistry::Add(const std::string& prefix, const std::string& key) { mutex_lock l(mu_); auto pair = std::make_pair(prefix, key); if (string_to_int_.contains(pair)) { return string_to_int_[pair]; } int64_t id = next_id_++; int_to_string_[id] = pair; string_to_int_[pair] = id; return id; } std::vector<int64_t> MemoryCheckpoint::IdRegistry::GetMatchingIds( const std::string& prefix_to_match) { mutex_lock l(mu_); std::vector<int64_t> ids; for (const auto& [pair, id] : string_to_int_) { auto [prefix, key] = pair; if (prefix.compare(0, prefix_to_match.length(), prefix_to_match) == 0) { ids.push_back(id); } } return ids; } std::pair<std::string, std::string> MemoryCheckpoint::IdRegistry::Get( int64_t id) { mutex_lock l(mu_); auto result = int_to_string_.find(id); DCHECK(result != int_to_string_.end()) << "Failed find id " << id << " in IdRegistry. " << "Max id is: " << next_id_ - 1; return result->second; } void MemoryCheckpoint::IdRegistry::RemoveIds(const std::vector<int64_t>& ids) { mutex_lock l(mu_); for (const auto& id : ids) { string_to_int_.erase(int_to_string_[id]); int_to_string_.erase(id); } } std::string MemoryCheckpoint::DebugString() const { std::string result = absl::StrCat("status=", status_.ToString(), ", " "root=", (is_root_ ? 
"true" : "false"), "\n"); absl::StrAppend(&result, "number of integers: ", int_values_.size(), "\n"); for (const auto& [k, v] : int_values_) { absl::StrAppend(&result, " ", id_registry_->Get(k).first, ":", id_registry_->Get(k).second, ": ", v, "\n"); } absl::StrAppend(&result, "number of strings: ", str_values_.size(), "\n"); for (const auto& [k, v] : str_values_) { absl::StrAppend(&result, " ", id_registry_->Get(k).first, ":", id_registry_->Get(k).second, ": ", v, "\n"); } absl::StrAppend(&result, "number of tensors: ", tensor_values_.size(), "\n"); absl::StrAppend( &result, "number of expired prefixes: ", expired_prefixes_.size(), "\n"); return result; } void MemoryCheckpoint::Merge(MemoryCheckpoint* other) { if (!status_.ok()) { return; } if (!other->status_.ok()) { status_ = other->status_; int_values_.clear(); str_values_.clear(); tensor_values_.clear(); } for (const auto& [k, v] : other->int_values_) { int_values_[k] = v; } for (const auto& [k, v] : other->str_values_) { str_values_[k] = v; } for (const auto& [k, v] : other->tensor_values_) { tensor_values_[k] = v; } for (const auto& prefix : other->expired_prefixes_) { Purge(prefix); } other->expired_prefixes_.clear(); VLOG(5) << "MemoryCheckpoint::Merge " << DebugString(); } void MemoryCheckpoint::Purge(const std::string& prefix) { std::vector<int64_t> ids = id_registry_->GetMatchingIds(prefix); for (const auto& id : ids) { int_values_.erase(id); str_values_.erase(id); tensor_values_.erase(id); } if (!is_root_) { expired_prefixes_.insert(prefix); } else { id_registry_->RemoveIds(ids); } } Status MemoryCheckpoint::Save(IteratorStateWriter* writer) const { for (const auto& [id, value] : int_values_) { auto [prefix, key] = id_registry_->Get(id); TF_RETURN_IF_ERROR(writer->WriteScalar(prefix, key, value)); } for (const auto& [id, value] : str_values_) { auto [prefix, key] = id_registry_->Get(id); TF_RETURN_IF_ERROR(writer->WriteScalar(prefix, key, value)); } for (const auto& [id, value] : tensor_values_) { auto [prefix, key] = id_registry_->Get(id); TF_RETURN_IF_ERROR(writer->WriteTensor(prefix, key, value)); } return absl::OkStatus(); } Status IteratorBase::InitializeBase(IteratorContext* ctx, const IteratorBase* parent) { parent_ = parent; id_ = Hash64CombineUnordered(Hash64(prefix()), reinterpret_cast<uint64>(this)); if (parent_) { parent_id_ = Hash64CombineUnordered(Hash64(parent_->prefix()), reinterpret_cast<uint64>(parent_)); if (const auto& model = ctx->model()) { auto factory = [ctx, this](model::Node::Args args) { return CreateNode(ctx, std::move(args)); }; model->AddNode(std::move(factory), prefix(), parent->model_node(), &node_); cleanup_fns_.push_back([this, model]() { model->RemoveNode(node_); }); } } return absl::OkStatus(); } Status GetCompressedElementFromVariantTensor( const Tensor& tensor, const CompressedElement** out_compressed_element) { if (!(tensor.dtype() == DT_VARIANT && TensorShapeUtils::IsScalar(tensor.shape()))) { return errors::InvalidArgument( "`CompressedElement` tensor must be a scalar of dtype `DT_VARIANT`."); } const Variant& variant = tensor.scalar<Variant>()(); const CompressedElement* compressed_element = variant.get<CompressedElement>(); if (compressed_element == nullptr) { return errors::InvalidArgument( "Tensor must be a `CompressedElement` object."); } *out_compressed_element = compressed_element; return absl::OkStatus(); } int64_t GetAllocatedBytes(const std::vector<Tensor>& element) { int64_t allocated_bytes = 0; for (auto& tensor : element) { if (tensor.dtype() == DT_VARIANT) { DatasetBase* 
dataset; if (GetDatasetFromVariantTensor(tensor, &dataset).ok()) { allocated_bytes += dataset->AllocatedBytes(); continue; } const CompressedElement* compressed_element; if (GetCompressedElementFromVariantTensor(tensor, &compressed_element) .ok()) { allocated_bytes += compressed_element->ByteSizeLong(); continue; } } allocated_bytes += tensor.AllocatedBytes(); } return allocated_bytes; } int64_t GetTotalBytes(const std::vector<Tensor>& element) { int64_t total_bytes = 0; for (auto& tensor : element) { if (tensor.dtype() == DT_VARIANT) { DatasetBase* dataset; if (GetDatasetFromVariantTensor(tensor, &dataset).ok()) { total_bytes += dataset->TotalBytes(); continue; } const CompressedElement* compressed_element; if (GetCompressedElementFromVariantTensor(tensor, &compressed_element) .ok()) { total_bytes += compressed_element->ByteSizeLong(); continue; } } total_bytes += tensor.TotalBytes(); } return total_bytes; } std::string FullName(const std::string& prefix, const std::string& name) { if (absl::StrContains(name, kColon)) { LOG(ERROR) << name << " should not contain " << kColon; } return strings::StrCat(kFullNameRandomHex, kPipe, prefix, kColon, name); } Status ExtractIteratorPrefix(StringPiece key, string* prefix) { if (!absl::StartsWith(key, data::kFullNameRandomHex)) { return errors::InvalidArgument("Key: ", key, " was not generated using full_name."); } std::vector<string> split_keys = str_util::Split(key, data::kPipe); if (split_keys.size() != 2) { return errors::InvalidArgument("Key: ", key, " was not generated using full_name."); } string real_key = split_keys[1]; const int pos = real_key.rfind(kColon); *prefix = real_key.substr(0, pos); return absl::OkStatus(); } Status GetDatasetFromVariantTensor(const Tensor& tensor, DatasetBase** out_dataset) { if (!(tensor.dtype() == DT_VARIANT && TensorShapeUtils::IsScalar(tensor.shape()))) { return errors::InvalidArgument( "Dataset tensor must be a scalar of dtype DT_VARIANT."); } const Variant& variant = tensor.scalar<Variant>()(); const DatasetVariantWrapper* wrapper = variant.get<DatasetVariantWrapper>(); if (wrapper == nullptr) { return errors::InvalidArgument("Tensor must be a Dataset object."); } *out_dataset = wrapper->get(); if (*out_dataset == nullptr) { return errors::Internal("Read uninitialized Dataset variant."); } return absl::OkStatus(); } Status StoreDatasetInVariantTensor(DatasetBase* dataset, Tensor* tensor) { if (!(tensor->dtype() == DT_VARIANT && TensorShapeUtils::IsScalar(tensor->shape()))) { return errors::InvalidArgument( "Dataset tensor must be a scalar of dtype DT_VARIANT."); } tensor->scalar<Variant>()() = DatasetVariantWrapper(dataset); return absl::OkStatus(); } namespace internal { #define WARN_PROTO_FIELD_CONFLICT(reflection, field, field_type, src, dst) \ { \ auto source_value = reflection->Get##field_type(src, field); \ auto destination_value = reflection->Get##field_type(*dst, field); \ if (source_value != destination_value) { \ LOG(WARNING) << "Changing the value of option field " << field->name() \ << " from " << destination_value << " to " << source_value; \ } \ } #define WARN_PROTO_ENUM_FIELD_CONFLICT(reflection, field, src, dst) \ { \ auto source_value = reflection->GetEnum(src, field); \ auto destination_value = reflection->GetEnum(*dst, field); \ if (source_value != destination_value) { \ LOG(WARNING) << "Changing the value of option enum field " \ << field->name() << " from " \ << destination_value->full_name() << " to " \ << source_value->full_name(); \ } \ } void WarnProtoConflicts(const 
protobuf::Message& src, protobuf::Message* dst) { std::vector<const protobuf::FieldDescriptor*> set_src; std::vector<const protobuf::FieldDescriptor*> set_dst; const protobuf::Reflection* reflection = src.GetReflection(); reflection->ListFields(src, &set_src); reflection->ListFields(*dst, &set_dst); std::sort(set_src.begin(), set_src.end()); std::sort(set_dst.begin(), set_dst.end()); std::vector<const protobuf::FieldDescriptor*> in_both; std::set_intersection(set_src.begin(), set_src.end(), set_dst.begin(), set_dst.end(), std::back_inserter(in_both)); for (auto field : in_both) { if (field->name() == "framework_type") { continue; } if (field->type() == protobuf::FieldDescriptor::TYPE_MESSAGE) { WarnProtoConflicts(reflection->GetMessage(src, field), reflection->MutableMessage(dst, field)); } else { switch (field->cpp_type()) { case protobuf::FieldDescriptor::CPPTYPE_INT32: WARN_PROTO_FIELD_CONFLICT(reflection, field, Int32, src, dst); break; case protobuf::FieldDescriptor::CPPTYPE_INT64: WARN_PROTO_FIELD_CONFLICT(reflection, field, Int64, src, dst); break; case protobuf::FieldDescriptor::CPPTYPE_UINT32: WARN_PROTO_FIELD_CONFLICT(reflection, field, UInt32, src, dst); break; case protobuf::FieldDescriptor::CPPTYPE_UINT64: WARN_PROTO_FIELD_CONFLICT(reflection, field, UInt64, src, dst); break; case protobuf::FieldDescriptor::CPPTYPE_DOUBLE: WARN_PROTO_FIELD_CONFLICT(reflection, field, Double, src, dst); break; case protobuf::FieldDescriptor::CPPTYPE_FLOAT: WARN_PROTO_FIELD_CONFLICT(reflection, field, Float, src, dst); break; case protobuf::FieldDescriptor::CPPTYPE_BOOL: WARN_PROTO_FIELD_CONFLICT(reflection, field, Bool, src, dst); break; case protobuf::FieldDescriptor::CPPTYPE_ENUM: WARN_PROTO_ENUM_FIELD_CONFLICT(reflection, field, src, dst); break; default: { LOG(ERROR) << "Unrecognized proto type for field " << field->full_name(); } } } } } #undef WARN_PROTO_ENUM_FIELD_CONFLICT #undef WARN_PROTO_FIELD_CONFLICT void MergeOptions(const protobuf::Message& source, protobuf::Message* destination) { WarnProtoConflicts(source, destination); destination->MergeFrom(source); } void MergeOptions(const protobuf::MessageLite& source, protobuf::MessageLite* destination) { destination->CheckTypeAndMergeFrom(source); } } void DatasetBase::Initialize(const Metadata& metadata) { Status s = ComputeNumSources(); if (!s.ok()) { LOG_EVERY_N_SEC(ERROR, 10) << s; } s = MergeOptionsFromInputs(); if (!s.ok()) { LOG_EVERY_N_SEC(ERROR, 10) << s; } metadata_ = metadata; if (metadata_.name() == "") { static std::atomic<int64_t> id_counter(0); *metadata_.mutable_name() = strings::StrCat(type_string(), ":", id_counter.fetch_add(1)); } } Status DatasetBase::ComputeNumSources() { std::vector<const DatasetBase*> inputs; Status s = InputDatasets(&inputs); if (errors::IsUnimplemented(s)) { return s; } if (num_sources_ >= 0) { return absl::OkStatus(); } num_sources_ = 0; if (inputs.empty()) { num_sources_ = 1; return absl::OkStatus(); } for (const auto& input : inputs) { if (input->num_sources() < 0) { return errors::FailedPrecondition( "Cannot compute input sources for dataset of type ", type_string(), ", because sources could not be computed for input dataset of type ", input->type_string()); } num_sources_ += input->num_sources(); } return absl::OkStatus(); } Status DatasetBase::CheckRandomAccessCompatible(const int64 index) const { CardinalityOptions options; options.set_compute_level(CardinalityOptions::CARDINALITY_COMPUTE_MODERATE); int64 cardinality = Cardinality(options); if (cardinality == kInfiniteCardinality || 
cardinality == kUnknownCardinality) { return tensorflow::errors::FailedPrecondition( "Dataset of type ", this->DebugString(), " has ", cardinality == kInfiniteCardinality ? "infinite" : "unknown", " cardinality, which does not support random access."); } if (index < 0 || index >= cardinality) { return errors::OutOfRange("Index out of range [0, ", cardinality, "):", index); } return absl::OkStatus(); } Status DatasetBase::Get(OpKernelContext* ctx, int64 index, std::vector<Tensor>* out_tensors) const { return errors::Unimplemented("Random access is not implemented for dataset ", DebugString()); } Status DatasetBase::Get(AnyContext ctx, int64 index, std::vector<Tensor>* out_tensors) const { return errors::Unimplemented("Random access is not implemented for dataset ", DebugString()); } absl::StatusOr<DatasetBase*> DatasetBase::Finalize( OpKernelContext* ctx, std::function<absl::StatusOr<core::RefCountPtr<DatasetBase>>()> make_finalized_dataset) const { mutex_lock l(mu_); if (!finalized_dataset_) { TF_ASSIGN_OR_RETURN(finalized_dataset_, make_finalized_dataset()); } return finalized_dataset_.get(); } Status DatasetBase::MergeOptionsFromInputs() { std::vector<const DatasetBase*> inputs; Status s = InputDatasets(&inputs); if (errors::IsUnimplemented(s)) { return s; } if (inputs.empty()) { return absl::OkStatus(); } Options merged_options = inputs[0]->options_; for (int i = 1; i < inputs.size(); ++i) { internal::MergeOptions(inputs[i]->options_, &merged_options); } internal::MergeOptions(options_, &merged_options); options_ = merged_options; return absl::OkStatus(); } Status DatasetBase::MakeIterator( IteratorContext* ctx, const IteratorBase* parent, const string& output_prefix, std::unique_ptr<IteratorBase>* iterator) const { if (type_string() == "OptionsDataset" || type_string() == "FinalizeDataset") { std::vector<const DatasetBase*> inputs; Status s = InputDatasets(&inputs); return inputs[0]->MakeIterator(ctx, parent, output_prefix, iterator); } tsl::profiler::TraceMe traceme( [&] { return tsl::profiler::TraceMeEncode( strings::StrCat("MakeIterator::", type_string()), {}); }, tsl::profiler::TraceMeLevel::kInfo); *iterator = MakeIteratorInternal(output_prefix); Status s = (*iterator)->InitializeBase(ctx, parent); if (s.ok()) { s.Update((*iterator)->Initialize(ctx)); ctx->SaveCheckpoint(iterator->get()); } if (!s.ok()) { iterator->reset(); } return s; } Status DatasetBase::MakeSplitProviders( std::vector<std::unique_ptr<SplitProvider>>* split_providers) const { std::vector<const DatasetBase*> inputs; Status s = InputDatasets(&inputs); if (errors::IsUnimplemented(s)) { return errors::Unimplemented( "Cannot create split providers for dataset of type ", type_string(), ", because the dataset implements neither `InputDatasets` nor " "`MakeSplitProvider`."); } if (inputs.size() != 1) { return errors::Unimplemented( "Cannot create split providers for dataset of type ", type_string(), ", because the dataset is not unary (instead having arity ", inputs.size(), "), and no custom implementation of `MakeSplitProvider` is defined."); } return inputs[0]->MakeSplitProviders(split_providers); } std::optional<int64_t> DatasetBase::GetEstimatedElementSize() const { const auto& shapes = output_shapes(); const auto& dtypes = output_dtypes(); if (shapes.size() != dtypes.size()) { LOG(ERROR) << "This should not happen because the sizes of output_shapes() " "and output_dtypes() should always be " "the same."; return std::nullopt; } size_t num_outputs = shapes.size(); int64_t element_size = 0; for (int i = 0; i < 
num_outputs; ++i) { const auto& partial_shape = shapes[i]; const auto& dtype = dtypes[i]; auto num_elements = partial_shape.num_elements(); if (num_elements == -1) { return std::nullopt; } element_size += num_elements * DataTypeSize(dtype); } return element_size; } int64_t DatasetBase::Cardinality() const { mutex_lock l(cardinality_mu_); if (cardinality_ == kUnknownCardinality) { CardinalityOptions options; cardinality_ = CardinalityInternal(options); } return cardinality_; } int64_t DatasetBase::Cardinality(CardinalityOptions options) const { mutex_lock l(cardinality_mu_); if (cardinality_ == kUnknownCardinality) { cardinality_ = CardinalityInternal(options); } return cardinality_; } Status DatasetBase::InputDatasets( std::vector<const DatasetBase*>* inputs) const { return errors::Unimplemented( "Cannot compute input sources for dataset of type ", type_string(), ", because the dataset does not implement `InputDatasets`. To fix this, " "your dataset should override the `InputDatasets` method. If it is a " "source dataset, it should return empty inputs."); } Status DatasetBase::DatasetGraphDefBuilder::AddInputDataset( SerializationContext* ctx, const DatasetBase* dataset, Node** output) { Status status = dataset->AsGraphDefInternal(ctx, this, output); if (ctx->is_graph_rewrite()) { if (status.ok()) { (*output)->AddAttr(kCardinalityAttrForRewrite, dataset->Cardinality()); } else if (errors::IsUnimplemented(status)) { Tensor t(DT_VARIANT, TensorShape({})); dataset->Ref(); TF_RETURN_IF_ERROR( StoreDatasetInVariantTensor(const_cast<DatasetBase*>(dataset), &t)); TF_RETURN_IF_ERROR(AddPlaceholder(t, output)); DCHECK_NE(ctx->input_list(), nullptr); ctx->input_list()->emplace_back((*output)->name(), std::move(t)); LOG_EVERY_N_SEC(WARNING, 30) << "Input of " << dataset->DebugString() << " will not be optimized because the dataset does not implement " "the " "AsGraphDefInternal() method needed to apply optimizations."; return absl::OkStatus(); } } return status; } Status DatasetBase::DatasetGraphDefBuilder::AddDatasetOrTensor( SerializationContext* ctx, const Tensor& t, Node** output) { if (t.dtype() == DT_VARIANT) { Status s = AddDatasetOrTensorHelper(ctx, t, output); if (s.ok()) { return s; } } if (t.dtype() == DT_RESOURCE && !ctx->is_graph_rewrite()) { Status s = AddResourceHelper(ctx, t, output); if (!errors::IsUnimplemented(s)) { return s; } } return AddTensor(t, output); } Status DatasetBase::DatasetGraphDefBuilder::AddIdentity( SerializationContext* ctx, const std::string& name_prefix, Node** input, Node** output) { *output = ops::UnaryOp("Identity", *input, builder()->opts().WithName(UniqueNodeName(name_prefix))); return absl::OkStatus(); } Status DatasetBase::DatasetGraphDefBuilder::AddDatasetOrTensorHelper( SerializationContext* ctx, const Tensor& t, Node** output) { if (t.dims() == 0) { DatasetBase* dataset; TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(t, &dataset)); return AddInputDataset(ctx, dataset, output); } std::vector<NodeBuilder::NodeOut> nodes; for (int i = 0; i < t.dim_size(0); ++i) { Node* node; TF_RETURN_IF_ERROR(AddDatasetOrTensorHelper(ctx, t.SubSlice(i), &node)); nodes.emplace_back(node); } auto op_name = "Pack"; auto opts = builder()->opts(); NodeBuilder node_builder(opts.GetNameForOp(op_name), op_name, opts.op_registry()); node_builder.Input(std::move(nodes)); *output = opts.FinalizeBuilder(&node_builder); return absl::OkStatus(); } Status DatasetBase::DatasetGraphDefBuilder::AddResourceHelper( SerializationContext* ctx, const Tensor& t, Node** output) { if 
(t.NumElements() == 0) { return errors::InvalidArgument("Empty resource handle"); } const ResourceHandle& handle = t.flat<ResourceHandle>()(0); if (ctx->device_name() != handle.device()) { return errors::InvalidArgument("Trying to access resource ", handle.name(), " located in device ", handle.device(), " from device ", ctx->device_name()); } ResourceBase* resource; TF_RETURN_IF_ERROR(ctx->resource_mgr()->Lookup(handle, &resource)); core::ScopedUnref unref(resource); return resource->AsGraphDef(builder(), output); } DatasetBaseIterator::DatasetBaseIterator(const BaseParams& params) : params_(params) { params_.dataset->Ref(); VLOG(2) << prefix() << " constructor"; strings::StrAppend(&traceme_metadata_, "name=", dataset()->metadata().name()); strings::StrAppend(&traceme_metadata_, ",shapes="); auto& shapes = output_shapes(); for (int i = 0; i < shapes.size(); ++i) { if (i > 0) { strings::StrAppend(&traceme_metadata_, " "); } strings::StrAppend(&traceme_metadata_, shapes.at(i).DebugString()); } strings::StrAppend(&traceme_metadata_, ",types="); auto& types = output_dtypes(); for (int i = 0; i < types.size(); ++i) { if (i > 0) { strings::StrAppend(&traceme_metadata_, " "); } strings::StrAppend(&traceme_metadata_, DataTypeString(types.at(i))); } } DatasetBaseIterator::~DatasetBaseIterator() { VLOG(2) << prefix() << " destructor"; params_.dataset->Unref(); } string DatasetBaseIterator::BuildTraceMeName() { string result = strings::StrCat(params_.prefix, "#", traceme_metadata_, ",id=", id_); if (parent_) { strings::StrAppend(&result, ",parent_id=", parent_id_); } TraceMeMetadata metadata = GetTraceMeMetadata(); for (const auto& pair : metadata) { strings::StrAppend(&result, ",", pair.first, "=", pair.second); } if (model_node() != nullptr) { if (model_node()->buffered_elements() > 0) { strings::StrAppend( &result, ",buffered_elements=", static_cast<long long>(model_node()->buffered_elements())); strings::StrAppend( &result, ",buffered_bytes_MB=", static_cast<long long>( static_cast<double>(model_node()->buffered_bytes()) * 1e-6)); } } strings::StrAppend(&result, "#"); return result; } Status DatasetBaseIterator::GetNext(IteratorContext* ctx, std::vector<Tensor>* out_tensors, bool* end_of_sequence) { activity_watcher::ActivityScope activity_scope([&]() { activity_watcher::Activity::Attributes attributes; attributes["iterator_prefix"] = prefix(); return std::make_unique<activity_watcher::Activity>( "Iterator::GetNext", activity_watcher::ActivityCategory::kDatasetOp, std::move(attributes)); }); tsl::profiler::TraceMe activity([&] { return BuildTraceMeName(); }, tsl::profiler::TraceMeLevel::kInfo); DVLOG(3) << prefix() << " GetNext enter"; auto model = ctx->model(); bool output_was_recording = node_ && node_->output() && node_->output()->is_recording(); if (collect_resource_usage(ctx)) { int64_t now_nanos = EnvTime::NowNanos(); if (output_was_recording) { node_->output()->record_stop(now_nanos); } node_->record_start(now_nanos); } out_tensors->clear(); Status s = GetNextInternal(ctx, out_tensors, end_of_sequence); ctx->SaveCheckpoint(this); if (!SymbolicCheckpointCompatible()) { ctx->UpdateCheckpointStatus([this]() { return errors::Unimplemented(dataset()->type_string(), " does not support symbolic checkpointing."); }); } if (TF_PREDICT_TRUE(s.ok())) { if (TF_PREDICT_TRUE(!*end_of_sequence)) { if (TF_PREDICT_FALSE(out_tensors->size() != dataset()->output_dtypes().size())) { return errors::Internal("Expected ", dataset()->output_dtypes().size(), " components but got ", out_tensors->size(), "."); } 
RecordElement(ctx, out_tensors); } else { out_tensors->clear(); } } if (collect_resource_usage(ctx)) { int64_t now_nanos = EnvTime::NowNanos(); node_->record_stop(now_nanos); if (output_was_recording) { node_->output()->record_start(now_nanos); } } if (TF_PREDICT_FALSE(errors::IsOutOfRange(s))) { s = errors::Internal("Iterator \"", params_.prefix, "\" returned `OutOfRange`. This indicates an " "implementation error as `OutOfRange` errors are not " "expected to be returned here. Original message: ", s.message()); LOG(ERROR) << s; } DVLOG(3) << prefix() << " GetNext exit"; return s; } Status DatasetBaseIterator::Skip(IteratorContext* ctx, int num_to_skip, bool* end_of_sequence, int* num_skipped) { tsl::profiler::TraceMe activity([&] { return BuildTraceMeName(); }, tsl::profiler::TraceMeLevel::kInfo); DVLOG(3) << prefix() << " Skip enter"; auto model = ctx->model(); bool output_was_recording = node_ && node_->output() && node_->output()->is_recording(); if (collect_resource_usage(ctx)) { int64_t now_nanos = EnvTime::NowNanos(); auto output = node_->output(); if (output_was_recording) { output->record_stop(now_nanos); } node_->record_start(now_nanos); } Status s = SkipInternal(ctx, num_to_skip, end_of_sequence, num_skipped); if (collect_resource_usage(ctx)) { int64_t now_nanos = EnvTime::NowNanos(); node_->record_stop(now_nanos); auto output = node_->output(); if (output_was_recording) { output->record_start(now_nanos); } } if (TF_PREDICT_FALSE(errors::IsOutOfRange(s))) { s = errors::Internal("Iterator \"", params_.prefix, "\" returned `OutOfRange`. This indicates an " "implementation error as `OutOfRange` errors are not " "expected to be returned here. Original message: ", s.message()); LOG(ERROR) << s; } DVLOG(3) << prefix() << " Skip exit"; return s; } Status DatasetBaseIterator::SkipInternal(IteratorContext* ctx, int num_to_skip, bool* end_of_sequence, int* num_skipped) { *num_skipped = 0; for (int i = 0; i < num_to_skip; ++i) { std::vector<Tensor> out_tensors; TF_RETURN_IF_ERROR(GetNextInternal(ctx, &out_tensors, end_of_sequence)); if (*end_of_sequence) { return absl::OkStatus(); } RecordElement(ctx, &out_tensors); (*num_skipped)++; } return absl::OkStatus(); } void DatasetOpKernel::Compute(OpKernelContext* ctx) { DatasetBase* dataset = nullptr; MakeDataset(ctx, &dataset); if (ctx->status().ok()) { Tensor* output = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &output)); OP_REQUIRES_OK(ctx, StoreDatasetInVariantTensor(dataset, output)); if (ctx->stack_trace().has_value() && VLOG_IS_ON(4)) { VLOG(4) << "Dataset " << dataset->type_string() << " created using the following stack trace:"; for (const auto& stack_frame : ctx->stack_trace()->ToStackFrames( {}, {}, false, -1)) { VLOG(4) << stack_frame.file_name << ":" << stack_frame.line_number << " in " << stack_frame.function_name << "()"; } } dataset->Initialize(metadata_); } } string DatasetOpKernel::TraceString(const OpKernelContext& ctx, bool verbose) const { return tsl::profiler::TraceMeOp(name_view(), type_string_view()); } bool DatasetOpKernel::IsDatasetOp(const OpDef& op_def) { if (op_def.output_arg_size() != 1) return false; if (op_def.output_arg(0).type() != DT_VARIANT) return false; absl::string_view op_name = op_def.name(); std::vector<std::string> v1, v2; if (absl::StartsWith(op_name, "__wrapped__")) { v1 = absl::StrSplit(op_name, "__wrapped__", absl::SkipEmpty()); if (v1.empty()) return false; v2 = absl::StrSplit(v1[0], "_", absl::SkipEmpty()); op_name = v2.empty() ? 
v1[0] : v2[0]; } if (op_name == "DatasetFromGraph") return true; if (absl::EndsWith(op_name, "Dataset")) return true; size_t index = op_name.length() - 1; while (index >= 0 && isdigit(op_name[index])) { index--; } constexpr absl::string_view kDatasetPrefix = "DatasetV"; constexpr absl::string_view::size_type kPrefixLength = kDatasetPrefix.size(); if (index < kPrefixLength - 1 || index == op_name.length() - 1) return false; return op_name.substr(index - kPrefixLength + 1, kPrefixLength) == kDatasetPrefix; } void UnaryDatasetOpKernel::MakeDataset(OpKernelContext* ctx, DatasetBase** output) { DatasetBase* input; OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(0), &input)); MakeDataset(ctx, input, output); } void BinaryDatasetOpKernel::MakeDataset(OpKernelContext* ctx, DatasetBase** output) { DatasetBase* input; OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(0), &input)); DatasetBase* another_input; OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(1), &another_input)); MakeDataset(ctx, input, another_input, output); } const char DatasetBase::kDatasetGraphKey[] = "_DATASET_GRAPH"; const char DatasetBase::kDatasetGraphOutputNodeKey[] = "_DATASET_GRAPH_OUTPUT_NODE"; BackgroundWorker::BackgroundWorker(Env* env, const char* name) : env_(env), name_(name) {} BackgroundWorker::~BackgroundWorker() { { mutex_lock l(mu_); cancelled_ = true; } cond_var_.notify_one(); thread_.reset(); } void BackgroundWorker::Schedule(std::function<void()> work_item) { { mutex_lock l(mu_); if (!thread_) { thread_ = absl::WrapUnique(env_->StartThread( {} , name_, [this]() { WorkerLoop(); })); } work_queue_.push_back(std::move(work_item)); } cond_var_.notify_one(); } void BackgroundWorker::WorkerLoop() { tensorflow::ResourceTagger tag(kTFDataResourceTag, "Background"); while (true) { std::function<void()> work_item = nullptr; { mutex_lock l(mu_); while (!cancelled_ && work_queue_.empty()) { cond_var_.wait(l); } if (cancelled_) { return; } DCHECK(!work_queue_.empty()); work_item = std::move(work_queue_.front()); work_queue_.pop_front(); } DCHECK(work_item != nullptr); work_item(); } } namespace { class RunnerImpl : public Runner { public: void Run(const std::function<void()>& f) override { tensorflow::ResourceTagger tag(kTFDataResourceTag, "Runner"); f(); PreventTailCall(); } private: virtual void PreventTailCall() {} }; } Runner* Runner::get() { static Runner* singleton = new RunnerImpl; return singleton; } } }
#include "tensorflow/core/framework/dataset.h" #include <memory> #include <tuple> #include <gtest/gtest.h> #include "absl/container/flat_hash_set.h" #include "absl/status/status.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace data { TEST(DatasetTest, FullName) { EXPECT_EQ(FullName("prefix", "name"), "60d899aa0d8ce4351e7c3b419e92d25b|prefix:name"); } enum DataTypeTest { _tf_int_32, _tf_int_64, _tf_float_, _tf_double_, _tf_string_ }; struct DatasetTestParam { const DataTypeTest type; std::function<std::vector<Tensor>()> tensor_factory; const int64_t expected_bytes; }; class DatasetTestTotalBytes : public ::testing::TestWithParam<DatasetTestParam> {}; TEST_P(DatasetTestTotalBytes, TestTotalBytes) { const DatasetTestParam& test_case = GetParam(); if (test_case.type == _tf_string_) { EXPECT_LE(GetTotalBytes(test_case.tensor_factory()), test_case.expected_bytes); } else { EXPECT_EQ(GetTotalBytes(test_case.tensor_factory()), test_case.expected_bytes); } } std::vector<Tensor> tensor_tf_int_32s() { return {test::AsTensor<int32>({1, 2, 3, 4, 5}), test::AsTensor<int32>({1, 2, 3, 4})}; } std::vector<Tensor> tensor_tf_int_64s() { return {test::AsTensor<int64_t>({1, 2, 3, 4, 5}), test::AsTensor<int64_t>({10, 12})}; } std::vector<Tensor> tensor_tf_float_s() { return {test::AsTensor<float>({1.0, 2.0, 3.0, 4.0})}; } std::vector<Tensor> tensor_tf_double_s() { return {test::AsTensor<double>({100.0}), test::AsTensor<double>({200.0}), test::AsTensor<double>({400.0}), test::AsTensor<double>({800.0})}; } const tstring str = "test string"; std::vector<Tensor> tensor_strs() { return {test::AsTensor<tstring>({str})}; } INSTANTIATE_TEST_SUITE_P( DatasetTestTotalBytes, DatasetTestTotalBytes, ::testing::ValuesIn(std::vector<DatasetTestParam>{ {_tf_int_32, tensor_tf_int_32s, 4 * 9 }, {_tf_int_64, tensor_tf_int_64s, 8 * 7 }, {_tf_float_, tensor_tf_float_s, 4 * 4 }, {_tf_double_, tensor_tf_double_s, 8 * 4 }, {_tf_string_, tensor_strs, static_cast<int64_t>(sizeof(str) + str.size()) }})); struct MergeOptionsTestParam { const std::string source; const std::string destination; const std::string expected; }; class MergeOptionsTest : public ::testing::TestWithParam<MergeOptionsTestParam> {}; TEST_P(MergeOptionsTest, MergeOptions) { const MergeOptionsTestParam& test_case = GetParam(); Options source; CHECK(tensorflow::protobuf::TextFormat::ParseFromString(test_case.source, &source)); Options destination; CHECK(tensorflow::protobuf::TextFormat::ParseFromString(test_case.destination, &destination)); Options expected; CHECK(tensorflow::protobuf::TextFormat::ParseFromString(test_case.expected, &expected)); internal::MergeOptions(source, &destination); EXPECT_EQ(expected.SerializeAsString(), destination.SerializeAsString()); } INSTANTIATE_TEST_SUITE_P( MergeOptionsTest, MergeOptionsTest, ::testing::ValuesIn(std::vector<MergeOptionsTestParam>{ {"deterministic: false", "", "deterministic: false"}, {"deterministic: false", "deterministic: false", "deterministic: false"}, {"deterministic: false", "deterministic: true", "deterministic: false"}, {"external_state_policy: POLICY_IGNORE", "external_state_policy: POLICY_FAIL", "external_state_policy: POLICY_IGNORE"}})); TEST(DatasetTest, IsDatasetOp) { OpDef op_def; EXPECT_FALSE(DatasetOpKernel::IsDatasetOp(op_def)); op_def.add_output_arg()->set_type(DT_STRING); 
EXPECT_FALSE(DatasetOpKernel::IsDatasetOp(op_def)); op_def.mutable_output_arg(0)->set_type(DT_VARIANT); op_def.set_name("Identity"); EXPECT_FALSE(DatasetOpKernel::IsDatasetOp(op_def)); for (const auto& name : {"Dataset", "RangeDataset", "MapDatasetV1", "ParallelInterleaveDatasetV42", "DataServiceDatasetV1000", "DatasetFromGraph"}) { op_def.set_name(name); EXPECT_TRUE(DatasetOpKernel::IsDatasetOp(op_def)); } } TEST(DatasetTest, IdRegistry) { MemoryCheckpoint::IdRegistry id_registry; auto id_1 = id_registry.Add("foo", "key_1"); auto id_2 = id_registry.Add("foo:bar", "key_2"); auto id_3 = id_registry.Add("foo:bar:baz", "key_3"); auto [prefix_1, key_1] = id_registry.Get(id_1); EXPECT_EQ(prefix_1, "foo"); EXPECT_EQ(key_1, "key_1"); auto [prefix_2, key_2] = id_registry.Get(id_2); EXPECT_EQ(prefix_2, "foo:bar"); EXPECT_EQ(key_2, "key_2"); auto [prefix_3, key_3] = id_registry.Get(id_3); EXPECT_EQ(prefix_3, "foo:bar:baz"); EXPECT_EQ(key_3, "key_3"); auto matching_ids = id_registry.GetMatchingIds("hello"); EXPECT_EQ(matching_ids.size(), 0); matching_ids = id_registry.GetMatchingIds("foo:bar:baz"); EXPECT_EQ(matching_ids.size(), 1); matching_ids = id_registry.GetMatchingIds("foo:bar"); EXPECT_EQ(matching_ids.size(), 2); matching_ids = id_registry.GetMatchingIds("foo"); EXPECT_EQ(matching_ids.size(), 3); matching_ids = id_registry.GetMatchingIds("f"); EXPECT_EQ(matching_ids.size(), 3); absl::flat_hash_set<int64_t> matching_ids_set(matching_ids.begin(), matching_ids.end()); EXPECT_TRUE(matching_ids_set.contains(id_1)); EXPECT_TRUE(matching_ids_set.contains(id_2)); EXPECT_TRUE(matching_ids_set.contains(id_3)); id_registry.RemoveIds(matching_ids); matching_ids = id_registry.GetMatchingIds("foo"); EXPECT_EQ(matching_ids.size(), 0); } TEST(DatasetTest, MemoryCheckpointWrites) { std::shared_ptr<MemoryCheckpoint::IdRegistry> id_registry = std::make_shared<MemoryCheckpoint::IdRegistry>(); MemoryCheckpoint memory_checkpoint(id_registry); Tensor input_tensor(DT_FLOAT, {1}); input_tensor.flat<float>()(0) = 2.0f; TF_EXPECT_OK(memory_checkpoint.WriteScalar("name_foo", "key_bar", 5)); TF_EXPECT_OK( memory_checkpoint.WriteTensor("name_corgi", "key_baz", input_tensor)); auto matching_ids = id_registry->GetMatchingIds("name_foo"); EXPECT_EQ(matching_ids.size(), 1); auto id = matching_ids.at(0); auto [_, key] = id_registry->Get(id); EXPECT_EQ(key, "key_bar"); matching_ids = id_registry->GetMatchingIds("name_corgi"); EXPECT_EQ(matching_ids.size(), 1); id = matching_ids.at(0); std::tie(_, key) = id_registry->Get(id); EXPECT_EQ(key, "key_baz"); } } }
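Editor's note: the snippet below is not part of the upstream files. It is a minimal sketch of how the checkpoint-key helpers exercised by TEST(DatasetTest, FullName) above fit together, assuming FullName() and ExtractIteratorPrefix() are declared in tensorflow/core/framework/dataset.h and the program links against TensorFlow's core framework.

// Minimal sketch (editor-added): round-trip a checkpoint key through
// FullName() and ExtractIteratorPrefix(), mirroring the FullName test above.
#include <string>
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/platform/logging.h"

int main() {
  // FullName() prepends a fixed random-hex tag and joins prefix and name
  // with ':', e.g. "60d899aa0d8ce4351e7c3b419e92d25b|prefix:name".
  const std::string key = tensorflow::data::FullName("prefix", "name");
  std::string prefix;
  // ExtractIteratorPrefix() recovers "prefix" from a key built by FullName();
  // it rejects keys that were not generated that way.
  tensorflow::Status s = tensorflow::data::ExtractIteratorPrefix(key, &prefix);
  LOG(INFO) << "key=" << key << " extracted prefix=" << prefix;
  return (s.ok() && prefix == "prefix") ? 0 : 1;
}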
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/dataset.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/dataset_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9f7a8b01-7a36-410b-8f64-3545915fd10c
cpp
google/tensorstore
downsample_array
tensorstore/driver/downsample/downsample_array.cc
tensorstore/driver/downsample/downsample_array_test.cc
#include "tensorstore/driver/downsample/downsample_array.h" #include "absl/status/status.h" #include "tensorstore/array.h" #include "tensorstore/downsample_method.h" #include "tensorstore/driver/downsample/downsample_nditerable.h" #include "tensorstore/driver/downsample/downsample_util.h" #include "tensorstore/index.h" #include "tensorstore/index_space/dim_expression.h" #include "tensorstore/index_space/transformed_array.h" #include "tensorstore/internal/arena.h" #include "tensorstore/internal/nditerable.h" #include "tensorstore/internal/nditerable_array.h" #include "tensorstore/internal/nditerable_copy.h" #include "tensorstore/internal/nditerable_transformed_array.h" #include "tensorstore/util/result.h" #include "tensorstore/util/span.h" #include "tensorstore/util/status.h" namespace tensorstore { namespace internal_downsample { namespace { absl::Status ValidateDownsampleDomain(BoxView<> base_domain, BoxView<> downsampled_domain, span<const Index> downsample_factors, DownsampleMethod method) { const DimensionIndex rank = base_domain.rank(); if (rank != downsampled_domain.rank()) { return absl::InvalidArgumentError(tensorstore::StrCat( "Cannot downsample domain ", base_domain, " to domain ", downsampled_domain, " with different rank")); } if (rank != downsample_factors.size()) { return absl::InvalidArgumentError(tensorstore::StrCat( "Cannot downsample domain ", base_domain, " with downsample factors ", downsample_factors, " of different rank")); } for (DimensionIndex i = 0; i < rank; ++i) { const auto expected_interval = DownsampleInterval(base_domain[i], downsample_factors[i], method); if (expected_interval != downsampled_domain[i]) { return absl::InvalidArgumentError(tensorstore::StrCat( "Cannot downsample array with domain ", base_domain, " by factors ", downsample_factors, " with method ", method, " to array with domain ", downsampled_domain, ": expected target dimension ", i, " to have domain ", expected_interval)); } } return absl::OkStatus(); } } absl::Status DownsampleArray(OffsetArrayView<const void> source, OffsetArrayView<void> target, span<const Index> downsample_factors, DownsampleMethod method) { if (source.dtype() != target.dtype()) { return absl::InvalidArgumentError(tensorstore::StrCat( "Source data type (", source.dtype(), ") does not match target data type (", target.dtype(), ")")); } TENSORSTORE_RETURN_IF_ERROR(ValidateDownsampleMethod(source.dtype(), method)); TENSORSTORE_RETURN_IF_ERROR(ValidateDownsampleDomain( source.domain(), target.domain(), downsample_factors, method)); if (method == DownsampleMethod::kStride) { return CopyTransformedArray( source | tensorstore::AllDims().Stride(downsample_factors), target); } internal::DefaultNDIterableArena arena; auto base_iterable = GetArrayNDIterable(UnownedToShared(source), arena); auto target_iterable = GetArrayNDIterable(UnownedToShared(target), arena); auto downsampled_iterable = DownsampleNDIterable( std::move(base_iterable), source.domain(), downsample_factors, method, downsample_factors.size(), arena); internal::NDIterableCopier copier(*downsampled_iterable, *target_iterable, target.shape(), skip_repeated_elements, arena); return copier.Copy(); } Result<SharedOffsetArray<void>> DownsampleArray( OffsetArrayView<const void> source, span<const Index> downsample_factors, DownsampleMethod method) { SharedOffsetArray<void> target; target.layout().set_rank(source.rank()); DownsampleBounds(source.domain(), MutableBoxView<>(target.origin(), target.shape()), downsample_factors, method); target.element_pointer() = 
AllocateArrayElementsLike<void>( StridedLayoutView<dynamic_rank, offset_origin>( target.rank(), target.origin().data(), target.shape().data(), source.byte_strides().data()), target.byte_strides().data(), skip_repeated_elements, default_init, source.dtype()); TENSORSTORE_RETURN_IF_ERROR( DownsampleArray(source, target, downsample_factors, method)); return target; } absl::Status DownsampleTransformedArray(TransformedArrayView<const void> source, TransformedArrayView<void> target, span<const Index> downsample_factors, DownsampleMethod method) { if (source.dtype() != target.dtype()) { return absl::InvalidArgumentError(tensorstore::StrCat( "Source data type (", source.dtype(), ") does not match target data type (", target.dtype(), ")")); } TENSORSTORE_RETURN_IF_ERROR(ValidateDownsampleMethod(source.dtype(), method)); TENSORSTORE_RETURN_IF_ERROR( ValidateDownsampleDomain(source.domain().box(), target.domain().box(), downsample_factors, method)); if (method == DownsampleMethod::kStride) { return CopyTransformedArray( std::move(source) | tensorstore::AllDims().Stride(downsample_factors), target); } internal::DefaultNDIterableArena arena; TENSORSTORE_ASSIGN_OR_RETURN( auto base_iterable, GetTransformedArrayNDIterable(UnownedToShared(source), arena)); TENSORSTORE_ASSIGN_OR_RETURN( auto target_iterable, GetTransformedArrayNDIterable(UnownedToShared(target), arena)); auto downsampled_iterable = DownsampleNDIterable( std::move(base_iterable), source.domain().box(), downsample_factors, method, downsample_factors.size(), arena); internal::NDIterableCopier copier(*downsampled_iterable, *target_iterable, target.shape(), skip_repeated_elements, arena); return copier.Copy(); } Result<SharedOffsetArray<void>> DownsampleTransformedArray( TransformedArrayView<const void> source, span<const Index> downsample_factors, DownsampleMethod method) { SharedOffsetArray<void> target; target.layout().set_rank(source.rank()); DownsampleBounds(source.domain().box(), MutableBoxView<>(target.origin(), target.shape()), downsample_factors, method); target = AllocateArray(target.domain(), c_order, default_init, source.dtype()); TENSORSTORE_RETURN_IF_ERROR(DownsampleTransformedArray( source, TransformedArray(target), downsample_factors, method)); return target; } } }
#include "tensorstore/driver/downsample/downsample_array.h" #include <stdint.h> #include <gmock/gmock.h> #include <gtest/gtest.h> #include <nlohmann/json.hpp> #include "tensorstore/array.h" #include "tensorstore/array_testutil.h" #include "tensorstore/data_type.h" #include "tensorstore/downsample_method.h" #include "tensorstore/index.h" #include "tensorstore/index_space/dim_expression.h" #include "tensorstore/index_space/transformed_array.h" #include "tensorstore/util/span.h" namespace { using ::tensorstore::Dims; using ::tensorstore::DownsampleMethod; using ::tensorstore::Index; using ::tensorstore::kImplicit; using ::tensorstore::MakeArray; using ::tensorstore::MakeOffsetArray; using ::tensorstore::span; using ::tensorstore::internal_downsample::DownsampleArray; using ::tensorstore::internal_downsample::DownsampleTransformedArray; using ::testing::Optional; TEST(DownsampleArrayTest, MeanRank0) { EXPECT_THAT(DownsampleArray(tensorstore::MakeScalarArray<float>(42.0), span<const Index>(), DownsampleMethod::kMean), Optional(tensorstore::MakeScalarArray<float>(42.0))); } TEST(DownsampleArrayTest, MeanRank1ExactMultiple) { EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 5, 7}), span<const Index>({2}), DownsampleMethod::kMean), Optional(MakeArray<float>({1.5, 6}))); EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 3, 5, 7, 12}), span<const Index>({3}), DownsampleMethod::kMean), Optional(MakeArray<float>({2, 8}))); } TEST(DownsampleArrayTest, MeanRoundingUint8) { EXPECT_THAT(DownsampleArray(MakeArray<uint8_t>({253, 254, 254}), span<const Index>({3}), DownsampleMethod::kMean), Optional(MakeArray<uint8_t>({254}))); } TEST(DownsampleArrayTest, MeanRoundingInt16) { EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({-253, -254, -254}), span<const Index>({3}), DownsampleMethod::kMean), Optional(MakeArray<int16_t>({-254}))); } TEST(DownsampleArrayTest, MeanRoundingToEvenInt16) { EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({3, 3, 2, 2}), span<const Index>({4}), DownsampleMethod::kMean), Optional(MakeArray<int16_t>({2}))); EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({3, 3, 4, 4}), span<const Index>({4}), DownsampleMethod::kMean), Optional(MakeArray<int16_t>({4}))); EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({-3, -3, -2, -2}), span<const Index>({4}), DownsampleMethod::kMean), Optional(MakeArray<int16_t>({-2}))); EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({-3, -3, -4, -4}), span<const Index>({4}), DownsampleMethod::kMean), Optional(MakeArray<int16_t>({-4}))); } TEST(DownsampleArrayTest, MeanRoundingUint64) { EXPECT_THAT(DownsampleArray(MakeArray<uint64_t>({253, 254, 254}), span<const Index>({3}), DownsampleMethod::kMean), Optional(MakeArray<uint64_t>({254}))); } TEST(DownsampleArrayTest, MeanRoundingBool) { EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1}), span<const Index>({3}), DownsampleMethod::kMean), Optional(MakeArray<bool>({0}))); EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 1, 1}), span<const Index>({3}), DownsampleMethod::kMean), Optional(MakeArray<bool>({1}))); EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 1, 1, 0}), span<const Index>({4}), DownsampleMethod::kMean), Optional(MakeArray<bool>({0}))); } TEST(DownsampleArrayTest, MeanRank1Offset) { EXPECT_THAT(DownsampleArray(MakeOffsetArray<float>({1}, {1, 2, 5, 9}), span<const Index>({2}), DownsampleMethod::kMean), Optional(MakeArray<float>({1, 3.5, 9}))); } TEST(DownsampleArrayTest, MeanRank1SingleDownsampledElement) { EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2}), span<const Index>({2}), 
DownsampleMethod::kMean), Optional(MakeArray<float>({1.5}))); } TEST(DownsampleArrayTest, MeanRank1NotExactMultiple) { EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 5, 7, 9}), span<const Index>({2}), DownsampleMethod::kMean), Optional(MakeArray<float>({1.5, 6, 9}))); EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 6, 7, 9}), span<const Index>({3}), DownsampleMethod::kMean), Optional(MakeArray<float>({3, 8}))); } TEST(DownsampleArrayTest, MeanRank1NoDownsampling) { EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 5, 7}), span<const Index>({1}), DownsampleMethod::kMean), Optional(MakeArray<float>({1, 2, 5, 7}))); } TEST(DownsampleArrayTest, MeanRank2SingleDownsampleDim1) { EXPECT_THAT( DownsampleArray(MakeArray<float>({ {1, 2, 5, 7}, {5, 6, 15, 25}, }), span<const Index>({1, 2}), DownsampleMethod::kMean), Optional(MakeArray<float>({{1.5, 6}, {5.5, 20}}))); } TEST(DownsampleArrayTest, MeanRank2SingleDownsampleDim0) { EXPECT_THAT( DownsampleArray(MakeArray<float>({ {1, 2, 5, 7}, {5, 6, 15, 25}, }), span<const Index>({2, 1}), DownsampleMethod::kMean), Optional(MakeArray<float>({{3, 4, 10, 16}}))); } TEST(DownsampleArrayTest, MeanRank2TwoDownsampleDims) { EXPECT_THAT( DownsampleArray(MakeArray<float>({ {1, 2, 5, 7}, {5, 6, 15, 25}, }), span<const Index>({2, 2}), DownsampleMethod::kMean), Optional(MakeArray<float>({{3.5, 13.0}}))); } TEST(DownsampleArrayTest, MeanRank2NotExactMultiple) { EXPECT_THAT( DownsampleArray(MakeArray<float>({ {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}, }), span<const Index>({2, 2}), DownsampleMethod::kMean), Optional(MakeArray<float>({ {4, 6, 7.5}, {11.5, 13.5, 15}, }))); } TEST(DownsampleArrayTest, MeanRank2PartialStartBlock) { EXPECT_THAT( DownsampleArray(MakeOffsetArray<float>({3, 8}, {{1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}}), span<const Index>({2, 3}), DownsampleMethod::kMean), Optional(MakeOffsetArray<float>({1, 2}, {{1, 3, 5}, {8.5, 10.5, 12.5}}))); } TEST(DownsampleArrayTest, MedianRank2PartialStartBlock) { EXPECT_THAT( DownsampleArray(MakeOffsetArray<float>({3, 8}, {{1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}}), span<const Index>({2, 3}), DownsampleMethod::kMedian), Optional(MakeOffsetArray<float>({1, 2}, {{1, 3, 5}, {6, 9, 10}}))); } TEST(DownsampleArrayTest, ModeRank2PartialStartBlock) { EXPECT_THAT( DownsampleArray(MakeOffsetArray<float>({3, 8}, { {1, 2, 3, 3, 5}, {6, 4, 5, 5, 10}, {11, 6, 6, 6, 15}, }), span<const Index>({2, 3}), DownsampleMethod::kMode), Optional(MakeOffsetArray<float>({1, 2}, {{1, 3, 5}, {6, 6, 10}}))); } TEST(DownsampleArrayTest, StrideRank2PartialEndBlock) { EXPECT_THAT( DownsampleArray(MakeOffsetArray<float>({2, 6}, { {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}, }), span<const Index>({2, 3}), DownsampleMethod::kStride), Optional(MakeOffsetArray<float>({1, 2}, { {1, 4}, {11, 14}, }))); } TEST(DownsampleArrayTest, StrideRank2PartialStartBlock) { EXPECT_THAT( DownsampleArray(MakeOffsetArray<float>({3, 8}, { {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}, }), span<const Index>({2, 3}), DownsampleMethod::kStride), Optional(MakeOffsetArray<float>({2, 3}, { {7, 10}, }))); } TEST(DownsampleArrayTest, MeanRank3ThreeDownsampleDims) { EXPECT_THAT( DownsampleArray(MakeArray<float>({{ {1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}, }, { {13, 14, 15, 16}, {17, 18, 19, 20}, {21, 22, 23, 24}, }, { {25, 26, 27, 28}, {29, 30, 31, 32}, {33, 34, 35, 36}, }}), span<const Index>({2, 2, 2}), DownsampleMethod::kMean), Optional(MakeArray<float>({{ {9.5, 11.5}, {15.5, 17.5}, }, { {27.5, 
29.5}, {33.5, 35.5}, }}))); } TEST(DownsampleArrayTest, MeanRank1ReversedExactMultiple) { EXPECT_THAT(DownsampleTransformedArray( (MakeArray<float>({1, 2, 3, 4}) | Dims(0).TranslateSizedInterval(kImplicit, kImplicit, -1)) .value(), span<const Index>({2}), DownsampleMethod::kMean), Optional(MakeArray<float>({3.5, 1.5}))); } TEST(DownsampleArrayTest, MeanRank1ReversedNotExactMultiple) { EXPECT_THAT(DownsampleTransformedArray( (MakeArray<float>({1, 2, 3, 4, 5}) | Dims(0).TranslateSizedInterval(kImplicit, kImplicit, -1)) .value(), span<const Index>({2}), DownsampleMethod::kMean), Optional(MakeArray<float>({4.5, 2.5, 1}))); } TEST(DownsampleArrayTest, MeanRank2ReversedNotExactMultiple) { EXPECT_THAT(DownsampleTransformedArray( (MakeArray<float>({ {1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}, }) | Dims(0, 1).TranslateSizedInterval(kImplicit, kImplicit, -1)) .value(), span<const Index>({2, 2}), DownsampleMethod::kMean), Optional(MakeArray<float>({ {12, 10, 8.5}, {4.5, 2.5, 1}, }))); } TEST(DownsampleArrayTest, MinRank1ExactMultiple) { EXPECT_THAT(DownsampleArray(MakeArray<float>({2, 3, 5, 1}), span<const Index>({2}), DownsampleMethod::kMin), Optional(MakeArray<float>({2, 1}))); EXPECT_THAT(DownsampleArray(MakeArray<int>({2, 3, 8, 7, 1, 5}), span<const Index>({3}), DownsampleMethod::kMin), Optional(MakeArray<int>({2, 1}))); } TEST(DownsampleArrayTest, MaxRank1ExactMultiple) { EXPECT_THAT(DownsampleArray(MakeArray<float>({2, 3, 5, 1}), span<const Index>({2}), DownsampleMethod::kMax), Optional(MakeArray<float>({3, 5}))); EXPECT_THAT(DownsampleArray(MakeArray<int>({2, 3, 8, 7, 1, 5}), span<const Index>({3}), DownsampleMethod::kMax), Optional(MakeArray<int>({8, 7}))); } TEST(DownsampleArrayTest, MedianRank1ExactMultiple) { EXPECT_THAT( DownsampleArray(MakeArray<float>({100, 3, 1, 2, 99, 98, 97, 5}), span<const Index>({4}), DownsampleMethod::kMedian), Optional(MakeArray<float>({2, 97}))); } TEST(DownsampleArrayTest, MedianRank1Partial) { EXPECT_THAT( DownsampleArray(MakeArray<float>({100, 3, 1, 2, 99, 97, 98}), span<const Index>({4}), DownsampleMethod::kMedian), Optional(MakeArray<float>({2, 98}))); } TEST(DownsampleArrayTest, ModeRank1ExactMultiple) { EXPECT_THAT(DownsampleArray(MakeArray<float>({100, 99, 99, 99, 3, 3, 2, 2}), span<const Index>({4}), DownsampleMethod::kMode), Optional(MakeArray<float>({99, 2}))); } TEST(DownsampleArrayTest, ModeRank1Partial) { EXPECT_THAT(DownsampleArray(MakeArray<float>({100, 99, 99, 99, 3, 3, 2}), span<const Index>({4}), DownsampleMethod::kMode), Optional(MakeArray<float>({99, 3}))); } TEST(DownsampleArrayTest, ModeBool) { EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1, 1}), span<const Index>({4}), DownsampleMethod::kMode), Optional(MakeArray<bool>({0}))); EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 1, 1, 1}), span<const Index>({4}), DownsampleMethod::kMode), Optional(MakeArray<bool>({1}))); EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1, 1, 1}), span<const Index>({5}), DownsampleMethod::kMode), Optional(MakeArray<bool>({1}))); } TEST(DownsampleArrayTest, MeanBool) { EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1, 1}), span<const Index>({4}), DownsampleMethod::kMean), Optional(MakeArray<bool>({0}))); EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 1, 1, 1}), span<const Index>({4}), DownsampleMethod::kMean), Optional(MakeArray<bool>({1}))); EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1, 1, 1}), span<const Index>({5}), DownsampleMethod::kMean), Optional(MakeArray<bool>({1}))); } TEST(DownsampleArrayTest, MedianBool) { 
EXPECT_THAT( DownsampleArray(MakeArray<bool>({0, 0, 1, 1}), span<const Index>({4}), DownsampleMethod::kMedian), Optional(MakeArray<bool>({0}))); EXPECT_THAT( DownsampleArray(MakeArray<bool>({0, 1, 1, 1}), span<const Index>({4}), DownsampleMethod::kMedian), Optional(MakeArray<bool>({1}))); EXPECT_THAT( DownsampleArray(MakeArray<bool>({0, 0, 1, 1, 1}), span<const Index>({5}), DownsampleMethod::kMedian), Optional(MakeArray<bool>({1}))); } TEST(DownsampleArrayTest, ModeJson) { using ::tensorstore::dtypes::json_t; EXPECT_THAT(DownsampleArray(MakeArray<json_t>({"a", "a", 3.0, 3, 3u}), span<const Index>({5}), DownsampleMethod::kMode), Optional(MakeArray<::nlohmann::json>({json_t(3)}))); } TEST(DownsampleArrayTest, MultipleBlocks) { auto source_array = tensorstore::AllocateArray<uint8_t>({128, 128}); auto expected_downsampled = tensorstore::AllocateArray<uint8_t>({64, 64}); for (int i = 0; i < 128; ++i) { for (int j = 0; j < 128; ++j) { source_array(i, j) = static_cast<uint8_t>(i); } } for (int i = 0; i < 64; ++i) { for (int j = 0; j < 64; ++j) { expected_downsampled(i, j) = static_cast<uint8_t>(i * 2); } } EXPECT_THAT(DownsampleArray(source_array, {{2, 2}}, DownsampleMethod::kMean), Optional(tensorstore::MatchesArray(expected_downsampled))); } }
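Editor's note: the snippet below is not part of the upstream files. It is a small usage sketch of the DownsampleArray() overload defined in downsample_array.cc above, applied to a 2x4 float array with factors {2, 2} and kMean, matching the MeanRank2TwoDownsampleDims test case; it assumes linkage against tensorstore.

// Minimal sketch (editor-added): mean-downsample a 2x4 array by factors
// {2, 2}. Each 2x2 block is averaged, so the expected result is {{3.5, 13}}.
#include <iostream>
#include "tensorstore/array.h"
#include "tensorstore/downsample_method.h"
#include "tensorstore/driver/downsample/downsample_array.h"
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"

int main() {
  auto source = tensorstore::MakeArray<float>({{1, 2, 5, 7}, {5, 6, 15, 25}});
  const tensorstore::Index factors[] = {2, 2};
  // Returns Result<SharedOffsetArray<void>>; the allocating overload sizes
  // the target via DownsampleBounds() before copying through the iterables.
  auto downsampled = tensorstore::internal_downsample::DownsampleArray(
      source, factors, tensorstore::DownsampleMethod::kMean);
  if (!downsampled.ok()) {
    std::cerr << downsampled.status() << "\n";
    return 1;
  }
  std::cout << *downsampled << "\n";  // Prints {{3.5, 13}}
  return 0;
}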
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/downsample/downsample_array.cc
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/downsample/downsample_array_test.cc
4f887a6430414cd6088e1743555015b10f116d50